"""Source code for ml_toolkit.functions.llm.prompt.interface."""

from typing import Dict, List, Optional, Union

from yipit_databricks_client.helpers.telemetry import track_usage

from ml_toolkit.functions.llm.prompt.function import (
    create_or_fetch_prompt_from_mlflow,
    delete_prompt_from_mlflow,
    delete_version_from_mlflow,
    load_prompt_from_mlflow,
    register_prompt_to_mlflow,
    search_prompts_in_mlflow,
    set_alias_for_prompt,
)
from ml_toolkit.ops.helpers.logger import get_logger


@track_usage
def create_or_fetch_prompt(
    name: str,
    template: str,
    commit_message: Optional[str] = None,
    tags: Optional[Dict[str, str]] = None,
    create_if_missing: bool = True,
    **kwargs,
) -> Optional[dict]:
    """
    Create or fetch a prompt in the MLflow Prompt Registry, deduplicating
    by template content across ALL existing versions.

    The core registry call stores a SHA256 hash of the template as a
    ``template_hash`` tag and searches every version for a matching hash:
    if one exists, that version is returned and no new version is created.
    This makes the call idempotent and rollback-friendly — reverting to an
    older template returns the old version instead of minting a new one.

    Template placeholders use double curly braces, e.g.::

        template = "You are a {{role}}. Answer this question: {{question}}"

    :param name: Fully qualified prompt name in the form
        "catalog.schema.prompt_name" (Unity Catalog three-level namespace).
    :param template: Prompt template string with ``{{variable}}`` placeholders.
    :param commit_message: Optional message describing this version.
    :param tags: Optional metadata tags; every value must be a string.
        A ``template_hash`` tag is added automatically by the core function.
    :param create_if_missing: If True (default), create a new version when
        no matching template exists; if False, return None instead.
    :param kwargs: Extra keyword arguments forwarded to the core function.
    :returns: Dict with ``name``, ``version``, ``template``,
        ``commit_message`` and ``tags``, or None when
        ``create_if_missing=False`` and nothing matched.
    :raises ValueError: If name/template is empty, the name is not a
        three-level namespace, or a tag value is not a string.
    """
    logger = get_logger()

    # Guard clauses: reject blank inputs up front.
    if not name or not name.strip():
        raise ValueError("`name` is required and cannot be empty.")
    if not template or not template.strip():
        raise ValueError("`template` is required and cannot be empty.")

    # A Unity Catalog prompt name has exactly three dot-separated parts.
    if len(name.split(".")) != 3:
        raise ValueError(
            f"`name` must be in the format 'catalog.schema.prompt_name'. Got: {name}"
        )

    # Fail fast on the first non-string tag value.
    for key, value in (tags or {}).items():
        if not isinstance(value, str):
            raise ValueError(
                f"All tag values must be strings. Tag '{key}' has value of type {type(value).__name__}"
            )

    # Delegate creation/lookup to the core registry function.
    prompt = create_or_fetch_prompt_from_mlflow(
        name=name,
        template=template,
        commit_message=commit_message,
        tags=tags,
        create_if_missing=create_if_missing,
        **kwargs,
    )

    if prompt is None:
        # Nothing matched and we were told not to create a new version.
        logger.warning(
            f"Prompt '{name}' not found or template unchanged, and create_if_missing={create_if_missing}"
        )
        return None

    # `tags` may be absent on some registry objects, hence getattr.
    result = {
        "name": prompt.name,
        "version": prompt.version,
        "template": prompt.template,
        "commit_message": commit_message,
        "tags": getattr(prompt, "tags", None),
    }

    logger.info(
        f"Successfully created/fetched prompt '{name}' version {prompt.version}"
    )
    return result
@track_usage
def delete_prompt(name: str) -> dict:
    """
    Permanently delete a prompt from the MLflow Prompt Registry.

    .. warning::
        For Unity Catalog registries, every version must be deleted first
        with ``delete_prompt_version()``; otherwise this operation fails.

    .. caution::
        Deletion is permanent and cannot be undone.

    :param name: Fully qualified prompt name in the form
        "catalog.schema.prompt_name".
    :returns: Dict containing the ``name`` of the deleted prompt.
    :raises ValueError: If name is empty or not a three-level namespace.
    :raises Exception: If the prompt still has undeleted versions
        (Unity Catalog).
    """
    logger = get_logger()

    # Guard clauses: blank name, then namespace shape.
    if not name or not name.strip():
        raise ValueError("`name` is required and cannot be empty.")
    if len(name.split(".")) != 3:
        raise ValueError(
            f"`name` must be in the format 'catalog.schema.prompt_name'. Got: {name}"
        )

    # Delegate the actual deletion to the core registry function.
    delete_prompt_from_mlflow(name=name)

    logger.info(f"Successfully deleted prompt '{name}'")
    return {"name": name}
@track_usage
def delete_prompt_version(name: str, version: Union[str, int]) -> dict:
    """
    Permanently delete one version of a prompt from the MLflow Prompt
    Registry.

    In Unity Catalog, all versions must be removed this way before the
    prompt itself can be deleted via ``delete_prompt()``.

    .. caution::
        Deletion is permanent and cannot be undone.

    :param name: Fully qualified prompt name in the form
        "catalog.schema.prompt_name".
    :param version: Version to delete; accepts str or int (e.g. "2" or 2).
    :returns: Dict with the ``name`` and deleted ``version`` (as a string).
    :raises ValueError: If name/version is empty or the name is not a
        three-level namespace.
    """
    logger = get_logger()

    # Guard clauses, in the same order as the other validators here.
    if not name or not name.strip():
        raise ValueError("`name` is required and cannot be empty.")
    if version is None or not str(version).strip():
        raise ValueError("`version` is required and cannot be empty.")
    if len(name.split(".")) != 3:
        raise ValueError(
            f"`name` must be in the format 'catalog.schema.prompt_name'. Got: {name}"
        )

    # The core function takes the version as a string.
    version_str = str(version)
    delete_version_from_mlflow(name=name, version=version_str)

    logger.info(f"Successfully deleted version {version_str} of prompt '{name}'")
    return {"name": name, "version": version_str}
# NOTE(review): this is the only public function in this module without
# @track_usage — confirm whether that omission is intentional.
def load_prompt(
    name_or_uri: str,
    version: Optional[Union[str, int]] = None,
    allow_missing: bool = False,
    link_to_model: bool = True,
    model_id: Optional[str] = None,
) -> Optional[dict]:
    """
    Load a prompt from the MLflow Prompt Registry.

    Three addressing forms are supported:

    1. Name plus explicit ``version`` ("catalog.schema.prompt_name", 2)
    2. URI with version: ``prompts:/catalog.schema.prompt_name/2``
    3. URI with alias: ``prompts:/catalog.schema.prompt_name@production``

    :param name_or_uri: Prompt name or ``prompts:/...`` URI; a URI's
        embedded version/alias takes precedence over ``version``.
    :param version: Specific version to load; ignored when name_or_uri
        already encodes a version or alias.
    :param allow_missing: If True, return None when the prompt is not
        found instead of raising. Default False.
    :param link_to_model: Whether to link this prompt to a model.
        Default True.
    :param model_id: Optional model ID to link the prompt to.
    :returns: Dict with ``name``, ``version``, ``template`` and ``tags``,
        or None when ``allow_missing=True`` and the prompt is not found.
        (Fixed: the annotation was ``dict`` although this None path exists.)
    :raises ValueError: If name_or_uri is empty or None.
    """
    logger = get_logger()

    # Guard clause: a blank identifier can never resolve.
    if not name_or_uri or not name_or_uri.strip():
        raise ValueError("`name_or_uri` is required and cannot be empty.")

    # Delegate the actual lookup to the core registry function.
    prompt = load_prompt_from_mlflow(
        name_or_uri=name_or_uri,
        version=version,
        allow_missing=allow_missing,
        link_to_model=link_to_model,
        model_id=model_id,
    )

    if prompt is None:
        # Only reachable when allow_missing suppressed the lookup error.
        logger.warning(f"Prompt '{name_or_uri}' not found")
        return None

    # `tags` may be absent on some registry objects, hence getattr.
    result = {
        "name": prompt.name,
        "version": prompt.version,
        "template": prompt.template,
        "tags": getattr(prompt, "tags", None),
    }

    logger.info(f"Successfully loaded prompt '{prompt.name}' version {prompt.version}")
    return result
@track_usage def register_prompt( name: str, template: str, commit_message: Optional[str] = None, tags: Optional[Dict[str, str]] = None, ) -> dict: """ Register a prompt template to MLflow Prompt Registry. This function provides a user-friendly interface to register prompts to MLflow's GenAI prompt registry. It validates inputs, handles errors, and returns a structured result dictionary. Prompts are versioned automatically - each time you register a prompt with the same name, a new version is created. **Documentation:** https://docs.databricks.com/aws/en/mlflow3/genai/prompt-version-mgmt/prompt-registry/examples#register_prompt Template Syntax ^^^^^^^^^^^^^^^ Use double curly braces ``{{variable_name}}`` to define placeholders in your prompt template. These placeholders can be filled in later when using the prompt. .. code-block:: python template = "You are a {{role}}. Answer this question: {{question}}" Parameters ^^^^^^^^^^ :param name: Fully qualified name of the prompt in the format "catalog.schema.prompt_name". Must be a valid Unity Catalog three-level namespace. :param template: The prompt template string. Use ``{{variable_name}}`` syntax for placeholders. Avoid using single quotes (') as they may cause formatting issues. :param commit_message: Optional message describing this version of the prompt. Useful for tracking changes across versions. :param tags: Optional dict of key-value tags for storing metadata. All values must be strings. Useful for categorizing prompts (e.g., team, project, tested_with, avg_latency_ms). :returns: Dictionary containing prompt registration details including name, version, template, commit_message, and tags. :raises ValueError: If name or template is empty/None, or if name format is invalid. Examples ^^^^^^^^ .. 
code-block:: python :caption: Basic prompt registration from ml_toolkit.functions.prompt import register_prompt result = register_prompt( name="yd_dpe_example.mlflow_prompt_registry.customer_support", template="You are a helpful assistant. Answer this question: {{question}}", commit_message="Initial customer support prompt" ) print(f"Created version {result['version']}") .. code-block:: python :caption: Multi-variable prompt with tags from ml_toolkit.functions.prompt import register_prompt result = register_prompt( name="mycatalog.myschema.summarization", template=\"\"\"Summarize the following text in {{num_sentences}} sentences: Text: {{content}} Focus on: {{focus_areas}}\"\"\", commit_message="Added focus areas parameter", tags={ "tested_with": "gpt-4", "avg_latency_ms": "1200", "team": "content", "project": "summarization-v2" } ) print(f"Registered prompt version {result['version']}") .. code-block:: python :caption: Updating an existing prompt (creates new version) from ml_toolkit.functions.prompt import register_prompt # First registration creates version 1 result_v1 = register_prompt( name="yd_dpe_example.mlflow_prompt_registry.translator", template="Translate to {{language}}: {{text}}", commit_message="Basic translation prompt", tags={"team": "translation", "status": "experimental"} ) # Second registration with same name creates version 2 result_v2 = register_prompt( name="yd_dpe_example.mlflow_prompt_registry.translator", template="You are a professional translator. 
Translate the following text to {{language}}, maintaining the original tone: {{text}}", commit_message="Improved translation prompt with tone preservation", tags={"team": "translation", "status": "production"} ) print(f"Version 1: {result_v1['version']}, Version 2: {result_v2['version']}") """ logger = get_logger() # Validate required parameters if not name or name.strip() == "": raise ValueError("`name` is required and cannot be empty.") if not template or template.strip() == "": raise ValueError("`template` is required and cannot be empty.") # Validate name format (should be three-level namespace) name_parts = name.split(".") if len(name_parts) != 3: raise ValueError( f"`name` must be in the format 'catalog.schema.prompt_name'. Got: {name}" ) # Validate tags if provided (all values must be strings) if tags is not None: for key, value in tags.items(): if not isinstance(value, str): raise ValueError( f"All tag values must be strings. Tag '{key}' has value of type {type(value).__name__}" ) # Call the core function prompt = register_prompt_to_mlflow( name=name, template=template, commit_message=commit_message, tags=tags, ) # Package result result = { "name": prompt.name, "version": prompt.version, "template": prompt.template, "commit_message": commit_message, "tags": tags, } logger.info(f"Successfully registered prompt '{name}' as version {prompt.version}") return result @track_usage
def search_prompts(
    filter_string: Optional[str] = None,
    max_results: Optional[int] = None,
) -> List[dict]:
    """
    Search for prompts in the MLflow Prompt Registry.

    Unity Catalog filter limitations: the filter must specify catalog and
    schema (e.g. ``"catalog = 'mycatalog' AND schema = 'myschema'"``) and
    cannot match on name patterns, tags, or exact prompt names — retrieve
    everything in the catalog/schema and filter programmatically instead.

    :param filter_string: Optional SQL-like filter string.
    :param max_results: Maximum number of prompts to return; None means
        all matches.
    :returns: List of dicts, each with ``name``, ``latest_version`` and
        ``tags``.
    """
    logger = get_logger()

    # Delegate the search to the core registry function.
    found = search_prompts_in_mlflow(
        filter_string=filter_string,
        max_results=max_results,
    )

    # `latest_version`/`tags` may be absent on some registry objects,
    # hence getattr with a None fallback.
    results = [
        {
            "name": prompt.name,
            "latest_version": getattr(prompt, "latest_version", None),
            "tags": getattr(prompt, "tags", None),
        }
        for prompt in found
    ]

    logger.info(f"Found {len(results)} prompts")
    return results
@track_usage
def set_prompt_alias(
    name: str,
    alias: str,
    version: int,
) -> dict:
    """
    Set an alias for a specific prompt version in the MLflow Prompt
    Registry.

    Aliases (e.g. "production", "staging", "latest", "champion") let
    callers reference a stable version without hardcoding version numbers;
    re-pointing an alias to a new version replaces the old assignment.

    :param name: Fully qualified prompt name in the form
        "catalog.schema.prompt_name".
    :param alias: Alias to assign (e.g. "production", "staging").
    :param version: Version number to assign the alias to; must be a
        positive integer (bools are rejected).
    :returns: Dict with ``name``, ``alias`` and ``version``.
    :raises ValueError: If name/alias is empty, version is not a positive
        integer, or the name is not a three-level namespace.
    """
    logger = get_logger()

    # Validate required parameters
    if not name or name.strip() == "":
        raise ValueError("`name` is required and cannot be empty.")
    if not alias or alias.strip() == "":
        raise ValueError("`alias` is required and cannot be empty.")
    # bool is a subclass of int, so check it explicitly: previously
    # set_prompt_alias(name, alias, True) passed validation as version 1.
    if isinstance(version, bool) or not isinstance(version, int) or version <= 0:
        raise ValueError("`version` must be a positive integer.")

    # Validate name format (should be three-level namespace)
    name_parts = name.split(".")
    if len(name_parts) != 3:
        raise ValueError(
            f"`name` must be in the format 'catalog.schema.prompt_name'. Got: {name}"
        )

    # Delegate the alias assignment to the core registry function.
    set_alias_for_prompt(
        name=name,
        alias=alias,
        version=version,
    )

    result = {
        "name": name,
        "alias": alias,
        "version": version,
    }

    logger.info(
        f"Successfully set alias '{alias}' to version {version} for prompt '{name}'"
    )
    return result