evals.models.anthropic#

class AnthropicModel(default_concurrency: int = 20, _verbose: bool = False, _rate_limiter: phoenix.evals.models.rate_limiters.RateLimiter = <factory>, model: str = 'claude-2.1', temperature: float = 0.0, max_tokens: int = 256, top_p: float = 1, top_k: int = 256, stop_sequences: List[str] = <factory>, extra_parameters: Dict[str, Any] = <factory>, max_content_size: Optional[int] = None)#

Bases: BaseModel
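A minimal usage sketch for orientation (assumptions: the ANTHROPIC_API_KEY environment variable is set, the anthropic package is installed, and AnthropicModel is re-exported from phoenix.evals as in recent Phoenix releases):

```python
from phoenix.evals import AnthropicModel

# Construct the model wrapper with the defaults shown in the signature above.
model = AnthropicModel(
    model="claude-2.1",
    temperature=0.0,
    max_tokens=256,
)

# BaseModel instances are callable; this sends a single completion request
# and returns the generated text.
print(model("Classify the sentiment of: 'I love this product!'"))
```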

extra_parameters: Dict[str, Any]#

Any extra parameters to add to the request body (e.g., countPenalty for AI21 models).
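A hedged sketch of supplying extra parameters; the metadata field below is only an illustration of a provider-specific request-body field, not something this class requires, and it assumes invocation_parameters() reflects what is sent with each request:

```python
from phoenix.evals import AnthropicModel

model = AnthropicModel(
    model="claude-2.1",
    # Illustrative only: forwarded verbatim in the request body.
    extra_parameters={"metadata": {"user_id": "eval-run-42"}},
)

# Inspect the parameters that will accompany each request.
print(model.invocation_parameters())
```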

invocation_parameters() Dict[str, Any]#

max_content_size: int | None = None#

If you’re using a fine-tuned model, set this to the maximum content size.

max_tokens: int = 256#

The maximum number of tokens to generate in the completion.

model: str = 'claude-2.1'#

The model name to use.

stop_sequences: List[str]#

If the model encounters a stop sequence, it stops generating further tokens.

temperature: float = 0.0#

What sampling temperature to use.

top_k: int = 256#

The cutoff for top-k sampling: only the k most likely tokens are considered at each step.

top_p: float = 1#

Total probability mass of tokens to consider at each step.
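Taken together, these generation settings can be tightened for deterministic, short-form evaluation output; the values below are illustrative only:

```python
from phoenix.evals import AnthropicModel

strict_model = AnthropicModel(
    model="claude-2.1",
    temperature=0.0,          # no sampling randomness
    top_p=1,                  # keep the full probability mass
    top_k=256,                # cap candidate tokens per step
    max_tokens=64,            # keep completions short
    stop_sequences=["\n\n"],  # stop at the first blank line
)
```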

anthropic_version(version_str: str) Tuple[int, ...]#
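The signature suggests this helper parses a version string into an integer tuple so versions can be compared with ordinary tuple ordering (for example, against the installed anthropic SDK version). The behavior sketched below is an assumption, not documented here:

```python
# Assumed behavior: "0.18.1" -> (0, 18, 1); reuses the `model` instance from
# the earlier sketch. Tuple comparison then gives a simple version check.
if model.anthropic_version("0.18.1") >= (0, 18, 0):
    print("anthropic SDK is new enough")
```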