Since OpenAI supports a variety of date-stamped models, we explicitly list the latest models but
allow any name in the type hints.
See the OpenAI docs for a full list.
Using this broader type for the model name instead of the `ChatModel` definition
allows this model to be used more easily with other OpenAI-compatible backends (e.g. Ollama, DeepSeek).
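For example, here is a minimal sketch of pointing `OpenAIModel` at an OpenAI-compatible server; the model name `'llama3.2'` and the local Ollama URL below are assumptions for illustration, not part of this module:

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.openai import OpenAIProvider

# 'llama3.2' and the base_url are assumptions for a local Ollama server;
# any OpenAI-compatible endpoint and model name should work the same way.
model = OpenAIModel(
    'llama3.2',
    provider=OpenAIProvider(base_url='http://localhost:11434/v1', api_key='not-needed'),
)
agent = Agent(model)
```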
```python
class OpenAIModelSettings(ModelSettings, total=False):
    """Settings used for an OpenAI model request.

    ALL FIELDS MUST BE `openai_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
    """

    openai_reasoning_effort: ReasoningEffort
    """Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).

    Currently supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
    result in faster responses and fewer tokens used on reasoning in a response.
    """

    openai_user: str
    """A unique identifier representing the end-user, which can help OpenAI monitor and detect abuse.

    See [OpenAI's safety best practices](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids) for more details.
    """
```
`openai_reasoning_effort`: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
Currently supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
result in faster responses and fewer tokens used on reasoning in a response.
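As a usage sketch, these settings are typically passed to an agent (or an individual run) via `model_settings`; the model name below is illustrative:

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModelSettings

settings = OpenAIModelSettings(
    openai_reasoning_effort='low',  # trade reasoning depth for faster, cheaper responses
    openai_user='user-1234',        # hypothetical end-user identifier for abuse monitoring
)

# 'openai:o3-mini' is an illustrative model name; any reasoning-capable model applies.
agent = Agent('openai:o3-mini', model_settings=settings)
```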
```python
class OpenAIResponsesModelSettings(OpenAIModelSettings, total=False):
    """Settings used for an OpenAI Responses model request.

    ALL FIELDS MUST BE `openai_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
    """

    openai_builtin_tools: Sequence[FileSearchToolParam | WebSearchToolParam | ComputerToolParam]
    """The provided OpenAI built-in tools to use.

    See [OpenAI's built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses) for more details.
    """

    openai_reasoning_generate_summary: Literal['detailed', 'concise']
    """A summary of the reasoning performed by the model.

    This can be useful for debugging and understanding the model's reasoning process.
    One of `concise` or `detailed`.

    Check the [OpenAI Computer use documentation](https://platform.openai.com/docs/guides/tools-computer-use#1-send-a-request-to-the-model)
    for more details.
    """

    openai_truncation: Literal['disabled', 'auto']
    """The truncation strategy to use for the model response.

    It can be either:
    - `disabled` (default): If a model response will exceed the context window size for a model, the
      request will fail with a 400 error.
    - `auto`: If the context of this response and previous ones exceeds the model's context window size,
      the model will truncate the response to fit the context window by dropping input items in the
      middle of the conversation.
    """
```
`openai_truncation`: The truncation strategy to use for the model response. It can be either:

- `disabled` (default): If a model response will exceed the context window size for a model, the
  request will fail with a 400 error.
- `auto`: If the context of this response and previous ones exceeds the model's context window size,
  the model will truncate the response to fit the context window by dropping input items in the
  middle of the conversation.

A sketch of combining these settings is shown below.
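The following sketch combines `openai_truncation` with a built-in tool, assuming the Responses model described below; the `'web_search_preview'` tool type comes from the OpenAI SDK and should be checked against the installed version:

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

settings = OpenAIResponsesModelSettings(
    openai_truncation='auto',  # drop middle-of-conversation items instead of failing with a 400
    # 'web_search_preview' is the tool type used by the Responses built-in web search;
    # treat the exact literal as an assumption and check the OpenAI SDK types.
    openai_builtin_tools=[{'type': 'web_search_preview'}],
)

agent = Agent(OpenAIResponsesModel('gpt-4o'), model_settings=settings)
```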
```python
@dataclass(init=False)
class OpenAIModel(Model):
    """A model that uses the OpenAI API.

    Internally, this uses the [OpenAI Python client](https://github.com/openai/openai-python) to interact with the API.

    Apart from `__init__`, all methods are private or match those of the base class.
    """

    client: AsyncOpenAI = field(repr=False)
    system_prompt_role: OpenAISystemPromptRole | None = field(default=None, repr=False)

    _model_name: OpenAIModelName = field(repr=False)
    _system: str = field(default='openai', repr=False)

    def __init__(
        self,
        model_name: OpenAIModelName,
        *,
        provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
        system_prompt_role: OpenAISystemPromptRole | None = None,
    ):
        """Initialize an OpenAI model.

        Args:
            model_name: The name of the OpenAI model to use. List of model names available
                [here](https://github.com/openai/openai-python/blob/v1.54.3/src/openai/types/chat_model.py#L7)
                (Unfortunately, despite being asked to do so, OpenAI do not provide `.inv` files for their API).
            provider: The provider to use. Defaults to `'openai'`.
            system_prompt_role: The role to use for the system prompt message. If not provided, defaults to `'system'`.
                In the future, this may be inferred from the model name.
        """
        self._model_name = model_name
        if isinstance(provider, str):
            provider = infer_provider(provider)
        self.client = provider.client
        self.system_prompt_role = system_prompt_role

    @property
    def base_url(self) -> str:
        return str(self.client.base_url)

    async def request(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> tuple[ModelResponse, usage.Usage]:
        check_allow_model_requests()
        response = await self._completions_create(
            messages, False, cast(OpenAIModelSettings, model_settings or {}), model_request_parameters
        )
        return self._process_response(response), _map_usage(response)

    @asynccontextmanager
    async def request_stream(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> AsyncIterator[StreamedResponse]:
        check_allow_model_requests()
        response = await self._completions_create(
            messages, True, cast(OpenAIModelSettings, model_settings or {}), model_request_parameters
        )
        async with response:
            yield await self._process_streamed_response(response)

    def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
        return _customize_request_parameters(model_request_parameters)

    @property
    def model_name(self) -> OpenAIModelName:
        """The model name."""
        return self._model_name

    @property
    def system(self) -> str:
        """The system / model provider."""
        return self._system

    @overload
    async def _completions_create(
        self,
        messages: list[ModelMessage],
        stream: Literal[True],
        model_settings: OpenAIModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> AsyncStream[ChatCompletionChunk]: ...

    @overload
    async def _completions_create(
        self,
        messages: list[ModelMessage],
        stream: Literal[False],
        model_settings: OpenAIModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> chat.ChatCompletion: ...

    async def _completions_create(
        self,
        messages: list[ModelMessage],
        stream: bool,
        model_settings: OpenAIModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> chat.ChatCompletion | AsyncStream[ChatCompletionChunk]:
        tools = self._get_tools(model_request_parameters)  # standalone function to make it easier to override

        if not tools:
            tool_choice: Literal['none', 'required', 'auto'] | None = None
        elif not model_request_parameters.allow_text_output:
            tool_choice = 'required'
        else:
            tool_choice = 'auto'

        openai_messages: list[chat.ChatCompletionMessageParam] = []
        for m in messages:
            async for msg in self._map_message(m):
                openai_messages.append(msg)

        try:
            return await self.client.chat.completions.create(
                model=self._model_name,
                messages=openai_messages,
                n=1,
                parallel_tool_calls=model_settings.get('parallel_tool_calls', NOT_GIVEN),
                tools=tools or NOT_GIVEN,
                tool_choice=tool_choice or NOT_GIVEN,
                stream=stream,
                stream_options={'include_usage': True} if stream else NOT_GIVEN,
                stop=model_settings.get('stop_sequences', NOT_GIVEN),
                max_completion_tokens=model_settings.get('max_tokens', NOT_GIVEN),
                temperature=model_settings.get('temperature', NOT_GIVEN),
                top_p=model_settings.get('top_p', NOT_GIVEN),
                timeout=model_settings.get('timeout', NOT_GIVEN),
                seed=model_settings.get('seed', NOT_GIVEN),
                presence_penalty=model_settings.get('presence_penalty', NOT_GIVEN),
                frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
                logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
                reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
                user=model_settings.get('openai_user', NOT_GIVEN),
                extra_headers={'User-Agent': get_user_agent()},
            )
        except APIStatusError as e:
            if (status_code := e.status_code) >= 400:
                raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
            raise

    def _process_response(self, response: chat.ChatCompletion) -> ModelResponse:
        """Process a non-streamed response, and prepare a message to return."""
        timestamp = datetime.fromtimestamp(response.created, tz=timezone.utc)
        choice = response.choices[0]
        items: list[ModelResponsePart] = []
        if choice.message.content is not None:
            items.append(TextPart(choice.message.content))
        if choice.message.tool_calls is not None:
            for c in choice.message.tool_calls:
                items.append(ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id))
        return ModelResponse(items, model_name=response.model, timestamp=timestamp)

    async def _process_streamed_response(self, response: AsyncStream[ChatCompletionChunk]) -> OpenAIStreamedResponse:
        """Process a streamed response, and prepare a streaming response to return."""
        peekable_response = _utils.PeekableAsyncStream(response)
        first_chunk = await peekable_response.peek()
        if isinstance(first_chunk, _utils.Unset):
            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')
        return OpenAIStreamedResponse(
            _model_name=self._model_name,
            _response=peekable_response,
            _timestamp=datetime.fromtimestamp(first_chunk.created, tz=timezone.utc),
        )

    def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]:
        tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
        if model_request_parameters.output_tools:
            tools += [self._map_tool_definition(r) for r in model_request_parameters.output_tools]
        return tools

    async def _map_message(self, message: ModelMessage) -> AsyncIterable[chat.ChatCompletionMessageParam]:
        """Just maps a `pydantic_ai.Message` to a `openai.types.ChatCompletionMessageParam`."""
        if isinstance(message, ModelRequest):
            async for item in self._map_user_message(message):
                yield item
        elif isinstance(message, ModelResponse):
            texts: list[str] = []
            tool_calls: list[chat.ChatCompletionMessageToolCallParam] = []
            for item in message.parts:
                if isinstance(item, TextPart):
                    texts.append(item.content)
                elif isinstance(item, ToolCallPart):
                    tool_calls.append(self._map_tool_call(item))
                else:
                    assert_never(item)
            message_param = chat.ChatCompletionAssistantMessageParam(role='assistant')
            if texts:
                # Note: model responses from this model should only have one text item, so the following
                # shouldn't merge multiple texts into one unless you switch models between runs:
                message_param['content'] = '\n\n'.join(texts)
            if tool_calls:
                message_param['tool_calls'] = tool_calls
            yield message_param
        else:
            assert_never(message)

    @staticmethod
    def _map_tool_call(t: ToolCallPart) -> chat.ChatCompletionMessageToolCallParam:
        return chat.ChatCompletionMessageToolCallParam(
            id=_guard_tool_call_id(t=t),
            type='function',
            function={'name': t.tool_name, 'arguments': t.args_as_json_str()},
        )

    @staticmethod
    def _map_tool_definition(f: ToolDefinition) -> chat.ChatCompletionToolParam:
        tool_param: chat.ChatCompletionToolParam = {
            'type': 'function',
            'function': {
                'name': f.name,
                'description': f.description,
                'parameters': f.parameters_json_schema,
            },
        }
        if f.strict:
            tool_param['function']['strict'] = f.strict
        return tool_param

    async def _map_user_message(self, message: ModelRequest) -> AsyncIterable[chat.ChatCompletionMessageParam]:
        for part in message.parts:
            if isinstance(part, SystemPromptPart):
                if self.system_prompt_role == 'developer':
                    yield chat.ChatCompletionDeveloperMessageParam(role='developer', content=part.content)
                elif self.system_prompt_role == 'user':
                    yield chat.ChatCompletionUserMessageParam(role='user', content=part.content)
                else:
                    yield chat.ChatCompletionSystemMessageParam(role='system', content=part.content)
            elif isinstance(part, UserPromptPart):
                yield await self._map_user_prompt(part)
            elif isinstance(part, ToolReturnPart):
                yield chat.ChatCompletionToolMessageParam(
                    role='tool',
                    tool_call_id=_guard_tool_call_id(t=part),
                    content=part.model_response_str(),
                )
            elif isinstance(part, RetryPromptPart):
                if part.tool_name is None:
                    yield chat.ChatCompletionUserMessageParam(role='user', content=part.model_response())
                else:
                    yield chat.ChatCompletionToolMessageParam(
                        role='tool',
                        tool_call_id=_guard_tool_call_id(t=part),
                        content=part.model_response(),
                    )
            else:
                assert_never(part)

    @staticmethod
    async def _map_user_prompt(part: UserPromptPart) -> chat.ChatCompletionUserMessageParam:
        content: str | list[ChatCompletionContentPartParam]
        if isinstance(part.content, str):
            content = part.content
        else:
            content = []
            for item in part.content:
                if isinstance(item, str):
                    content.append(ChatCompletionContentPartTextParam(text=item, type='text'))
                elif isinstance(item, ImageUrl):
                    image_url = ImageURL(url=item.url)
                    content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
                elif isinstance(item, BinaryContent):
                    base64_encoded = base64.b64encode(item.data).decode('utf-8')
                    if item.is_image:
                        image_url = ImageURL(url=f'data:{item.media_type};base64,{base64_encoded}')
                        content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
                    elif item.is_audio:
                        assert item.format in ('wav', 'mp3')
                        audio = InputAudio(data=base64_encoded, format=item.format)
                        content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
                    else:  # pragma: no cover
                        raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
                elif isinstance(item, AudioUrl):  # pragma: no cover
                    client = cached_async_http_client()
                    response = await client.get(item.url)
                    response.raise_for_status()
                    base64_encoded = base64.b64encode(response.content).decode('utf-8')
                    audio = InputAudio(data=base64_encoded, format=response.headers.get('content-type'))
                    content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
                elif isinstance(item, DocumentUrl):  # pragma: no cover
                    raise NotImplementedError('DocumentUrl is not supported for OpenAI')
                    # The following implementation should have worked, but it seems we have the following error:
                    # pydantic_ai.exceptions.ModelHTTPError: status_code: 400, model_name: gpt-4o, body:
                    # {
                    #     'message': "Unknown parameter: 'messages[1].content[1].file.data'.",
                    #     'type': 'invalid_request_error',
                    #     'param': 'messages[1].content[1].file.data',
                    #     'code': 'unknown_parameter'
                    # }
                    #
                    # client = cached_async_http_client()
                    # response = await client.get(item.url)
                    # response.raise_for_status()
                    # base64_encoded = base64.b64encode(response.content).decode('utf-8')
                    # media_type = response.headers.get('content-type').split(';')[0]
                    # file_data = f'data:{media_type};base64,{base64_encoded}'
                    # file = File(file={'file_data': file_data, 'file_name': item.url, 'file_id': item.url}, type='file')
                    # content.append(file)
                elif isinstance(item, VideoUrl):  # pragma: no cover
                    raise NotImplementedError('VideoUrl is not supported for OpenAI')
                else:
                    assert_never(item)
        return chat.ChatCompletionUserMessageParam(role='user', content=content)
```
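As a small illustration of the `system_prompt_role` handling in `_map_user_message` above (a sketch only; the model name is an example, not a recommendation):

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel

# With system_prompt_role='developer', system prompts are sent as 'developer' messages,
# which some newer OpenAI reasoning models expect; 'o3-mini' here is just an example name.
model = OpenAIModel('o3-mini', system_prompt_role='developer')
agent = Agent(model, system_prompt='Be concise.')
```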
`model_name`: The name of the OpenAI model to use. List of model names available
[here](https://github.com/openai/openai-python/blob/v1.54.3/src/openai/types/chat_model.py#L7)
(Unfortunately, despite being asked to do so, OpenAI do not provide `.inv` files for their API).
```python
def __init__(
    self,
    model_name: OpenAIModelName,
    *,
    provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
    system_prompt_role: OpenAISystemPromptRole | None = None,
):
    """Initialize an OpenAI model.

    Args:
        model_name: The name of the OpenAI model to use. List of model names available
            [here](https://github.com/openai/openai-python/blob/v1.54.3/src/openai/types/chat_model.py#L7)
            (Unfortunately, despite being asked to do so, OpenAI do not provide `.inv` files for their API).
        provider: The provider to use. Defaults to `'openai'`.
        system_prompt_role: The role to use for the system prompt message. If not provided, defaults to `'system'`.
            In the future, this may be inferred from the model name.
    """
    self._model_name = model_name
    if isinstance(provider, str):
        provider = infer_provider(provider)
    self.client = provider.client
    self.system_prompt_role = system_prompt_role
```
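For instance, a provider can be selected with the string shortcut; the model name below is illustrative, and the exact API key environment variable read by the DeepSeek provider is an assumption to verify:

```python
from pydantic_ai.models.openai import OpenAIModel

# Provider given as a string shortcut; 'deepseek-chat' is an illustrative model name
# and assumes the DeepSeek API key is available in the environment (or equivalent).
model = OpenAIModel('deepseek-chat', provider='deepseek')
```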
```python
@dataclass(init=False)
class OpenAIResponsesModel(Model):
    """A model that uses the OpenAI Responses API.

    The [OpenAI Responses API](https://platform.openai.com/docs/api-reference/responses) is the
    new API for OpenAI models.

    The Responses API has built-in tools, that you can use instead of building your own:

    - [Web search](https://platform.openai.com/docs/guides/tools-web-search)
    - [File search](https://platform.openai.com/docs/guides/tools-file-search)
    - [Computer use](https://platform.openai.com/docs/guides/tools-computer-use)

    Use the `openai_builtin_tools` setting to add these tools to your model.

    If you are interested in the differences between the Responses API and the Chat Completions API,
    see the [OpenAI API docs](https://platform.openai.com/docs/guides/responses-vs-chat-completions).
    """

    client: AsyncOpenAI = field(repr=False)
    system_prompt_role: OpenAISystemPromptRole | None = field(default=None)

    _model_name: OpenAIModelName = field(repr=False)
    _system: str = field(default='openai', repr=False)

    def __init__(
        self,
        model_name: OpenAIModelName,
        *,
        provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
    ):
        """Initialize an OpenAI Responses model.

        Args:
            model_name: The name of the OpenAI model to use.
            provider: The provider to use. Defaults to `'openai'`.
        """
        self._model_name = model_name
        if isinstance(provider, str):
            provider = infer_provider(provider)
        self.client = provider.client

    @property
    def model_name(self) -> OpenAIModelName:
        """The model name."""
        return self._model_name

    @property
    def system(self) -> str:
        """The system / model provider."""
        return self._system

    async def request(
        self,
        messages: list[ModelRequest | ModelResponse],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> tuple[ModelResponse, usage.Usage]:
        check_allow_model_requests()
        response = await self._responses_create(
            messages, False, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
        )
        return self._process_response(response), _map_usage(response)

    @asynccontextmanager
    async def request_stream(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> AsyncIterator[StreamedResponse]:
        check_allow_model_requests()
        response = await self._responses_create(
            messages, True, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
        )
        async with response:
            yield await self._process_streamed_response(response)

    def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
        return _customize_request_parameters(model_request_parameters)

    def _process_response(self, response: responses.Response) -> ModelResponse:
        """Process a non-streamed response, and prepare a message to return."""
        timestamp = datetime.fromtimestamp(response.created_at, tz=timezone.utc)
        items: list[ModelResponsePart] = []
        items.append(TextPart(response.output_text))
        for item in response.output:
            if item.type == 'function_call':
                items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
        return ModelResponse(items, model_name=response.model, timestamp=timestamp)

    async def _process_streamed_response(
        self, response: AsyncStream[responses.ResponseStreamEvent]
    ) -> OpenAIResponsesStreamedResponse:
        """Process a streamed response, and prepare a streaming response to return."""
        peekable_response = _utils.PeekableAsyncStream(response)
        first_chunk = await peekable_response.peek()
        if isinstance(first_chunk, _utils.Unset):  # pragma: no cover
            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')
        assert isinstance(first_chunk, responses.ResponseCreatedEvent)
        return OpenAIResponsesStreamedResponse(
            _model_name=self._model_name,
            _response=peekable_response,
            _timestamp=datetime.fromtimestamp(first_chunk.response.created_at, tz=timezone.utc),
        )

    @overload
    async def _responses_create(
        self,
        messages: list[ModelRequest | ModelResponse],
        stream: Literal[False],
        model_settings: OpenAIResponsesModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> responses.Response: ...

    @overload
    async def _responses_create(
        self,
        messages: list[ModelRequest | ModelResponse],
        stream: Literal[True],
        model_settings: OpenAIResponsesModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> AsyncStream[responses.ResponseStreamEvent]: ...

    async def _responses_create(
        self,
        messages: list[ModelRequest | ModelResponse],
        stream: bool,
        model_settings: OpenAIResponsesModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> responses.Response | AsyncStream[responses.ResponseStreamEvent]:
        tools = self._get_tools(model_request_parameters)
        tools = list(model_settings.get('openai_builtin_tools', [])) + tools

        # standalone function to make it easier to override
        if not tools:
            tool_choice: Literal['none', 'required', 'auto'] | None = None
        elif not model_request_parameters.allow_text_output:
            tool_choice = 'required'
        else:
            tool_choice = 'auto'

        system_prompt, openai_messages = await self._map_message(messages)
        reasoning = self._get_reasoning(model_settings)

        try:
            return await self.client.responses.create(
                input=openai_messages,
                model=self._model_name,
                instructions=system_prompt,
                parallel_tool_calls=model_settings.get('parallel_tool_calls', NOT_GIVEN),
                tools=tools or NOT_GIVEN,
                tool_choice=tool_choice or NOT_GIVEN,
                max_output_tokens=model_settings.get('max_tokens', NOT_GIVEN),
                stream=stream,
                temperature=model_settings.get('temperature', NOT_GIVEN),
                top_p=model_settings.get('top_p', NOT_GIVEN),
                truncation=model_settings.get('openai_truncation', NOT_GIVEN),
                timeout=model_settings.get('timeout', NOT_GIVEN),
                reasoning=reasoning,
                user=model_settings.get('openai_user', NOT_GIVEN),
                extra_headers={'User-Agent': get_user_agent()},
            )
        except APIStatusError as e:
            if (status_code := e.status_code) >= 400:
                raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
            raise

    def _get_reasoning(self, model_settings: OpenAIResponsesModelSettings) -> Reasoning | NotGiven:
        reasoning_effort = model_settings.get('openai_reasoning_effort', None)
        reasoning_generate_summary = model_settings.get('openai_reasoning_generate_summary', None)

        if reasoning_effort is None and reasoning_generate_summary is None:
            return NOT_GIVEN
        return Reasoning(effort=reasoning_effort, generate_summary=reasoning_generate_summary)

    def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.FunctionToolParam]:
        tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
        if model_request_parameters.output_tools:
            tools += [self._map_tool_definition(r) for r in model_request_parameters.output_tools]
        return tools

    @staticmethod
    def _map_tool_definition(f: ToolDefinition) -> responses.FunctionToolParam:
        return {
            'name': f.name,
            'parameters': f.parameters_json_schema,
            'type': 'function',
            'description': f.description,
            # NOTE: f.strict should already be a boolean thanks to customize_request_parameters
            'strict': f.strict or False,
        }

    async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[responses.ResponseInputItemParam]]:
        """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
        system_prompt: str = ''
        openai_messages: list[responses.ResponseInputItemParam] = []
        for message in messages:
            if isinstance(message, ModelRequest):
                for part in message.parts:
                    if isinstance(part, SystemPromptPart):
                        system_prompt += part.content
                    elif isinstance(part, UserPromptPart):
                        openai_messages.append(await self._map_user_prompt(part))
                    elif isinstance(part, ToolReturnPart):
                        openai_messages.append(
                            FunctionCallOutput(
                                type='function_call_output',
                                call_id=_guard_tool_call_id(t=part),
                                output=part.model_response_str(),
                            )
                        )
                    elif isinstance(part, RetryPromptPart):
                        # TODO(Marcelo): How do we test this conditional branch?
                        if part.tool_name is None:  # pragma: no cover
                            openai_messages.append(
                                Message(role='user', content=[{'type': 'input_text', 'text': part.model_response()}])
                            )
                        else:
                            openai_messages.append(
                                FunctionCallOutput(
                                    type='function_call_output',
                                    call_id=_guard_tool_call_id(t=part),
                                    output=part.model_response(),
                                )
                            )
                    else:
                        assert_never(part)
            elif isinstance(message, ModelResponse):
                for item in message.parts:
                    if isinstance(item, TextPart):
                        openai_messages.append(responses.EasyInputMessageParam(role='assistant', content=item.content))
                    elif isinstance(item, ToolCallPart):
                        openai_messages.append(self._map_tool_call(item))
                    else:
                        assert_never(item)
            else:
                assert_never(message)
        return system_prompt, openai_messages

    @staticmethod
    def _map_tool_call(t: ToolCallPart) -> responses.ResponseFunctionToolCallParam:
        return responses.ResponseFunctionToolCallParam(
            arguments=t.args_as_json_str(),
            call_id=_guard_tool_call_id(t=t),
            name=t.tool_name,
            type='function_call',
        )

    @staticmethod
    async def _map_user_prompt(part: UserPromptPart) -> responses.EasyInputMessageParam:
        content: str | list[responses.ResponseInputContentParam]
        if isinstance(part.content, str):
            content = part.content
        else:
            content = []
            for item in part.content:
                if isinstance(item, str):
                    content.append(responses.ResponseInputTextParam(text=item, type='input_text'))
                elif isinstance(item, BinaryContent):
                    base64_encoded = base64.b64encode(item.data).decode('utf-8')
                    if item.is_image:
                        content.append(
                            responses.ResponseInputImageParam(
                                image_url=f'data:{item.media_type};base64,{base64_encoded}',
                                type='input_image',
                                detail='auto',
                            )
                        )
                    elif item.is_document:
                        content.append(
                            responses.ResponseInputFileParam(
                                type='input_file',
                                file_data=f'data:{item.media_type};base64,{base64_encoded}',
                                # NOTE: Type wise it's not necessary to include the filename, but it's required by the
                                # API itself. If we add empty string, the server sends a 500 error - which OpenAI needs
                                # to fix. In any case, we add a placeholder name.
                                filename=f'filename.{item.format}',
                            )
                        )
                    elif item.is_audio:
                        raise NotImplementedError('Audio as binary content is not supported for OpenAI Responses API.')
                    else:  # pragma: no cover
                        raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
                elif isinstance(item, ImageUrl):
                    content.append(
                        responses.ResponseInputImageParam(image_url=item.url, type='input_image', detail='auto')
                    )
                elif isinstance(item, AudioUrl):  # pragma: no cover
                    client = cached_async_http_client()
                    response = await client.get(item.url)
                    response.raise_for_status()
                    base64_encoded = base64.b64encode(response.content).decode('utf-8')
                    content.append(
                        responses.ResponseInputFileParam(
                            type='input_file',
                            file_data=f'data:{item.media_type};base64,{base64_encoded}',
                        )
                    )
                elif isinstance(item, DocumentUrl):  # pragma: no cover
                    client = cached_async_http_client()
                    response = await client.get(item.url)
                    response.raise_for_status()
                    base64_encoded = base64.b64encode(response.content).decode('utf-8')
                    content.append(
                        responses.ResponseInputFileParam(
                            type='input_file',
                            file_data=f'data:{item.media_type};base64,{base64_encoded}',
                            filename=f'filename.{item.format}',
                        )
                    )
                elif isinstance(item, VideoUrl):  # pragma: no cover
                    raise NotImplementedError('VideoUrl is not supported for OpenAI.')
                else:
                    assert_never(item)
        return responses.EasyInputMessageParam(role='user', content=content)
```
Source code in `pydantic_ai_slim/pydantic_ai/models/openai.py`
```python
def __init__(
    self,
    model_name: OpenAIModelName,
    *,
    provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
):
    """Initialize an OpenAI Responses model.

    Args:
        model_name: The name of the OpenAI model to use.
        provider: The provider to use. Defaults to `'openai'`.
    """
    self._model_name = model_name
    if isinstance(provider, str):
        provider = infer_provider(provider)
    self.client = provider.client
```
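Putting it together, a minimal end-to-end sketch with the Responses model; the model name is illustrative, and the exact attribute holding the result text depends on the installed `pydantic_ai` version:

```python
import asyncio

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel

agent = Agent(OpenAIResponsesModel('gpt-4o'))


async def main() -> None:
    # A plain run goes through the `responses.create` call shown above.
    result = await agent.run('What is the capital of France?')
    # Depending on the installed version, the result text is on `.output` (or `.data` in older releases).
    print(result.output)


asyncio.run(main())
```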