
    hhȎ                        d Z ddlZddlZddlZddlmZ ddlmZmZm	Z	m
Z
mZmZmZmZmZmZmZ ddlmZmZ ddlmZ ddlmZmZmZ ddlmZmZmZ dd	lm Z m!Z! dd
l"m#Z#m$Z$m%Z% ddl&m'Z'm(Z(m)Z) ddl*m+Z+ ddl,m-Z- ddl.m/Z/m0Z0m1Z1 ddl2m3Z3m4Z4  ej5        e6          Z7dZ8dede9fdZ: G d dee4          Z; G d de;          Z< G d de;          Z=dS )z:Chat model for OCI data science model deployment endpoint.    N)
itemgetter)AnyAsyncIteratorCallableDictIteratorListLiteralOptionalSequenceTypeUnion)AsyncCallbackManagerForLLMRunCallbackManagerForLLMRun)LanguageModelInput)BaseChatModelagenerate_from_streamgenerate_from_stream)AIMessageChunkBaseMessageBaseMessageChunk)JsonOutputParserPydanticOutputParser)ChatGenerationChatGenerationChunk
ChatResult)RunnableRunnableMapRunnablePassthrough)BaseToolconvert_to_openai_tool)	BaseModelFieldmodel_validator)DEFAULT_MODEL_NAMEBaseOCIModelDeploymentz/v1/chat/completionsobjreturnc                 V    t          | t                    ot          | t                    S N)
isinstancetype
issubclassr#   )r(   s    m/var/www/FlaskApp/flask-venv/lib/python3.11/site-packages/langchain_community/chat_models/oci_data_science.py_is_pydantic_classr0   5   s!    c4  ?ZY%?%??    c                   J    e Zd ZU dZ ee          Zeee	f         e
d<   	 eZee
d<   	 dZeee                  e
d<   	  ed          ed	e	d
e	fd                        Zed
efd            Zed
eee	f         fd            Zed
eee	f         fd            Z	 d+dee         dee         d
ef fdZ	 	 d,dee         deee                  dee         de	d
ef
dZ	 	 d,dee         deee                  dee         de	d
ee         f
dZ	 	 d,dee         deee                  dee          de	d
ef
dZ!	 	 d,dee         deee                  dee          de	d
e"e         f
dZ#	 d-ddddee$ee%e&         f                  de'd         dede	d
e(e)e$ee&f         f         f
dZ*deee                  de	d
efd Z+e,fd!ed"e%e-         d
efd#Z.de/d$ed
efd%Z0e,fd&ed"e%e-         d
efd'Z1d&ed
efd(Z2d)e3e$eee	f         e%e&         e4e5f                  de	d
e(e)ef         f fd*Z6 xZ7S ).ChatOCIModelDeploymentuC  OCI Data Science Model Deployment chat model integration.

    Prerequisite:
        The OCI Model Deployment plugins are installable only on
        Python 3.9 and above. If you are working inside a notebook,
        try installing a Python 3.10 based conda pack and running the
        following setup.


    Setup:
        Install ``oracle-ads`` and ``langchain-openai``.

        .. code-block:: bash

            pip install -U oracle-ads langchain-openai

        Use `ads.set_auth()` to configure authentication.
        For example, to use OCI resource_principal for authentication:

        .. code-block:: python

            import ads
            ads.set_auth("resource_principal")
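
        Alternatively, API-key based authentication can be configured the
        same way; a sketch, assuming a standard OCI config file at
        ``~/.oci/config``:

        .. code-block:: python

            import ads
            ads.set_auth("api_key", profile="DEFAULT")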

        For more details on authentication, see:
        https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html

        Make sure to have the required policies to access the OCI Data
        Science Model Deployment endpoint. See:
        https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm


    Key init args - completion params:
        endpoint: str
            The OCI model deployment endpoint.
        temperature: float
            Sampling temperature.
        max_tokens: Optional[int]
            Max number of tokens to generate.

    Key init args - client params:
        auth: dict
            ADS auth dictionary for OCI authentication.
        default_headers: Optional[Dict]
            The headers to be added to the Model Deployment request.

    Instantiate:
        .. code-block:: python

            from langchain_community.chat_models import ChatOCIModelDeployment

            chat = ChatOCIModelDeployment(
                endpoint="https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predict",
                model="odsc-llm", # this is the default model name if deployed with AQUA
                streaming=True,
                max_retries=3,
                model_kwargs={
                    "max_tokens": 512,
                    "temperature": 0.2,
                    # other model parameters ...
                },
                default_headers={
                    "route": "/v1/chat/completions",
                    # other request headers ...
                },
            )

    Invocation:
        .. code-block:: python

            messages = [
                ("system", "Translate the user sentence to French."),
                ("human", "Hello World!"),
            ]
            chat.invoke(messages)

        .. code-block:: python

            AIMessage(
                content='Bonjour le monde!',
                response_metadata={
                    'token_usage': {
                        'prompt_tokens': 40,
                        'total_tokens': 50,
                        'completion_tokens': 10
                    },
                    'model_name': 'odsc-llm',
                    'system_fingerprint': '',
                    'finish_reason': 'stop'
                },
                id='run-cbed62da-e1b3-4abd-9df3-ec89d69ca012-0'
            )

    Streaming:
        .. code-block:: python

            for chunk in chat.stream(messages):
                print(chunk)

        .. code-block:: python

            content='' id='run-02c6-c43f-42de'
            content='\n' id='run-02c6-c43f-42de'
            content='B' id='run-02c6-c43f-42de'
            content='on' id='run-02c6-c43f-42de'
            content='j' id='run-02c6-c43f-42de'
            content='our' id='run-02c6-c43f-42de'
            content=' le' id='run-02c6-c43f-42de'
            content=' monde' id='run-02c6-c43f-42de'
            content='!' id='run-02c6-c43f-42de'
            content='' response_metadata={'finish_reason': 'stop'} id='run-02c6-c43f-42de'
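
        Streamed chunks are additive; a minimal sketch that accumulates them
        into a single message (relying on ``AIMessageChunk`` supporting ``+``):

        .. code-block:: python

            full = None
            for chunk in chat.stream(messages):
                full = chunk if full is None else full + chunk
            full.content
            # 'Bonjour le monde!'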

    Async:
        .. code-block:: python

            await chat.ainvoke(messages)

            # stream:
            # async for chunk in chat.astream(messages):

        .. code-block:: python

            AIMessage(
                content='Bonjour le monde!',
                response_metadata={'finish_reason': 'stop'},
                id='run-8657a105-96b7-4bb6-b98e-b69ca420e5d1-0'
            )
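
        A runnable wrapper for the async calls above (a sketch, assuming no
        event loop is already running):

        .. code-block:: python

            import asyncio

            async def main():
                async for chunk in chat.astream(messages):
                    print(chunk.content, end="", flush=True)

            asyncio.run(main())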

    Structured output:
        .. code-block:: python

            from typing import Optional
            from pydantic import BaseModel, Field

            class Joke(BaseModel):
                setup: str = Field(description="The setup of the joke")
                punchline: str = Field(description="The punchline to the joke")

            structured_llm = chat.with_structured_output(Joke, method="json_mode")
            structured_llm.invoke(
                "Tell me a joke about cats, "
                "respond in JSON with `setup` and `punchline` keys"
            )

        .. code-block:: python

            Joke(
                setup='Why did the cat get stuck in the tree?',
                punchline='Because it was chasing its tail!'
            )

        See ``ChatOCIModelDeployment.with_structured_output()`` for more.
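
    Tool calling:
        ``bind_tools`` formats the given tools into the OpenAI tool spec via
        ``convert_to_openai_tool`` and binds them to the request. A minimal
        sketch, assuming the deployed model and its serving engine support
        OpenAI-style tool calling; ``GetWeather`` is a hypothetical tool:

        .. code-block:: python

            from pydantic import BaseModel, Field

            class GetWeather(BaseModel):
                "Get the current weather in a given location."

                location: str = Field(..., description="e.g. San Francisco, CA")

            llm_with_tools = chat.bind_tools([GetWeather])
            msg = llm_with_tools.invoke("What is the weather in Paris?")
            msg.tool_calls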

    Customized Usage:
        You can inherit from base class and overwrite the `_process_response`,
        `_process_stream_response`, `_construct_json_body` for customized usage.

        .. code-block:: python

            class MyChatModel(ChatOCIModelDeployment):
                def _process_stream_response(self, response_json: dict) -> ChatGenerationChunk:
                    print("My customized streaming result handler.")
                    return ChatGenerationChunk(...)

                def _process_response(self, response_json: dict) -> ChatResult:
                    print("My customized output handler.")
                    return ChatResult(...)

                def _construct_json_body(self, messages: list, params: dict) -> dict:
                    print("My customized payload handler.")
                    return {
                        "messages": messages,
                        **params,
                    }

            chat = MyChatModel(
                endpoint=f"https://modeldeployment.<region>.oci.customer-oci.com/{ocid}/predict",
                model="odsc-llm",
            )

            chat.invoke("tell me a joke")

    Response metadata:
        .. code-block:: python

            ai_msg = chat.invoke(messages)
            ai_msg.response_metadata

        .. code-block:: python

            {
                'token_usage': {
                    'prompt_tokens': 40,
                    'total_tokens': 50,
                    'completion_tokens': 10
                },
                'model_name': 'odsc-llm',
                'system_fingerprint': '',
                'finish_reason': 'stop'
            }

    """

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""

    model: str = DEFAULT_MODEL_NAME
    """The name of the model."""

    stop: Optional[List[str]] = None
    """Stop words to use when generating. Model output is cut off
    at the first occurrence of any of these substrings."""

    @model_validator(mode="before")
    @classmethod
    def validate_openai(cls, values: Any) -> Any:
        """Checks if langchain_openai is installed."""
        if not importlib.util.find_spec("langchain_openai"):
            raise ImportError(
                "Could not import langchain_openai package. "
                "Please install it with `pip install langchain_openai`."
            )
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "oci_model_depolyment_chat_endpoint"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"endpoint": self.endpoint, "model_kwargs": _model_kwargs},
            **self._default_params,
        }

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters."""
        return {
            "model": self.model,
            "stop": self.stop,
            "stream": self.streaming,
        }

    def _headers(
        self, is_async: Optional[bool] = False, body: Optional[dict] = None
    ) -> Dict:
        """Construct and return the headers for a request.

        Args:
            is_async (bool, optional): Indicates if the request is asynchronous.
                Defaults to `False`.
            body (optional): The request body to be included in the headers if
                the request is asynchronous.

        Returns:
            Dict: A dictionary containing the appropriate headers for the request.
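
        Example (illustrative; the remaining headers come from
        ``BaseOCIModelDeployment`` and the configured ADS signer):

            .. code-block:: python

                chat._headers()
                # {'route': '/v1/chat/completions', ...}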
        """
        return {
            "route": DEFAULT_INFERENCE_ENDPOINT_CHAT,
            **super()._headers(is_async=is_async, body=body),
        }

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Call out to an OCI Model Deployment Online endpoint.

        Args:
            messages:  The messages in the conversation with the chat model.
            stop: Optional list of stop words to use when generating.

        Returns:
            LangChain ChatResult

        Raises:
            RuntimeError:
                Raised when invoking the endpoint fails.

        Example:

            .. code-block:: python

                messages = [
                    (
                        "system",
                        "You are a helpful assistant that translates English to French. Translate the user sentence.",
                    ),
                    ("human", "Hello World!"),
                ]

                response = chat.invoke(messages)
        """
        if self.streaming:
            # Re-use the streaming path and fold the chunks into one result.
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)

        requests_kwargs = kwargs.pop("requests_kwargs", {})
        params = self._invocation_params(stop, **kwargs)
        body = self._construct_json_body(messages, params)
        res = self.completion_with_retry(
            data=body, run_manager=run_manager, **requests_kwargs
        )
        return self._process_response(res.json())

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream OCI Data Science Model Deployment endpoint on given messages.

        Args:
            messages (List[BaseMessage]):
                The messages to pass into the model.
            stop (List[str], Optional):
                List of stop words to use when generating.
            kwargs:
                requests_kwargs:
                    Additional ``**kwargs`` to pass to requests.post

        Returns:
            An iterator of ChatGenerationChunk.

        Raises:
            RuntimeError:
                Raised when invoking the endpoint fails.

        Example:

            .. code-block:: python

                messages = [
                    (
                        "system",
                        "You are a helpful assistant that translates English to French. Translate the user sentence.",
                    ),
                    ("human", "Hello World!"),
                ]

                chunk_iter = chat.stream(messages)

        """
        requests_kwargs = kwargs.pop("requests_kwargs", {})
        self.streaming = True
        params = self._invocation_params(stop, **kwargs)
        body = self._construct_json_body(messages, params)
        response = self.completion_with_retry(
            data=body, run_manager=run_manager, stream=True, **requests_kwargs
        )
        default_chunk_class = AIMessageChunk
        for line in self._parse_stream(response.iter_lines()):
            chunk = self._handle_sse_line(line, default_chunk_class)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Asynchronously call out to OCI Data Science Model Deployment
        endpoint on given messages.

        Args:
            messages (List[BaseMessage]):
                The messages to pass into the model.
            stop (List[str], Optional):
                List of stop words to use when generating.
            kwargs:
                requests_kwargs:
                    Additional ``**kwargs`` to pass to requests.post

        Returns:
            LangChain ChatResult.

        Raises:
            ValueError:
                Raised when invoking the endpoint fails.

        Example:

            .. code-block:: python

                messages = [
                    (
                        "system",
                        "You are a helpful assistant that translates English to French. Translate the user sentence.",
                    ),
                    ("human", "I love programming."),
                ]

                resp = await chat.ainvoke(messages)

        """
        if self.streaming:
            stream_iter = self._astream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)

        requests_kwargs = kwargs.pop("requests_kwargs", {})
        params = self._invocation_params(stop, **kwargs)
        body = self._construct_json_body(messages, params)
        response = await self.acompletion_with_retry(
            data=body,
            run_manager=run_manager,
            **requests_kwargs,
        )
        return self._process_response(response)

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Asynchronously streaming OCI Data Science Model Deployment
        endpoint on given messages.

        Args:
            messages (List[BaseMessage]):
                The messages to pass into the model.
            stop (List[str], Optional):
                List of stop words to use when generating.
            kwargs:
                requests_kwargs:
                    Additional ``**kwargs`` to pass to requests.post

        Returns:
            An AsyncIterator of ChatGenerationChunk.

        Raises:
            ValueError:
                Raised when invoking the endpoint fails.

        Example:

            .. code-block:: python

                messages = [
                    (
                        "system",
                        "You are a helpful assistant that translates English to French. Translate the user sentence.",
                    ),
                    ("human", "I love programming."),
                ]

                chunk_iter = await chat.astream(messages)

        """
        requests_kwargs = kwargs.pop("requests_kwargs", {})
        self.streaming = True
        params = self._invocation_params(stop, **kwargs)
        body = self._construct_json_body(messages, params)
        default_chunk_class = AIMessageChunk
        async for line in await self.acompletion_with_retry(
            data=body, run_manager=run_manager, stream=True, **requests_kwargs
        ):
            chunk = self._handle_sse_line(line, default_chunk_class)
            if run_manager:
                await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk

    def with_structured_output(
        self,
        schema: Optional[Union[Dict, Type[BaseModel]]] = None,
        *,
        method: Literal["json_mode"] = "json_mode",
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema as a dict or a Pydantic class. If a Pydantic class
                then the model output will be an object of that class. If a dict then
                the model output will be a dict. With a Pydantic class the returned
                attributes will be validated, whereas with a dict they will not be. If
                `method` is "function_calling" and `schema` is a dict, then the dict
                must match the OpenAI function-calling spec.
            method: The method for steering model generation, currently only support
                for "json_mode". If "json_mode" then JSON mode will be used. Note that
                if using "json_mode" then you must include instructions for formatting
                the output into the desired schema into the model call.
            include_raw: If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".

        Returns:
            A Runnable that takes any ChatModel input and returns as output:

                If include_raw is True then a dict with keys:
                    raw: BaseMessage
                    parsed: Optional[_DictOrPydantic]
                    parsing_error: Optional[BaseException]

                If include_raw is False then just _DictOrPydantic is returned,
                where _DictOrPydantic depends on the schema:

                If schema is a Pydantic class then _DictOrPydantic is the Pydantic
                    class.

                If schema is a dict then _DictOrPydantic is a dict.

        """
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "json_mode":
            llm = self.bind(response_format={"type": "json_object"})
            output_parser = (
                PydanticOutputParser(pydantic_object=schema)
                if is_pydantic_schema
                else JsonOutputParser()
            )
        else:
            raise ValueError(
                f"Unrecognized method argument. Expected `json_mode`. "
                f"Received: `{method}`."
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser

    def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict:
        """Combines the invocation parameters with default parameters."""
        params = self._default_params
        _model_kwargs = self.model_kwargs or {}
        params["stop"] = stop or params.get("stop", [])
        return {**params, **_model_kwargs, **kwargs}

    def _handle_sse_line(
        self, line: str, default_chunk_cls: Type[BaseMessageChunk] = AIMessageChunk
    ) -> ChatGenerationChunk:
        """Handle a single Server-Sent Events (SSE) line and process it into
        a chat generation chunk.

        Args:
            line (str): A single line from the SSE stream in string format.
            default_chunk_cls (AIMessageChunk): The default class for message
                chunks to be used during the processing of the stream response.

        Returns:
            ChatGenerationChunk: The processed chat generation chunk. If an error
                occurs, an empty `ChatGenerationChunk` is returned.
        """
        try:
            obj = json.loads(line)
            return self._process_stream_response(obj, default_chunk_cls)
        except Exception as e:
            logger.debug(f"Error occurs when processing line={line}: {str(e)}")
            return ChatGenerationChunk(message=AIMessageChunk(content=""))

    def _construct_json_body(self, messages: list, params: dict) -> dict:
        """Constructs the request body as a dictionary (JSON).

        Args:
            messages (list): A list of message objects to be included in the
                request body.
            params (dict): A dictionary of additional parameters to be included
                in the request body.

        Returns:
            dict: A dictionary representing the JSON request body, including
                converted messages and additional parameters.

        """
        from langchain_openai.chat_models.base import _convert_message_to_dict

        return {
            "messages": [_convert_message_to_dict(m) for m in messages],
            **params,
        }

    def _process_stream_response(
        self,
        response_json: dict,
        default_chunk_cls: Type[BaseMessageChunk] = AIMessageChunk,
    ) -> ChatGenerationChunk:
        """Formats streaming response in OpenAI spec.

        Args:
            response_json (dict): The JSON response from the streaming endpoint.
            default_chunk_cls (type, optional): The default class to use for
                creating message chunks. Defaults to `AIMessageChunk`.

        Returns:
            ChatGenerationChunk: An object containing the processed message
                chunk and any relevant generation information such as finish
                reason and usage.

        Raises:
            ValueError: If the response JSON is not well-formed or does not
                contain the expected structure.
        """
        from langchain_openai.chat_models.base import _convert_delta_to_message_chunk

        try:
            choice = response_json["choices"][0]
            if not isinstance(choice, dict):
                raise TypeError("Endpoint response is not well formed.")
        except (KeyError, IndexError, TypeError) as e:
            raise ValueError(
                "Error while formatting response payload for chat model of type"
            ) from e

        chunk = _convert_delta_to_message_chunk(choice["delta"], default_chunk_cls)
        default_chunk_cls = chunk.__class__
        finish_reason = choice.get("finish_reason")
        usage = choice.get("usage")
        gen_info = {}
        if finish_reason is not None:
            gen_info.update({"finish_reason": finish_reason})
        if usage is not None:
            gen_info.update({"usage": usage})

        return ChatGenerationChunk(
            message=chunk, generation_info=gen_info if gen_info else None
        )

    def _process_response(self, response_json: dict) -> ChatResult:
        """Formats response in OpenAI spec.

        Args:
            response_json (dict): The JSON response from the chat model endpoint.

        Returns:
            ChatResult: An object containing the list of `ChatGeneration` objects
            and additional LLM output information.

        Raises:
            ValueError: If the response JSON is not well-formed or does not
            contain the expected structure.

        """
        from langchain_openai.chat_models.base import _convert_dict_to_message

        generations = []
        try:
            choices = response_json["choices"]
            if not isinstance(choices, list):
                raise TypeError("Endpoint response is not well formed.")
        except (KeyError, TypeError) as e:
            raise ValueError(
                "Error while formatting response payload for chat model of type"
            ) from e

        for choice in choices:
            message = _convert_dict_to_message(choice["message"])
            generation_info = dict(finish_reason=choice.get("finish_reason"))
            if "logprobs" in choice:
                generation_info["logprobs"] = choice["logprobs"]

            gen = ChatGeneration(
                message=message,
                generation_info=generation_info,
            )
            generations.append(gen)

        token_usage = response_json.get("usage", {})
        llm_output = {
            "token_usage": token_usage,
            "model_name": self.model,
            "system_fingerprint": response_json.get("system_fingerprint", ""),
        }
        return ChatResult(generations=generations, llm_output=llm_output)

    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        # Convert each tool to the OpenAI tool spec before binding.
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        return super().bind(tools=formatted_tools, **kwargs)


class ChatOCIModelDeploymentVLLM(ChatOCIModelDeployment):
    """OCI large language chat models deployed with vLLM.

    To use, you must provide the model HTTP endpoint from your deployed
    model, e.g. https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict.

    To authenticate, `oracle-ads` has been used to automatically load
    credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html

    Make sure to have the required policies to access the OCI Data
    Science Model Deployment endpoint. See:
    https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint

    Example:

        .. code-block:: python

            from langchain_community.chat_models import ChatOCIModelDeploymentVLLM

            chat = ChatOCIModelDeploymentVLLM(
                endpoint="https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict",
                frequency_penalty=0.1,
                max_tokens=512,
                temperature=0.2,
                top_p=1.0,
                # other model parameters...
            )

    """

    frequency_penalty: float = 0.0
    """Penalizes repeated tokens according to frequency. Between 0 and 1."""

    logit_bias: Optional[Dict[str, float]] = None
    """Adjust the probability of specific tokens being generated."""

    max_tokens: Optional[int] = 256
    """The maximum number of tokens to generate in the completion."""

    n: int = 1
    """Number of output sequences to return for the given prompt."""

    presence_penalty: float = 0.0
    """Penalizes repeated tokens. Between 0 and 1."""

    temperature: float = 0.2
    """What sampling temperature to use."""

    top_p: float = 1.0
    """Total probability mass of tokens to consider at each step."""

    best_of: Optional[int] = None
    """Generates best_of completions server-side and returns the "best"
    (the one with the highest log probability per token)."""

    use_beam_search: Optional[bool] = False
    """Whether to use beam search instead of sampling."""

    top_k: Optional[int] = -1
    """Number of most likely tokens to consider at each step."""

    min_p: Optional[float] = 0.0
    """Minimum probability for a token to be considered, relative to the
    most likely token. Must be in [0, 1]. Set to 0 to disable."""

    repetition_penalty: Optional[float] = 1.0
    """Penalizes new tokens based on whether they appear in the prompt and
    the generated text so far. Values > 1 encourage new tokens."""

    length_penalty: Optional[float] = 1.0
    """Penalizes sequences based on their length. Used in beam search."""

    early_stopping: Optional[bool] = False
    """Controls the stopping condition for beam search."""

    ignore_eos: Optional[bool] = False
    """Whether to ignore the EOS token and continue generating tokens after
    the EOS token is generated."""

    min_tokens: Optional[int] = 0
    """Minimum number of tokens to generate per output sequence before
    EOS or stop_token_ids can be generated."""

    stop_token_ids: Optional[List[int]] = None
    """List of token ids that stop the generation when they are generated.
    The returned output will contain the stop tokens unless the stop tokens
    are special tokens."""

    skip_special_tokens: Optional[bool] = True
    """Whether to skip special tokens in the output."""

    spaces_between_special_tokens: Optional[bool] = True
    """Whether to add spaces between special tokens in the output."""

    tool_choice: Optional[str] = None
    """Whether to use tool calling. Defaults to None (disabled). Requires
    the vLLM deployment to be configured with a tool call parser."""

    chat_template: Optional[str] = None
    """Use a customized chat template. Defaults to None, in which case the
    chat template from the tokenizer is used."""

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "oci_model_depolyment_chat_endpoint_vllm"

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters."""
        params = {
            "model": self.model,
            "stop": self.stop,
            "stream": self.streaming,
        }
        for attr_name in self._get_model_params():
            try:
                value = getattr(self, attr_name)
                if value is not None:
                    params.update({attr_name: value})
            except Exception:
                pass
        return params

    def _get_model_params(self) -> List[str]:
        """Gets the name of model parameters."""
        return [
            "best_of",
            "chat_template",
            "early_stopping",
            "frequency_penalty",
            "ignore_eos",
            "length_penalty",
            "logit_bias",
            "logprobs",
            "max_tokens",
            "min_p",
            "min_tokens",
            "n",
            "presence_penalty",
            "repetition_penalty",
            "skip_special_tokens",
            "spaces_between_special_tokens",
            "stop_token_ids",
            "temperature",
            "tool_choice",
            "top_k",
            "top_p",
            "use_beam_search",
        ]


class ChatOCIModelDeploymentTGI(ChatOCIModelDeployment):
    """OCI large language chat models deployed with Text Generation Inference.

    To use, you must provide the model HTTP endpoint from your deployed
    model, e.g. https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict.

    To authenticate, `oracle-ads` has been used to automatically load
    credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html

    Make sure to have the required policies to access the OCI Data
    Science Model Deployment endpoint. See:
    https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint

    Example:

        .. code-block:: python

            from langchain_community.chat_models import ChatOCIModelDeploymentTGI

            chat = ChatOCIModelDeploymentTGI(
                endpoint="https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict",
                max_token=512,
                temperature=0.2,
                frequency_penalty=0.1,
                seed=42,
                # other model parameters...
            )

    Nr   r   r   r   r   r   r   r   seedr   r   r   top_logprobsr)   c                     dS )rD   &oci_model_depolyment_chat_endpoint_tgirF   rG   s    r/   rI   z#ChatOCIModelDeploymentTGI._llm_type  s
     87r1   c                     | j         | j        | j        d}|                                 D ]<}	 t	          | |          }||                    ||i           -# t          $ r Y 9w xY w|S rP   r   r   s       r/   rL   z)ChatOCIModelDeploymentTGI._default_params  r   r  c                 
    g dS )r  )r   r   r   r   r   r   r  r   r   r   r	  rF   rG   s    r/   r   z+ChatOCIModelDeploymentTGI._get_model_params  s    
 
 
 	
r1   )r   r   r   r   r   r   r  r   r   r   r   r   r   r   r  r   r   r  r   r   r	  r   rI   r   rL   r	   r   rF   r1   r/   r  r    s|         : *.x---L-1Jc5j)*111D#Hhtn###JJEAsJJJD(,huo,,,5D(3-&K+!E8E?!!!D"&L(3-&&&
 83 8 8 8 X8 c3h    X"
49 
 
 
 
 
 
r1   r  )>r   r=   ri   loggingoperatorr   typingr   r   r   r   r   r	   r
   r   r   r   r   langchain_core.callbacksr   r   langchain_core.language_modelsr   *langchain_core.language_models.chat_modelsr   r   r   langchain_core.messagesr   r   r   langchain_core.output_parsersr   r   langchain_core.outputsr   r   r   langchain_core.runnablesr   r   r   langchain_core.toolsr    %langchain_core.utils.function_callingr"   pydanticr#   r$   r%   Clangchain_community.llms.oci_data_science_model_deployment_endpointr&   r'   	getLoggerr   r   rW   r   r0   r3   r   r  rF   r1   r/   <module>r     s   A @                                             > = = = = =         
 R Q Q Q Q Q Q Q Q Q        S R R R R R R R R R O O O O O O O O O O ) ) ) ) ) ) H H H H H H 6 6 6 6 6 6 6 6 6 6       
 
	8	$	$"8 @C @D @ @ @ @@= @= @= @= @=],B @= @= @=Fd
 d
 d
 d
 d
!7 d
 d
 d
Nd
 d
 d
 d
 d
 6 d
 d
 d
 d
 d
r1   