from __future__ import annotations

import json
from typing import Any, Optional

from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR


@deprecated(
    since="0.2.7",
    alternative=(
        "example in API reference with more detail: "
        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
    ),
    removal="1.0",
)
class QAGenerationChain(Chain):
    """Base class for question-answer generation chains.

    This class is deprecated. See below for an alternative implementation.

    Advantages of this implementation include:

    - Supports async and streaming;
    - Surfaces prompt and text splitter for easier customization;
    - Use of JsonOutputParser supports JSONPatch operations in streaming mode,
      as well as robustness to markdown.

        .. code-block:: python

            from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
            # Note: import PROMPT if using a legacy non-chat model.
            from langchain_core.output_parsers import JsonOutputParser
            from langchain_core.runnables import (
                RunnableLambda,
                RunnableParallel,
                RunnablePassthrough,
            )
            from langchain_core.runnables.base import RunnableEach
            from langchain_openai import ChatOpenAI
            from langchain_text_splitters import RecursiveCharacterTextSplitter

            llm = ChatOpenAI()
            text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
            split_text = RunnableLambda(
                lambda x: text_splitter.create_documents([x])
            )

            chain = RunnableParallel(
                text=RunnablePassthrough(),
                questions=(
                    split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
                )
            )
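
            # Usage sketch: the parallel chain returns the original text
            # alongside one parsed JSON object per chunk, e.g.:
            #
            #     result = chain.invoke("Some long input text ...")
            #     result["questions"]  # one parsed dict per chunk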
    """

    llm_chain: LLMChain
    """LLM Chain that generates responses from user input and context."""
    text_splitter: TextSplitter = Field(
        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
    )
    """Text splitter that splits the input into chunks."""
    input_key: str = "text"
    """Key of the input to the chain."""
    output_key: str = "questions"
    """Key of the output of the chain."""
    k: Optional[int] = None
    """Number of questions to generate."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> QAGenerationChain:
        """
        Create a QAGenerationChain from a language model.

        Args:
            llm: a language model
            prompt: a prompt template
            **kwargs: additional arguments

        Returns:
            a QAGenerationChain class
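
        Example:
            A minimal sketch (``ChatOpenAI`` is illustrative; any
            ``BaseLanguageModel`` works):

            .. code-block:: python

                from langchain_openai import ChatOpenAI

                chain = QAGenerationChain.from_llm(ChatOpenAI())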
        """
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        chain = LLMChain(llm=llm, prompt=_prompt)
        return cls(llm_chain=chain, **kwargs)

    @property
    def _chain_type(self) -> str:
        raise NotImplementedError

    @property
    def input_keys(self) -> list[str]:
        return [self.input_key]

    @property
    def output_keys(self) -> list[str]:
        return [self.output_key]

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> dict[str, list]:
        # Split the input text into chunks and run the LLM chain on each one.
        docs = self.text_splitter.create_documents([inputs[self.input_key]])
        results = self.llm_chain.generate(
            [{"text": d.page_content} for d in docs], run_manager=run_manager
        )
        # Each generation is expected to be a JSON-encoded question/answer pair.
        qa = [json.loads(res[0].text) for res in results.generations]
        return {self.output_key: qa}