from typing import Tuple

from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate


class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]):
    """Output parser that checks if the output is finished."""

    finished_value: str = "FINISHED"

    def parse(self, text: str) -> Tuple[str, bool]:
        # Strip the finished marker and report whether it was present.
        cleaned = text.strip()
        finished = self.finished_value in cleaned
        return cleaned.replace(self.finished_value, ""), finished


PROMPT_TEMPLATE = """\
Respond to the user message using any relevant context. \
If context is provided, you should ground your answer in that context. \
Once you're done responding return FINISHED.

>>> CONTEXT: {context}
>>> USER INPUT: {user_input}
>>> RESPONSE: {response}"""

PROMPT = PromptTemplate(
    template=PROMPT_TEMPLATE,
    input_variables=["user_input", "context", "response"],
)

QUESTION_GENERATOR_PROMPT_TEMPLATE = """\
Given a user input and an existing partial response as context, \
ask a question to which the answer is the given term/entity/phrase:

>>> USER INPUT: {user_input}
>>> EXISTING PARTIAL RESPONSE: {current_response}

The question to which the answer is the term/entity/phrase "{uncertain_span}" is:"""

QUESTION_GENERATOR_PROMPT = PromptTemplate(
    template=QUESTION_GENERATOR_PROMPT_TEMPLATE,
    input_variables=["user_input", "current_response", "uncertain_span"],
)
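

# --- Usage sketch (illustrative addition, not part of the upstream module) ---
# Shows how the response prompt is rendered and how FinishedOutputParser
# splits the FINISHED marker out of a model completion. The input strings
# below are made-up example data.
if __name__ == "__main__":
    rendered = PROMPT.format(
        user_input="What is FLARE?",
        context="FLARE interleaves retrieval with generation.",
        response="",
    )
    print(rendered)

    parser = FinishedOutputParser()
    answer, done = parser.parse("FLARE retrieves as it generates. FINISHED")
    print(repr(answer), done)  # -> 'FLARE retrieves as it generates. ' True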