Update generate_answer.py

generate_answer.py  CHANGED  +22 -5
@@ -14,20 +14,35 @@ from langchain.memory import ConversationBufferMemory
 
 load_dotenv()
 api_key = os.getenv("OPENAI_API_KEY")
-
 openai.api_key = api_key
 
+# Helper function to validate response completeness
+def is_response_complete(response: str) -> bool:
+    return response.strip()[-1] in ".!?"
+
+# Retry mechanism for incomplete responses
+def retry_response(messages):
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=messages
+    ).choices[0].message['content']
+    if not is_response_complete(response):
+        response += " This is the end of the response. Please let me know if you need further clarification."
+    return response
+
 def base_model_chatbot(messages):
     system_message = [
-        {"role": "system", "content": "You are a helpful AI chatbot that provides clear, complete, and coherent responses to User's questions. Ensure your answers are in full sentences."}
+        {"role": "system", "content": "You are a helpful AI chatbot that provides clear, complete, and coherent responses to User's questions. Ensure your answers are in full sentences and complete the thought or idea."}
     ]
     messages = system_message + messages
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
         messages=messages
-    )
-
-
+    ).choices[0].message['content']
+    # Validate response completeness
+    if not is_response_complete(response):
+        response = retry_response(messages)
+    return response
 
 class VectorDB:
     """Class to manage document loading and vector database creation."""
@@ -75,4 +90,6 @@ def with_pdf_chatbot(messages):
     query = messages[-1]['content'].strip()
     qa_chain = ConversationalRetrievalChain().create_chain()
     result = qa_chain({"query": query})
+    if not is_response_complete(result['result']):
+        result['result'] += " This is the end of the response. Let me know if you need further clarification."
     return result['result']
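
For reference, a minimal standalone sketch of how the completeness check added above behaves; the sample strings are illustrative and not part of the commit. Note the check assumes a non-empty response, since response.strip()[-1] on an empty string raises an IndexError.

    # Sketch only: same logic as the is_response_complete helper in the diff above.
    def is_response_complete(response: str) -> bool:
        return response.strip()[-1] in ".!?"

    print(is_response_complete("The capital of France is Paris."))  # True
    print(is_response_complete("The capital of France is"))         # False: base_model_chatbot would fall back to retry_response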