Commit 1f19f64
Parent(s): 3447ff0

Added streaming to feedback

Files changed:
- api/llm.py  +32 -18
- app.py      +6 -6
api/llm.py CHANGED (+32 -18)

@@ -91,8 +91,9 @@ class LLMManager:
         return chat_history, chat_display, "", code

     def end_interview(self, problem_description, chat_history):
+
         if not chat_history or len(chat_history) <= 2:
-            return "No interview content available to review."
+            yield "No interview content available to review."

         transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]

@@ -100,21 +101,34 @@ class LLMManager:
         if self.is_demo:
             system_prompt += f" Keep your response very short and simple, no more than {self.demo_word_limit} words."

-        try:
-            response = self.client.chat.completions.create(
-                model=self.config.llm.name,
-                messages=[
-                    {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": f"The original problem to solve: {problem_description}"},
-                    {"role": "user", "content": "\n\n".join(transcript)},
-                    {"role": "user", "content": "Grade the interview based on the transcript provided and give feedback."},
-                ],
-                temperature=0.5,
-            )
-            if not response.choices:
-                raise APIError("LLM End Interview Error", details="No choices in response")
-            feedback = response.choices[0].message.content.strip()
-        except Exception as e:
-            raise APIError(f"LLM End Interview Error: Unexpected error: {e}")
+        messages = [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": f"The original problem to solve: {problem_description}"},
+            {"role": "user", "content": "\n\n".join(transcript)},
+            {"role": "user", "content": "Grade the interview based on the transcript provided and give feedback."},
+        ]

-        return feedback
+        if os.getenv("STREAMING", False):
+            try:
+                response = self.client.chat.completions.create(
+                    model=self.config.llm.name,
+                    messages=messages,
+                    temperature=0.5,
+                    stream=True,
+                )
+            except Exception as e:
+                raise APIError(f"LLM End Interview Error: Unexpected error: {e}")
+
+            feedback = ""
+            for chunk in response:
+                if chunk.choices[0].delta.content:
+                    feedback += chunk.choices[0].delta.content
+                    yield feedback
+        # else:
+        #     response = self.client.chat.completions.create(
+        #         model=self.config.llm.name,
+        #         messages=messages,
+        #         temperature=0.5,
+        #     )
+        #     feedback = response.choices[0].message.content.strip()
+        #     return feedback
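For context on the API pattern this commit adopts: with an OpenAI-style chat completions client, passing stream=True makes create() return an iterator of chunks, and each chunk's choices[0].delta.content carries an incremental piece of text (or None for bookkeeping chunks). The loop above accumulates those pieces and yields the full text so far, which is what lets a UI re-render a growing string. A minimal, self-contained sketch of that accumulate-and-yield loop, using stub classes in place of a real client response (_Delta, _Choice, _Chunk, fake_stream, and stream_feedback are illustrative stand-ins, not part of this repo):

from dataclasses import dataclass
from typing import Iterator, List, Optional

# Illustrative stand-ins for the OpenAI streaming chunk objects;
# a real client yields ChatCompletionChunk instances with the same shape.
@dataclass
class _Delta:
    content: Optional[str]

@dataclass
class _Choice:
    delta: _Delta

@dataclass
class _Chunk:
    choices: List[_Choice]

def fake_stream() -> Iterator[_Chunk]:
    # Simulates a streamed completion: most chunks carry text, the last carries None.
    for piece in ["Good ", "solution, ", "but mind edge cases.", None]:
        yield _Chunk(choices=[_Choice(delta=_Delta(content=piece))])

def stream_feedback(response: Iterator[_Chunk]) -> Iterator[str]:
    # Same accumulate-and-yield pattern as end_interview: each yield is the
    # full text so far, so a consumer can simply re-render the growing string.
    feedback = ""
    for chunk in response:
        if chunk.choices[0].delta.content:
            feedback += chunk.choices[0].delta.content
            yield feedback

for partial in stream_feedback(fake_stream()):
    print(partial)

One caveat with the toggle as committed: environment variable values are strings, so os.getenv("STREAMING", False) is truthy for any non-empty setting, including "false" or "0"; only leaving the variable unset keeps the streaming branch off.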
app.py CHANGED (+6 -6)

@@ -139,16 +139,16 @@ with gr.Blocks(title="AI Interviewer") as demo:
     coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat, started_coding], outputs=[chat])

     start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).then(
-        fn=lambda: True,
+        fn=lambda: True, outputs=[started_coding]
     ).then(
         fn=llm.get_problem,
         inputs=[requirements, difficulty_select, topic_select],
         outputs=[description, chat_history],
         scroll_to_output=True,
     ).then(
-        fn=hide_settings,
+        fn=hide_settings, outputs=[init_acc, start_btn]
     ).then(
-        fn=show_solution,
+        fn=show_solution, outputs=[solution_acc, end_btn, audio_input]
     )

     end_btn.click(
@@ -156,11 +156,11 @@ with gr.Blocks(title="AI Interviewer") as demo:
         inputs=[chat],
         outputs=[chat],
     ).then(
-        fn=
-    ).then(fn=
+        fn=hide_solution, outputs=[solution_acc, end_btn, problem_acc, audio_input]
+    ).then(fn=llm.end_interview, inputs=[description, chat_history], outputs=[feedback])

     audio_input.stop_recording(fn=stt.speech_to_text, inputs=[audio_input], outputs=[message]).then(
-        fn=lambda: None,
+        fn=lambda: None, outputs=[audio_input]
     ).then(fn=add_candidate_message, inputs=[message, chat], outputs=[chat]).then(
         fn=llm.send_request,
         inputs=[code, previous_code, message, chat_history, chat],