Commit 9fc1785
Parent: 81f0a03

Refactored LLM

Browse files:
- api/llm.py +37 -31
- resources/data.py +1 -1
- resources/prompts.py +5 -2
- ui/coding.py +6 -5
api/llm.py
CHANGED

@@ -5,13 +5,30 @@ from openai import OpenAI
 from utils.errors import APIError
 
 
+class PromptManager:
+    def __init__(self, prompts):
+        self.prompts = prompts
+        self.limit = os.getenv("DEMO_WORD_LIMIT")
+
+    def add_limit(self, prompt):
+        if self.limit:
+            prompt += f" Keep your responses very short and simple, no more than {self.limit} words."
+        return prompt
+
+    def get_system_prompt(self, key):
+        prompt = self.prompts[key]
+        return self.add_limit(prompt)
+
+    def get_problem_requirements_prompt(self, type, difficulty=None, topic=None, requirements=None):
+        prompt = f"Create a {difficulty} {topic} {type} problem. " f"Additional requirements: {requirements}. "
+        return self.add_limit(prompt)
+
+
 class LLMManager:
     def __init__(self, config, prompts):
         self.config = config
         self.client = OpenAI(base_url=config.llm.url, api_key=config.llm.key)
-        self.prompts = prompts
-        self.is_demo = os.getenv("IS_DEMO")
-        self.demo_word_limit = os.getenv("DEMO_WORD_LIMIT")
+        self.prompt_manager = PromptManager(prompts)
 
         self.status = self.test_llm()
         if self.status:
@@ -86,46 +103,37 @@ class LLMManager:
         except:
             return False
 
-    def init_bot(self, problem=""):
-        system_prompt = self.prompts["coding_interviewer_prompt"]
-        if self.is_demo:
-            system_prompt += f" Keep your responses very short and simple, no more than {self.demo_word_limit} words."
+    def init_bot(self, problem, interview_type="coding"):
+        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_interviewer_prompt")
 
         return [
             {"role": "system", "content": system_prompt},
             {"role": "system", "content": f"The candidate is solving the following problem: {problem}"},
         ]
 
-    def get_problem_prepare_messages(self, requirements, difficulty, topic):
-        full_prompt = (
-            f"Create a {difficulty} {topic} coding problem. "
-            f"Additional requirements: {requirements}. "
-            "The problem should be clearly stated, well-formatted, and solvable within 30 minutes. "
-            "Ensure the problem varies each time to provide a wide range of challenges."
-        )
-
-        if self.is_demo:
-            full_prompt += f" Keep your response very short and simple, no more than {self.demo_word_limit} words."
+    def get_problem_prepare_messages(self, requirements, difficulty, topic, interview_type):
+        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_problem_generation_prompt")
+        full_prompt = self.prompt_manager.get_problem_requirements_prompt(interview_type, difficulty, topic, requirements)
 
         messages = [
-            {"role": "system", "content": self.prompts["problem_generation_prompt"]},
+            {"role": "system", "content": system_prompt},
             {"role": "user", "content": full_prompt},
         ]
 
         return messages
 
-    def get_problem_full(self, requirements, difficulty, topic):
-        messages = self.get_problem_prepare_messages(requirements, difficulty, topic)
+    def get_problem_full(self, requirements, difficulty, topic, interview_type="coding"):
+        messages = self.get_problem_prepare_messages(requirements, difficulty, topic, interview_type)
         return self.get_text(messages)
 
-    def get_problem_stream(self, requirements, difficulty, topic):
-        messages = self.get_problem_prepare_messages(requirements, difficulty, topic)
+    def get_problem_stream(self, requirements, difficulty, topic, interview_type="coding"):
+        messages = self.get_problem_prepare_messages(requirements, difficulty, topic, interview_type)
        yield from self.get_text_stream(messages)
 
     def update_chat_history(self, code, previous_code, chat_history, chat_display):
         message = chat_display[-1][0]
         if code != previous_code:
-            chat_history.append({"role": "user", "content": f"My latest code:\n{code}"})
+            chat_history.append({"role": "user", "content": f"My latest solution:\n{code}"})
         chat_history.append({"role": "user", "content": message})
 
         return chat_history
@@ -152,12 +160,10 @@ class LLMManager:
 
         yield chat_history, chat_display, code
 
-    def end_interview_prepare_messages(self, problem_description, chat_history):
+    def end_interview_prepare_messages(self, problem_description, chat_history, interview_type):
         transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]
 
-        system_prompt = self.prompts["grading_feedback_prompt"]
-        if self.is_demo:
-            system_prompt += f" Keep your response very short and simple, no more than {self.demo_word_limit} words."
+        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_grading_feedback_prompt")
 
         messages = [
             {"role": "system", "content": system_prompt},
@@ -168,16 +174,16 @@ class LLMManager:
 
         return messages
 
-    def end_interview_full(self, problem_description, chat_history):
+    def end_interview_full(self, problem_description, chat_history, interview_type="coding"):
         if len(chat_history) <= 2:
             return "No interview history available"
         else:
-            messages = self.end_interview_prepare_messages(problem_description, chat_history)
+            messages = self.end_interview_prepare_messages(problem_description, chat_history, interview_type)
             return self.get_text_stream(messages)
 
-    def end_interview_stream(self, problem_description, chat_history):
+    def end_interview_stream(self, problem_description, chat_history, interview_type="coding"):
         if len(chat_history) <= 2:
             yield "No interview history available"
         else:
-            messages = self.end_interview_prepare_messages(problem_description, chat_history)
+            messages = self.end_interview_prepare_messages(problem_description, chat_history, interview_type)
             yield from self.get_text_stream(messages)
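Note: the refactor moves the duplicated IS_DEMO / DEMO_WORD_LIMIT checks into PromptManager.add_limit, so the word-limit suffix is now appended whenever DEMO_WORD_LIMIT is set (IS_DEMO is no longer read). A minimal usage sketch of the new class, assuming the repository root is on PYTHONPATH and the project's dependencies (e.g. openai) are installed; the prompt text below is a placeholder, not the real value from resources/prompts.py:

    import os

    os.environ["DEMO_WORD_LIMIT"] = "50"  # simulate demo mode before constructing the manager

    from api.llm import PromptManager

    prompts = {"coding_problem_generation_prompt": "You generate coding interview problems."}
    pm = PromptManager(prompts)

    # Both lookups pass through add_limit(), which appends the word-limit suffix.
    print(pm.get_system_prompt("coding_problem_generation_prompt"))
    print(pm.get_problem_requirements_prompt("coding", difficulty="easy", topic="Arrays", requirements="none"))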
resources/data.py
CHANGED

@@ -1,4 +1,4 @@
-topics_list = [
+coding_topics_list = [
     "Arrays",
     "Strings",
     "Linked Lists",
resources/prompts.py
CHANGED

@@ -1,8 +1,10 @@
 prompts = {
-    "problem_generation_prompt": (
+    "coding_problem_generation_prompt": (
         "You are AI acting as a coding round interviewer for a big-tech company. "
         "Generate a problem that tests the candidate's ability to solve real-world coding challenges efficiently. "
         "Ensure the problem tests for problem-solving skills, technical proficiency, code quality, and handling of edge cases. "
+        "The problem should be clearly stated, well-formatted, and solvable within 30 minutes. "
+        "Ensure the problem varies each time to provide a wide range of challenges."
     ),
     "coding_interviewer_prompt": (
         "As an AI acting as a coding interviewer for a major tech company, you are to maintain a professional and analytical demeanor. "
@@ -13,8 +15,9 @@ prompts = {
         "Encourage the candidate to think about real-world applications and scalability of their solutions, asking how changes to the problem parameters might affect their approach. "
         "Ask the candidate about the time and space complexity of the candidate's solutions after each significant problem-solving step. "
         "Prompt the candidate to explain how they compute these complexities, and guide them through the process if necessary, without providing the answers directly. "
+        "Keep your answers concise and clear, avoiding jargon or overly complex explanations. "
     ),
-    "grading_feedback_prompt": (
+    "coding_grading_feedback_prompt": (
         "You are the AI grader for a coding interview at a major tech firm. "
         "The following is the interview transcript with the candidate's responses. "
         "Ignore minor transcription errors unless they impact comprehension. "
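The renamed keys follow the f"{interview_type}_{purpose}_prompt" convention that api/llm.py now builds at runtime, so a missing key would only surface once an interview starts. A quick consistency check, assuming resources/ is importable as a package from the repository root:

    from resources.prompts import prompts

    # LLMManager looks up all three prompt kinds per interview type at runtime.
    for purpose in ("problem_generation", "interviewer", "grading_feedback"):
        key = f"coding_{purpose}_prompt"
        assert key in prompts, f"missing prompt key: {key}"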
ui/coding.py
CHANGED

@@ -1,7 +1,7 @@
 import gradio as gr
 import numpy as np
 
-from resources.data import topics_list, fixed_messages
+from resources.data import coding_topics_list, fixed_messages
 from utils.ui import add_candidate_message, add_interviewer_message
 
 
@@ -10,6 +10,7 @@ def get_codding_ui(llm, tts, stt, default_audio_params, audio_output):
     chat_history = gr.State([])
     previous_code = gr.State("")
     started_coding = gr.State(False)
+    interview_type = gr.State("coding")
     with gr.Accordion("Settings") as init_acc:
         with gr.Row():
             with gr.Column():
@@ -26,7 +27,7 @@ def get_codding_ui(llm, tts, stt, default_audio_params, audio_output):
             with gr.Row():
                 gr.Markdown("Topic (can type custom value)")
                 topic_select = gr.Dropdown(
-                    label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
+                    label="Select topic", choices=coding_topics_list, value="Arrays", container=False, allow_custom_value=True
                 )
         with gr.Column(scale=2):
             requirements = gr.Textbox(label="Requirements", placeholder="Specify additional requirements", lines=5)
@@ -68,11 +69,11 @@ def get_codding_ui(llm, tts, stt, default_audio_params, audio_output):
         fn=lambda: (gr.update(open=False), gr.update(interactive=False)), outputs=[init_acc, start_btn]
     ).success(
         fn=llm.get_problem,
-        inputs=[requirements, difficulty_select, topic_select],
+        inputs=[requirements, difficulty_select, topic_select, interview_type],
         outputs=[description],
         scroll_to_output=True,
     ).success(
-        fn=llm.init_bot, inputs=[description], outputs=[chat_history]
+        fn=llm.init_bot, inputs=[description, interview_type], outputs=[chat_history]
     ).success(
         fn=lambda: (gr.update(open=True), gr.update(interactive=True), gr.update(interactive=True)),
         outputs=[solution_acc, end_btn, audio_input],
@@ -86,7 +87,7 @@ def get_codding_ui(llm, tts, stt, default_audio_params, audio_output):
         fn=lambda: (gr.update(open=False), gr.update(interactive=False), gr.update(open=False), gr.update(interactive=False)),
         outputs=[solution_acc, end_btn, problem_acc, audio_input],
     ).success(
-        fn=llm.end_interview, inputs=[description, chat_history], outputs=[feedback]
+        fn=llm.end_interview, inputs=[description, chat_history, interview_type], outputs=[feedback]
     )
 
     send_btn.click(fn=add_candidate_message, inputs=[message, chat], outputs=[chat]).success(
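The interview_type wiring relies on a standard Gradio pattern: a gr.State holding a constant participates in event inputs like any other component, and the handler receives its plain value. A self-contained illustration (the handler below is a stand-in, not the project's llm.get_problem):

    import gradio as gr

    def get_problem(requirements, interview_type):
        # The state's value arrives as a plain string alongside the textbox value.
        return f"[{interview_type}] problem with requirements: {requirements}"

    with gr.Blocks() as demo:
        interview_type = gr.State("coding")  # constant per tab, never rendered in the UI
        requirements = gr.Textbox(label="Requirements")
        description = gr.Markdown()
        gr.Button("Generate").click(fn=get_problem, inputs=[requirements, interview_type], outputs=[description])

    demo.launch()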