peterciank committed
Commit df7213c · verified · 1 Parent(s): d5e9d54

Create app_dev.py

Files changed (1):
  app_dev.py  +46 -0
app_dev.py ADDED
@@ -0,0 +1,46 @@
+ import os
+ import streamlit as st
+ from transformers import pipeline
+
+ # ... (other setup or imports you may need)
+
+ # Pre-Configuration & Model Loading
+ openchat_preprompt = """
+ ... (same content as before) ...
+ """
+
+ def get_usernames(model: str):
+     # ... (Implementation remains identical) ...
+     return preprompt, user_name, assistant_name, sep
+
+ # Text Generation API Interaction (Modify as needed)
+ def predict(model, inputs, typical_p, top_p, temperature,
+             top_k, repetition_penalty, watermark, chatbot, history):
+     pass  # ... (Logic adapted for your text generation API) ...
+
+ # Streamlit UI
+ st.title("Large Language Model Chat API")
+ st.markdown("**Description:** ... (Your description)")
+
+ # Model Selection
+ model_name = st.radio("Model", [
+     "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+     "OpenAssistant/oasst-sft-1-pythia-12b",
+     # ... (other supported models)
+ ])
+
+ # Parameter Controls (Conditional display as in Gradio)
+ # ... (Streamlit controls for typical_p, top_p, etc.)
+
+ # Chat Interface
+ chatbot = st.empty()  # Placeholder for the chatbot display
+ user_input = st.text_input("Type an input and press Enter", "")
+ history = []
+
+ if user_input:
+     history.append(user_input)
+     with st.spinner("Generating response..."):
+         chat_output, history = predict(model_name, user_input)  # ... other parameters ...
+         chatbot.text("\n".join([f"{data[0]}{data[1]}" for data in chat_output]))
+
+ # ... (Rest of the UI, if needed) ...
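
Note: the body of predict and the parameter controls are left as placeholders in this commit. As a rough sketch only, and assuming a local transformers pipeline rather than the hosted text-generation API the placeholder comment refers to, the elided body could look roughly like this (loading the model on every call is shown for brevity; a real app would cache it, for example with st.cache_resource):

# Hypothetical sketch: one way to fill in the elided predict() body with a
# local transformers pipeline (an assumption, not the author's actual API logic).
from transformers import pipeline

def predict(model, inputs, typical_p, top_p, temperature,
            top_k, repetition_penalty, watermark, chatbot, history):
    # Build a text-generation pipeline for the selected checkpoint.
    generator = pipeline("text-generation", model=model)
    # The sampling controls map directly onto generate() keyword arguments;
    # watermarking is a text-generation-inference option and is ignored here.
    output = generator(
        inputs,
        do_sample=True,
        typical_p=typical_p,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        max_new_tokens=256,
    )[0]["generated_text"]
    history.append(output)
    # Pair consecutive (user, assistant) turns the way the Streamlit caller expects.
    chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
    return chat, history

With a body like this, the call site would pass the remaining sampling parameters along with the chatbot placeholder and history list, and the app could be started locally with: streamlit run app_dev.py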