# Source: Hugging Face Space file listing (commit 571ae6f, 1,463 bytes).
# The original page chrome ("Spaces", "Sleeping" status, line-number gutter)
# was scrape residue and has been collapsed into this note.
import os
import streamlit as st
from transformers import pipeline
# ... (other setup or imports you may need)
# Pre-Configuration & Model Loading
# System pre-prompt prepended to the conversation for OpenChat-style models.
# NOTE(review): the content is a placeholder ("... same content as before ...")
# — restore the real pre-prompt text before deploying.
openchat_preprompt = """
... (same content as before) ...
"""
def get_usernames(model: str):
    """Return the prompt-formatting strings for *model*.

    Parameters
    ----------
    model:
        Hub model id, e.g. ``"OpenAssistant/oasst-sft-1-pythia-12b"``.

    Returns
    -------
    tuple[str, str, str, str]
        ``(preprompt, user_name, assistant_name, sep)`` — the system
        pre-prompt, the token/prefix marking a user turn, the token/prefix
        marking an assistant turn, and the turn separator.

    The original stub returned four names that were never defined
    (guaranteed ``NameError``); this implements the conventional formats:
    OpenAssistant models use the ``<|prompter|>``/``<|assistant|>`` special
    tokens, everything else falls back to an OpenChat-style plain-text chat.
    """
    if model.startswith("OpenAssistant"):
        # OASST SFT models were trained with these special tokens.
        return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>"
    # Fallback: OpenChat-style plain-text formatting with the module-level
    # pre-prompt defined above.
    return openchat_preprompt, "User: ", "Assistant: ", "\n"
# Text Generation API Interaction (Modify as needed)
def predict(model, inputs, typical_p=0.2, top_p=0.25, temperature=0.6,
            top_k=50, repetition_penalty=1.03, watermark=True,
            chatbot=None, history=None):
    """Generate one assistant reply for *inputs* and extend the chat history.

    Parameters
    ----------
    model:
        Model id selected in the UI (forwarded to the generation backend).
    inputs:
        The user's new message.
    typical_p, top_p, temperature, top_k, repetition_penalty, watermark:
        Sampling parameters for the backend; defaults are common
        OpenAssistant settings (backward-compatible addition — the original
        signature had no defaults).
    chatbot:
        UI placeholder handle; unused here, kept for interface compatibility.
    history:
        Flat list alternating user/assistant turns; not mutated in place.

    Returns
    -------
    tuple[list[tuple[str, str]], list[str]]
        ``(chat, history)`` — display pairs and the updated flat history,
        matching how the UI unpacks the result.

    The original body contained only comments, which is a ``SyntaxError``;
    this provides a working, deterministic skeleton.
    """
    history = [] if history is None else list(history)
    history.append(inputs)
    # TODO(review): replace this placeholder with the real text-generation
    # API call, wiring through the sampling parameters above.  The empty
    # response keeps the UI functional and the flow testable until then.
    response = ""
    history.append(response)
    # Pair up (user, assistant) turns for display.
    chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
    return chat, history
# Streamlit UI
st.title("Large Language Model Chat API")
st.markdown("**Description:** ... (Your description)")

# Model Selection
model_name = st.radio("Model", [
    "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    "OpenAssistant/oasst-sft-1-pythia-12b",
    # ... (other supported models)
])

# Parameter Controls (Conditional display as in Gradio)
# TODO(review): expose typical_p, top_p, temperature, top_k,
# repetition_penalty and watermark as st.slider / st.checkbox widgets;
# the constants passed to predict() below are common OpenAssistant defaults.

# Chat Interface
chatbot = st.empty()  # Placeholder for the chatbot display
user_input = st.text_input("Type an input and press Enter", "")

# Persist the conversation across Streamlit reruns: the original
# `history = []` re-created an empty list on every widget interaction,
# discarding the whole conversation each turn.
if "history" not in st.session_state:
    st.session_state.history = []
history = st.session_state.history

if user_input:
    with st.spinner("Generating response..."):
        # The original call was a SyntaxError — its closing ")" was swallowed
        # by a trailing comment.  Pass the full positional argument list
        # matching predict()'s signature.
        chat_output, history = predict(model_name, user_input, 0.2, 0.25,
                                       0.6, 50, 1.03, True, chatbot, history)
    st.session_state.history = history
    chatbot.text("\n".join(f"{user}{reply}" for user, reply in chat_output))
# ... (Rest of the UI, if needed) ...