# "Spaces" / "Sleeping" — Hugging Face Spaces page-status banner text that was
# captured along with the code; it is not part of the program.
import os

import streamlit as st
from transformers import pipeline

# ... (other setup or imports you may need)
# Pre-Configuration & Model Loading
# Preamble prepended to prompts for "openchat"-style models.
# NOTE(review): the value below is placeholder text carried over from the
# original snippet — substitute the real preprompt before deploying.
openchat_preprompt = """
(same content as before) ...
"""
def get_usernames(model: str) -> tuple[str, str, str, str]:
    """Return the prompt pieces used to format a chat for *model*.

    Parameters
    ----------
    model : str
        Model identifier, e.g. "OpenAssistant/oasst-sft-1-pythia-12b".

    Returns
    -------
    tuple[str, str, str, str]
        ``(preprompt, user_name, assistant_name, sep)`` where *preprompt* is
        prepended once to the whole conversation, *user_name* /
        *assistant_name* prefix each turn, and *sep* terminates a turn.
    """
    # OpenAssistant models were fine-tuned with these special role tokens.
    if "OpenAssistant" in model or "oasst" in model:
        return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>"
    # openchat-style models carry a dedicated preamble.
    if "openchat" in model.lower():
        return openchat_preprompt, "GPT4 User: ", "GPT4 Assistant: ", "<|end_of_turn|>"
    # Fallback: plain-text role labels separated by newlines.
    return "", "User: ", "Assistant: ", "\n"
# Text Generation API Interaction (Modify as needed)
def predict(model, inputs, typical_p, top_p, temperature,
            top_k, repetition_penalty, watermark, chatbot, history):
    """Generate the assistant's next reply and update the chat history.

    Parameters
    ----------
    model : str
        Model identifier passed to ``transformers.pipeline``.
    inputs : str
        The user's newest message.
    typical_p, top_p, temperature, top_k, repetition_penalty : float | int
        Sampling parameters forwarded to the generation call.
    watermark : bool
        Accepted for interface compatibility; the local ``transformers``
        pipeline has no watermark switch, so it is currently ignored.
    chatbot :
        Display placeholder (unused here; the caller renders the output).
    history : list[str]
        Alternating user/assistant turns; mutated in place.

    Returns
    -------
    tuple[list[tuple[str, str]], list[str]]
        ``(chat_pairs, history)`` — (user, assistant) pairs for display,
        plus the updated flat history.
    """
    preprompt, user_name, assistant_name, sep = get_usernames(model)
    history.append(inputs)

    # Interleave user/assistant turns into a single prompt string.
    turns = []
    for i, text in enumerate(history):
        speaker = user_name if i % 2 == 0 else assistant_name
        turns.append(f"{speaker}{text}{sep}")
    prompt = preprompt + "".join(turns) + assistant_name

    # NOTE(review): this loads the model on every call — wrap pipeline
    # creation in st.cache_resource for production use.
    generator = pipeline("text-generation", model=model)
    output = generator(
        prompt,
        do_sample=True,
        typical_p=typical_p,
        top_p=top_p,
        temperature=temperature,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        return_full_text=False,
    )[0]["generated_text"]

    # Trim at the turn separator if the model ran past the end of its turn.
    reply = output.split(sep)[0].strip() if sep else output.strip()
    history.append(reply)

    # Pair up (user, assistant) turns for display.
    chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
    return chat, history
# --- Streamlit UI -----------------------------------------------------------
st.title("Large Language Model Chat API")
st.markdown("**Description:** ... (Your description)")

# Model Selection
model_name = st.radio("Model", [
    "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    "OpenAssistant/oasst-sft-1-pythia-12b",
    # ... (other supported models)
])

# Parameter Controls — these names must match predict()'s signature.
typical_p = st.slider("typical_p", 0.01, 1.0, 0.2)
top_p = st.slider("top_p", 0.01, 1.0, 0.25)
temperature = st.slider("temperature", 0.1, 2.0, 0.6)
top_k = st.slider("top_k", 1, 100, 50)
repetition_penalty = st.slider("repetition_penalty", 0.1, 3.0, 1.03)
watermark = st.checkbox("watermark", value=False)

# Chat Interface.
# Streamlit re-runs this script top-to-bottom on every interaction, so the
# history must live in st.session_state — a plain `history = []` here would
# wipe the conversation on each rerun.
if "history" not in st.session_state:
    st.session_state.history = []

chatbot = st.empty()  # Placeholder for the chatbot display
user_input = st.text_input("Type an input and press Enter", "")

if user_input:
    with st.spinner("Generating response..."):
        # predict() appends the new user turn and the generated reply to the
        # history itself, so the raw input is passed straight through.
        chat_output, st.session_state.history = predict(
            model_name, user_input, typical_p, top_p, temperature,
            top_k, repetition_penalty, watermark, chatbot,
            st.session_state.history,
        )
    chatbot.text("\n".join(f"{user}{assistant}" for user, assistant in chat_output))

# ... (Rest of the UI, if needed) ...