Update app.py
app.py
CHANGED
@@ -1,6 +1,12 @@
 import gradio as gr
 import requests
 
+from g4f import Provider, models
+from langchain.llms.base import LLM
+
+from langchain_g4f import G4FLLM
+
+
 url = "https://app.embedchain.ai/api/v1/pipelines/f14b3df8-db63-456c-8a7f-4323b4467271/context/"
 
 
@@ -18,7 +24,15 @@ def greet(name):
     response = requests.request("POST", url, headers=headers, json=payload)
 
     print(name)
-
+    c = response.text
+    llm: LLM = G4FLLM(
+        model=models.gpt_35_turbo,
+        provider=Provider.Aichat,
+    )
+
+    res = llm(f"context: {c}. {name}")
+    print(res)
+    return res
 
 
 
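For context, here is a minimal sketch of how the updated app.py might read end to end. The diff does not show headers, payload, or the Gradio wiring, so the Authorization header, the payload shape, and the gr.Interface call below are assumptions for illustration, not part of the commit.

import gradio as gr
import requests

from g4f import Provider, models
from langchain.llms.base import LLM

from langchain_g4f import G4FLLM

url = "https://app.embedchain.ai/api/v1/pipelines/f14b3df8-db63-456c-8a7f-4323b4467271/context/"


def greet(name):
    # Assumed: the commit does not show headers or payload; a typical setup
    # would send the user's message to the Embedchain context endpoint with
    # an API key. Both values below are placeholders.
    headers = {"Authorization": "Token YOUR_EMBEDCHAIN_API_KEY"}  # assumed
    payload = {"query": name}  # assumed payload shape

    response = requests.request("POST", url, headers=headers, json=payload)
    print(name)

    # Retrieved context is passed to the LLM as plain text, as in the diff.
    c = response.text
    llm: LLM = G4FLLM(
        model=models.gpt_35_turbo,
        provider=Provider.Aichat,
    )

    res = llm(f"context: {c}. {name}")
    print(res)
    return res


# Assumed Gradio wiring; the diff only touches the body of greet().
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()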