deploy 3
- api/question_answering/qa_model.py +5 -6
- requirements.txt +0 -1
api/question_answering/qa_model.py
CHANGED

@@ -13,12 +13,11 @@ from langchain.llms import HuggingFacePipeline
 from langchain.llms.base import LLM
 from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceHubEmbeddings, HuggingFaceInstructEmbeddings
 from langchain.vectorstores import FAISS
-from llama_cpp import Llama
 
 from api.logger import logger
 from api.question_answering.response import Response
 
-
+"""
 class LocalBinaryModel(LLM):
     model_id: str = None
     llm: Llama = None
@@ -49,7 +48,7 @@ class LocalBinaryModel(LLM):
     @property
     def _llm_type(self) -> str:
         return self.model_id
-
+"""
 
 class TransformersPipelineModel(LLM):
     model_id: str = None
@@ -170,9 +169,9 @@ class QAModel():
 
         if 'local_models/' in llm_model_id:
             logger.info('using local binary model')
-            self.llm_model = LocalBinaryModel(
-                model_id=llm_model_id
-            )
+            #self.llm_model = LocalBinaryModel(
+            #    model_id=llm_model_id
+            #)
         elif 'api_models/' in llm_model_id:
             logger.info('using api served model')
             self.llm_model = APIServedModel(
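For context, the block fenced off by the two added """ markers is the llama.cpp-backed wrapper that QAModel stops constructing. The diff shows only the class header, the two fields, and the _llm_type property; everything else in the sketch below (the constructor and _call) is an assumption about what a minimal langchain LLM subclass over llama_cpp.Llama typically looks like, not the file's actual code:

# Hedged reconstruction of the class this commit disables. Only model_id,
# llm, and _llm_type appear in the diff; __init__ and _call are assumed.
from typing import List, Optional

from langchain.llms.base import LLM
from llama_cpp import Llama  # the dependency this commit removes


class LocalBinaryModel(LLM):
    model_id: str = None
    llm: Llama = None

    def __init__(self, model_id: str = None):
        super().__init__()
        self.model_id = model_id
        # assumed: model_id is a path such as 'local_models/<file>.bin'
        self.llm = Llama(model_path=model_id, n_ctx=4096)

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # assumed: plain completion call; llama_cpp returns an
        # OpenAI-style dict with the text under choices[0]['text']
        output = self.llm(prompt, max_tokens=1024, stop=stop or [], echo=False)
        return output['choices'][0]['text']

    @property
    def _llm_type(self) -> str:
        return self.model_id

Turning the class into a module-level string rather than deleting it keeps the code easy to restore later; the Llama annotation left inside the string no longer resolves once the import is gone, but that is harmless since a string literal is never executed.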
requirements.txt
CHANGED

@@ -7,7 +7,6 @@ discord.py==2.2.2
 evaluate==0.4.0
 fastapi==0.98.0
 langchain==0.0.154
-llama_cpp_python==0.1.39
 nltk==3.8.1
 nbconvert==7.6.0
 nbformat==5.9.0
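With llama_cpp_python removed from the requirements, the module-level from llama_cpp import Llama would raise ImportError at startup, which is presumably why the import, the class, and the call site are all fenced off in the same commit. Note that the local_models/ branch in QAModel still logs 'using local binary model' but no longer assigns anything, so passing such a model id appears to leave self.llm_model unset. A hypothetical alternative, not what this commit does, would keep the branch alive without a hard top-level dependency by importing lazily inside QAModel.__init__:

# Hypothetical sketch (not part of this commit): lazy import so the
# dependency is only required when a local binary is actually requested.
if 'local_models/' in llm_model_id:
    logger.info('using local binary model')
    try:
        from llama_cpp import Llama  # availability probe for this branch
    except ImportError as err:
        raise RuntimeError(
            'local_models/ paths require llama_cpp_python to be installed'
        ) from err
    self.llm_model = LocalBinaryModel(model_id=llm_model_id)

As written, though, the commit simply drops local binary support from this deployment.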