from typing import Any, Dict

import torch
import torch.nn as nn
from transformers import (
    AutoConfig,
    AutoModel,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedModel,
)


class CustomSNPConfig(PretrainedConfig):
    """Configuration for the custom SNP model; sizes default to BERT-base values."""

    model_type = "custom_snp"

    def __init__(self, hidden_size: int = 768, vocab_size: int = 30522, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size


class CustomSNPModel(PreTrainedModel): |
|
|
config_class = CustomSNPConfig |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__(config) |
|
|
hidden_size = getattr(config, "hidden_size", 768) |
|
|
self.encoder = nn.Linear(hidden_size, hidden_size) |
|
|
self.mirror_head = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.Tanh()) |
|
|
self.prism_head = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.Tanh()) |
|
|
self.projection = nn.Linear(hidden_size, 6) |
|
|
|
|
|
    def forward(self, input_ids=None, attention_mask=None, **kwargs):
        if input_ids is None:
            raise ValueError("CustomSNPModel.forward requires input_ids.")
        x = self.embeddings(input_ids)
        x = self.encoder(x)
        x = self.mirror_head(x)
        x = self.prism_head(x)
        # Shape: (batch, seq_len, 6)
        return self.projection(x)


# Register the custom classes so AutoConfig / AutoModel can resolve "custom_snp".
AutoConfig.register("custom_snp", CustomSNPConfig)
AutoModel.register(CustomSNPConfig, CustomSNPModel)
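
# Illustrative usage (the directory name below is a placeholder, not something
# this file defines): registration lets AutoConfig round-trip the custom
# model_type from a saved checkpoint directory.
#
#     CustomSNPConfig(hidden_size=768).save_pretrained("./snp-model")
#     cfg = AutoConfig.from_pretrained("./snp-model")  # resolves to CustomSNPConfig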


class EndpointHandler:
    """Inference Endpoints handler: loads the custom model and serves embeddings."""

    def __init__(self, model_dir: str):
        print(f"Loading model from {model_dir}")
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
        config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
        self.model = AutoModel.from_pretrained(model_dir, config=config, trust_remote_code=True)
        self.model.eval()
        print("✅ Custom SNP model loaded successfully.")

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Accept {"inputs": {"text": ...}}, {"inputs": "..."}, or a bare payload.
        inputs = data.get("inputs") or data
        if isinstance(inputs, dict) and "text" in inputs:
            text = inputs["text"]
        else:
            text = str(inputs)

        encoded = self.tokenizer(text, return_tensors="pt", truncation=True, padding=True)

        with torch.no_grad():
            outputs = self.model(**encoded)

        # Mean-pool over the sequence dimension whatever container the model returns.
        if hasattr(outputs, "last_hidden_state"):
            emb = outputs.last_hidden_state.mean(dim=1).tolist()
        elif isinstance(outputs, tuple):
            emb = outputs[0].mean(dim=1).tolist()
        else:
            # CustomSNPModel returns a raw (batch, seq_len, 6) tensor; pool it the
            # same way so the response shape matches the other branches.
            emb = outputs.mean(dim=1).tolist()

        return {"embeddings": emb}
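

# Local smoke test (a minimal sketch under stated assumptions: the size values
# are placeholders, and EndpointHandler is skipped because it needs a saved
# model directory with tokenizer files on disk).
if __name__ == "__main__":
    config = CustomSNPConfig(hidden_size=768, vocab_size=30522)
    model = AutoModel.from_config(config)
    assert isinstance(model, CustomSNPModel)
    dummy_ids = torch.randint(0, config.vocab_size, (2, 16))
    with torch.no_grad():
        out = model(input_ids=dummy_ids)
    print(out.shape)  # expected: torch.Size([2, 16, 6])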