vikas1276 committed · verified
Commit 78d84d6 · 1 Parent(s): c1df5d5

Update app.py

Files changed (1): app.py +34 -29
app.py CHANGED
@@ -1,39 +1,44 @@
 import torch
 import gradio as gr
-from diffusers import StableDiffusionImg2ImgPipeline
-from PIL import Image
-
-# Set up the device (use 'cpu' since we are running on CPU)
-device = "cpu"  # Force CPU usage
-
-# Load the pre-trained Stable Diffusion model from Hugging Face
-pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-2-1", torch_dtype=torch.float32)
-pipe = pipe.to(device)
-
-# Function for generating an image based on input image and prompt
-def generate_image(photo, prompt):
-    # Resize input image to 512x512 (required by Stable Diffusion)
-    photo = photo.resize((512, 512))
-
-    # Convert the PIL image to a tensor
-    photo_tensor = torch.tensor(np.array(photo)).permute(2, 0, 1).unsqueeze(0).to(device).float() / 255.0
-
-    # Generate the image using Stable Diffusion model
-    generated_image = pipe(prompt=prompt, image=photo_tensor, strength=0.75, guidance_scale=7.5).images[0]
-
-    # Return the generated image
-    return generated_image
-
 # Create Gradio interface
-interface = gr.Interface(
-    fn=generate_image,  # The function to generate images
-    inputs=[
-        gr.Image(type="pil", label="Upload your photo"),  # Input image (your photo)
-        gr.Textbox(lines=2, placeholder="Enter prompt here", label="Enter text prompt")  # Input text prompt
-    ],
-    outputs=gr.Image(type="pil", label="Generated Image"),  # Output generated image
-    live=True  # Automatically update the output as the user changes the prompt or uploads a photo
 )

-# Launch the Gradio interface
-interface.launch()
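
Note on the removed version: it used np.array without importing NumPy, the checkpoint id "runwayml/stable-diffusion-2-1" appears to mix two different repos (runwayml publishes stable-diffusion-v1-5, while stable-diffusion-2-1 lives under stabilityai), and it hand-built a pixel tensor even though StableDiffusionImg2ImgPipeline accepts PIL images directly. A minimal corrected sketch for reference, assuming the runwayml/stable-diffusion-v1-5 checkpoint (which matches the 512x512 resize); this is not the committed code:

    import torch
    import gradio as gr
    from diffusers import StableDiffusionImg2ImgPipeline

    device = "cpu"  # CPU-only Space; generation will be slow

    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32
    ).to(device)

    def generate_image(photo, prompt):
        # The pipeline takes the PIL image as-is; no manual tensor conversion needed
        photo = photo.resize((512, 512))
        return pipe(prompt=prompt, image=photo, strength=0.75, guidance_scale=7.5).images[0]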
 
 
+# app.py
+# -*- coding: utf-8 -*-
+
+import os
 import torch
 import gradio as gr
+from unsloth import FastLanguageModel
+from trl import SFTTrainer
+from transformers import TrainingArguments
+from datasets import load_dataset

+# Load the dataset, then the model and tokenizer
+max_seq_length = 2048
+dataset_path = "/content/dataset.jsonl"  # Update this path as needed
+dataset = load_dataset("json", data_files=dataset_path)

+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name="unsloth/Meta-Llama-3.1-8B",
+    max_seq_length=max_seq_length,
+    dtype=None,
+    load_in_4bit=True,
+)

+# Prepare the model for inference
+model = FastLanguageModel.for_inference(model)

+# Function to generate text
+def generate_response(user_input):
+    inputs = tokenizer(user_input, return_tensors="pt").to("cuda:0")
+    output = model.generate(**inputs)
+    return tokenizer.batch_decode(output)[0]

 # Create Gradio interface
+iface = gr.Interface(
+    fn=generate_response,
+    inputs=gr.Textbox(label="User Input"),
+    outputs=gr.Textbox(label="Bot Response"),
+    title="Chatbot with Llama 3.1",
+    description="A chatbot powered by the Llama 3.1 model. Type your message below."
 )

+# Launch the Gradio app
+if __name__ == "__main__":
+    iface.launch()
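
Note: SFTTrainer, TrainingArguments, and the loaded dataset are imported and built but never used; this app only serves inference. If fine-tuning was the intent, here is a hedged sketch of how those pieces are typically wired with Unsloth, assuming dataset.jsonl has a "text" column and a TRL version whose SFTTrainer still takes dataset_text_field and max_seq_length directly; LoRA adapters must be attached first, and for_inference must not be applied beforehand:

    model = FastLanguageModel.get_peft_model(
        model, r=16,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attach LoRA adapters
    )
    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        train_dataset=dataset["train"],
        dataset_text_field="text",  # assumed column name
        max_seq_length=max_seq_length,
        args=TrainingArguments(output_dir="outputs", per_device_train_batch_size=2, max_steps=60),
    )
    trainer.train()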
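
Note: generate_response also hardcodes cuda:0, returns the prompt echoed back along with special tokens, and leaves the response length at the short transformers default. A tighter sketch, assuming an instruct checkpoint whose tokenizer ships a chat template (e.g. unsloth/Meta-Llama-3.1-8B-Instruct rather than the base model loaded above):

    def generate_response(user_input):
        messages = [{"role": "user", "content": user_input}]
        # Format the prompt the way the instruct model expects
        input_ids = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(model.device)  # follow the model's device instead of hardcoding cuda:0
        output = model.generate(input_ids, max_new_tokens=256)  # bound the response length
        # Decode only the newly generated tokens, skipping the echoed prompt
        return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)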