init
commit f393130598
19  imageEncoder.py  Normal file
@@ -0,0 +1,19 @@
import requests
from PIL import Image

from transformers import GPT2TokenizerFast, ViTImageProcessor, VisionEncoderDecoderModel

# load a fine-tuned image captioning model and corresponding tokenizer and image processor
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokenizer = GPT2TokenizerFast.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
image_processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

# let's perform inference on an image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = image_processor(image, return_tensors="pt").pixel_values

# autoregressively generate caption (uses greedy decoding by default)
generated_ids = model.generate(pixel_values)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_text)
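The script above relies on greedy decoding. A minimal sketch of swapping in beam search through generate() keyword arguments; the beam count and length cap are illustrative values, not part of this commit:

# Beam-search variant of the caption generation step (illustrative settings).
generated_ids = model.generate(
    pixel_values,
    num_beams=4,          # keep 4 hypotheses instead of the greedy argmax
    max_new_tokens=32,    # cap caption length
    early_stopping=True,  # stop once all beams have emitted EOS
)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])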
11  run.py  Normal file
@@ -0,0 +1,11 @@
from transformers import pipeline

# pipe = pipeline("text-generation", model="TheBloke/llava-v1.5-13B-AWQ", device_map="cuda:1")
pipe = pipeline("image-classification", model="TheBloke/llava-v1.5-13B-AWQ", device_map="cuda:1")

images = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"

pipe(images)
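LLaVA is a vision-language model rather than an image classifier, so the image-classification pipeline above is unlikely to accept this checkpoint. A minimal sketch using the image-to-text pipeline instead; the llava-hf/llava-1.5-13b-hf model id and the prompt format are assumptions, not part of this commit, and the AWQ checkpoint would additionally require the autoawq package:

from transformers import pipeline

# Assumed full-precision LLaVA checkpoint; swap in the AWQ repo only if autoawq is installed.
pipe = pipeline("image-to-text", model="llava-hf/llava-1.5-13b-hf", device_map="cuda:1")

image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
prompt = "USER: <image>\nWhat does the image show?\nASSISTANT:"  # assumed LLaVA-1.5 chat format

# The pipeline downloads the image, prepends the prompt, and generates a textual answer.
outputs = pipe(image_url, prompt=prompt, generate_kwargs={"max_new_tokens": 50})
print(outputs[0]["generated_text"])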