from IPython.display import Image
from llmcam.gpt4v import ask_gpt4v
from llmcam.yolo import detect_objects
from llmcam.fn_to_fc import *
import json
import torch

source

stations

 stations (key:str)

Get all weather stations matching the given keyword

Porvoos = stations("Porvoo")
assert "porvoo" in Porvoos[0]['properties']['name'].lower()

source

presets

 presets (station:dict)

Get all presets at a given weather station

preset = presets(Porvoos[0])[0]
imageUrl = preset['imageUrl']
print(imageUrl)
assert "jpg" in imageUrl
https://weathercam.digitraffic.fi/C0150200.jpg
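Each preset is a plain dict; the example dumped under capture below shows it carries at least 'id' and 'imageUrl'. Assuming those keys, every camera view of a station can be listed like this:

# List the id and image URL of every preset at the first matched station.
for p in presets(Porvoos[0]):
    print(p['id'], p['imageUrl'])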

source

capture

 capture (preset:dict)

Capture an image at a given preset location in a weather station, and return the image path

preset
{'id': 'C0150200',
 'presentationName': 'Porvoo',
 'inCollection': True,
 'resolution': '704x576',
 'directionCode': '0',
 'imageUrl': 'https://weathercam.digitraffic.fi/C0150200.jpg',
 'direction': 'UNKNOWN'}
hdr, path = capture(preset)
hdr
{'Content-Type': 'image/jpeg', 'Content-Length': '90583', 'Connection': 'keep-alive', 'x-amz-id-2': 'wA2BT41+HxfZ8UNNB0dr91AyDorDWpnh9gYyE/acE++GnwwtQncc7GYDLfI2juroHtaUVeTE6KeFpHEL6En+pizw/rnE7KNq', 'x-amz-request-id': 'MVXS9XBF8X68B317', 'last-modified': 'Sat, 16 Nov 2024 11:01:59 GMT', 'x-amz-expiration': 'expiry-date="Mon, 18 Nov 2024 00:00:00 GMT", rule-id="Delete versions and current images after 24h"', 'x-amz-server-side-encryption': 'AES256', 'X-Amz-Meta-Last-Modified': 'Sat, 16 Nov 2024 11:01:59 GMT', 'x-amz-version-id': 'BoZIRd6gWf27YR3q0ABxAhtLdb7jAadA', 'Accept-Ranges': 'bytes', 'Server': 'AmazonS3', 'Date': 'Sat, 16 Nov 2024 11:09:42 GMT', 'ETag': '"c9abb89a29f564146eeb579986b09c75"', 'Vary': 'Accept-Encoding', 'X-Cache': 'RefreshHit from cloudfront', 'Via': '1.1 2e0b0e777d576ee595b61a5d3b296990.cloudfront.net (CloudFront)', 'X-Amz-Cf-Pop': 'HEL51-P1', 'Alt-Svc': 'h3=":443"; ma=86400', 'X-Amz-Cf-Id': 'e05ksWs35_TPiamKJbWNHKPKIl9MSB4hRGWUhO85ugewtnefX68haA=='}
display(Image(filename=path))
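The headers above come from a plain HTTP GET against the preset's imageUrl. As an illustration (not the library's implementation), a capture-like helper could be sketched with requests, using only the 'imageUrl' and 'id' keys shown in the preset dict:

import requests

def capture_sketch(preset: dict) -> tuple[dict, str]:
    "Download the preset's current frame and save it locally; return (headers, path)."
    r = requests.get(preset['imageUrl'])   # fetch the latest camera frame
    r.raise_for_status()
    path = f"{preset['id']}.jpg"           # e.g. C0150200.jpg (the library's own paths are timestamped, see the cap output below)
    with open(path, "wb") as f:
        f.write(r.content)
    return dict(r.headers), path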


source

cap

 cap (key:str='Porvoo')

Capture an image at a specified location, save it, and return its path

        Type  Default  Details
key     str   Porvoo   location keyword
Returns str
path = cap("porvoo")
print(path)
ask_gpt4v(path)
../data/cap_2024.11.16_11:01:59_Porvoo_C0150200.jpg
{'timestamp': '',
 'location': '',
 'dimensions': {'width': 720, 'height': 576},
 'buildings': {'number_of_buildings': 0},
 'vehicles': {'number_of_vehicles': 1},
 'waterbodies': {'visible': False},
 'street_lights': {'number_of_street_lights': 5},
 'people': {'approximate_number': 0},
 'lighting': {'time_of_day': 'daytime', 'artificial_lighting': 'none'},
 'visibility': {'clear': False},
 'sky': {'visible': True, 'light_conditions': 'overcast'}}
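Since ask_gpt4v returns a plain dict, downstream code can branch on individual fields. A small example using only keys visible in the output above:

# Re-run the analysis and pull a few fields out of the result.
info = ask_gpt4v(path)
print("street lights:", info['street_lights']['number_of_street_lights'])
print("clear visibility:", info['visibility']['clear'])
print("overcast:", info['sky']['light_conditions'] == 'overcast')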
detect_objects(cap('porvoo'))
Using cache found in /home/doyu/.cache/torch/hub/ultralytics_yolov5_master
YOLOv5 🚀 2024-11-12 Python-3.12.4 torch-2.4.1+cu121 CPU

Fusing layers... 
YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs
Adding AutoShape... 
/home/doyu/.cache/torch/hub/ultralytics_yolov5_master/models/common.py:892: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
  with amp.autocast(autocast):
{}
'{}'
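The quoted '{}' above suggests detect_objects returns its detections serialized as a JSON string (empty here, since nothing was found at the default confidence). If so, json.loads turns it back into a dict:

# Decode the string returned by detect_objects (assumption: it is JSON).
detections = json.loads(detect_objects(path))
if not detections:
    print("no objects detected")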
def __ask_gpt4v(
    path: str # file path to analyze
)->str:
    "ask gpt4v to analyze an image given"
    return json.dumps(ask_gpt4v(path))

__ask_gpt4v

 __ask_gpt4v (path:str)

Ask GPT-4V to analyze a given image

        Type  Details
path    str   file path to analyze
Returns str
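Tool results ultimately travel back to the model as text, which is presumably why the wrapper serializes the dict from ask_gpt4v into a string with json.dumps. The same pattern fits any dict-returning function; the jsonify helper below is hypothetical, purely to illustrate the idea:

from functools import wraps

def jsonify(fn):
    "Hypothetical helper: wrap a dict-returning function so it returns a JSON string."
    @wraps(fn)
    def wrapper(*args, **kwargs) -> str:
        return json.dumps(fn(*args, **kwargs))
    return wrapper

__ask_gpt4v_alt = jsonify(ask_gpt4v)  # behaves like the hand-written wrapper above

Writing the wrapper out by hand, as done here, keeps the name and docstring that end up in the tool schema fully explicit.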
tools = [tool_schema(f) for f in [cap, detect_objects, __ask_gpt4v] ]
tools
[{'type': 'function',
  'function': {'name': 'cap',
   'description': 'Capture an image at a specified location, save it, and return its path',
   'parameters': {'type': 'object',
    'properties': {'module': {'type': 'string',
      'description': 'The module where the function is located.',
      'default': '__main__'},
     'key': {'type': 'string', 'description': 'location keyword'}},
    'required': []}}},
 {'type': 'function',
  'function': {'name': 'detect_objects',
   'description': 'Run YOLO object detection on an input image.',
   'parameters': {'type': 'object',
    'properties': {'module': {'type': 'string',
      'description': 'The module where the function is located.',
      'default': 'llmcam.yolo'},
     'image_path': {'type': 'string', 'description': 'Path/URL of image'},
     'conf': {'type': 'number', 'description': 'Confidence threshold'}},
    'required': ['image_path']}}},
 {'type': 'function',
  'function': {'name': '__ask_gpt4v',
   'description': 'Ask GPT-4V to analyze a given image',
   'parameters': {'type': 'object',
    'properties': {'module': {'type': 'string',
      'description': 'The module where the function is located.',
      'default': '__main__'},
     'path': {'type': 'string', 'description': 'file path to analyze'}},
    'required': ['path']}}}]
def callback(name, tools=[], **kwargs): return globals().get(name)(**kwargs)
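Note that every generated schema above carries a 'module' property whose default names the defining module. A callback could use that hint to resolve the function by import instead of relying on globals(); the sketch below is ours (how a custom callback is wired into complete is not shown in this notebook):

import importlib

def module_callback(name, tools=[], **kwargs):
    "Resolve the tool in the module named by the schema's 'module' hint, then call it."
    module = kwargs.pop('module', '__main__')  # default used by the schemas above
    fn = getattr(importlib.import_module(module), name)
    return fn(**kwargs)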
msgs = form_msgs([
    ("system", "You are a helpful system administrator. Use the supplied tools to assist the user."),
    ("user", "Capture an image in Porvoo and tell me the path"),
])
complete(msgs, tools=tools)
('assistant',
 'The image has been captured in Porvoo and saved at the following path: `../data/cap_2024.11.16_11:01:59_Porvoo_C0150200.jpg`.')
msgs.append(form_msg("user", f"analyze this captured image"))
complete(msgs, tools=tools)
('assistant',
 'The analysis of the captured image from Porvoo reveals the following details:\n\n- **Dimensions**: The image has a width of 720 pixels and a height of 576 pixels.\n- **Buildings**: There are no buildings visible in the image.\n- **Vehicles**: No vehicles are present.\n- **Street Lights**: There are 5 street lights visible.\n- **People**: No people are visible in the image.\n- **Lighting**: The image was taken during the day, with no artificial lighting.\n- **Visibility**: The visibility is not clear.\n- **Weather**: It appears to be raining in the image.')
msgs.append(form_msg("user", "Can you detect objects in the file?"))
complete(msgs, tools=tools)
Using cache found in /home/doyu/.cache/torch/hub/ultralytics_yolov5_master
YOLOv5 🚀 2024-11-12 Python-3.12.4 torch-2.4.1+cu121 CPU

Fusing layers... 
YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs
Adding AutoShape... 
/home/doyu/.cache/torch/hub/ultralytics_yolov5_master/models/common.py:892: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
  with amp.autocast(autocast):
{}
('assistant',
 'No objects were detected in the image from Porvoo with the given confidence threshold.')
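The turn-taking above (append a user message with form_msg, then call complete with the same tools) repeats unchanged for every question, so it folds naturally into a small loop. A sketch, assuming, as the transcripts above suggest, that complete keeps msgs up to date and returns the latest (role, content) pair:

# Minimal interactive loop over the same message list and tool set.
while (question := input("user> ").strip()):
    msgs.append(form_msg("user", question))
    role, answer = complete(msgs, tools=tools)
    print(f"{role}> {answer}")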