Thursday, October 2, 2025

 Counting parking spots 

Just as analytics on objects detected in aerial drone images are run just-in-time, at query time, rather than for every frame extracted from a drone video, counting unoccupied parking spots is a reasonable detection task to request in a query. The corresponding analytics could take one of the following approaches: 

  1. Using color segmentation, contour detection, and simple heuristics, assuming that empty parking spots are regions of the parking lot not covered by cars: 

import cv2 

import numpy as np 

 

def count_empty_parking_spots(image_path, debug=False): 

    image = cv2.imread(image_path) 

    if image is None: 

        raise FileNotFoundError(f"Could not read image: {image_path}") 

    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) 

 

    # Segment pavement (brightness, not car colors) 

    # Pavement is usually light gray under sunlight in aerial images 

    lower_pavement = np.array([0, 0, 180])    # min hue/sat, high V (brightness) 

    upper_pavement = np.array([180, 35, 255]) # low saturation to ignore cars 

 

    mask_pavement = cv2.inRange(hsv, lower_pavement, upper_pavement) 

 

    # Remove small noise 

    kernel = np.ones((7, 7), np.uint8) 

    mask_clean = cv2.morphologyEx(mask_pavement, cv2.MORPH_OPEN, kernel) 

    mask_clean = cv2.morphologyEx(mask_clean, cv2.MORPH_CLOSE, kernel) 

 

    # Adaptive thresholding as backup (if pavement color varies) 

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 

    adaptive = cv2.adaptiveThreshold( 

        gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 25, -14) 

    combined = cv2.bitwise_and(mask_clean, adaptive) 

 

    # Find contours (Connected bright regions interpreted as parking spots) 

    contours, _ = cv2.findContours(combined, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 

 

    empty_count = 0 

    display = image.copy() 

 

    # Filter contours: parking spots should be rectangular and large enough 

    for contour in contours: 

        area = cv2.contourArea(contour) 

        x, y, w, h = cv2.boundingRect(contour) 

        aspect = float(w) / float(h) if h > 0 else 0 

        # Tune these heuristics to ignore stray bright patches & small noise 

        if 400 < area < 2500 and 1.3 < aspect < 2.7 and w > 20 and h > 11: 

            empty_count += 1 

            cv2.rectangle(display, (x, y), (x+w, y+h), (0,255,0), 2) 

    

    print(f"Detected {empty_count} empty parking spots") 

    if debug: 

        cv2.imshow("Parking Detection", display) 

        cv2.imshow("Mask", mask_clean) 

        cv2.imshow("Adaptive", adaptive) 

        cv2.imshow("Combined", combined) 

        cv2.imwrite(image_path.replace(".jpg","")+"-detectedspots.jpg", display) 

        cv2.waitKey(0) 

        cv2.destroyAllWindows() 

 

    return empty_count 

 

# Example usage for your two frames: 

count1 = count_empty_parking_spots("parking1.jpg", debug=True) 

count2 = count_empty_parking_spots("parking2.jpg", debug=True) 

 

print(f"parking1.jpg: {count1} empty spots (should be about 2)") 

print(f"parking2.jpg: {count2} empty spot (should be about 1)") 

""" 

Results: Detected 1 empty parking spots 

Detected 1 empty parking spots 

parking1.jpg: 1 empty spots (should be about 2) 

parking2.jpg: 1 empty spot (should be about 1) 

"""  

  2. Using YOLOv5 for speed, precision, and adaptability to aerial views: 

import torch 

import cv2 


 

# Load YOLOv5 model (you can use 'yolov5s', 'yolov5m', or a custom-trained model) 

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True) 

 

# Load image 

image_path = 'parking2.jpg' 

img = cv2.imread(image_path) 

img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 

 

# Run inference 

results = model(img_rgb) 

 

# Parse results 

detections = results.pandas().xyxy[0] 

cars = detections[detections['name'] == 'car'] 

 

# Optional: If you have predefined parking spot coordinates 

# Define parking spots manually or load from a JSON file 

parking_spots = [ 

     {'id': 1, 'bbox': [815, 184, 992, 387]}, 

     {'id': 2, 'bbox': [230, 502, 273, 619]}, 

     {'id': 3, 'bbox': [427, 8, 468, 691]}, 

    # Add more spots... 

] 

 

# Check overlap between cars and parking spots 

def is_occupied(spot_bbox, car_bboxes): 

    x1, y1, x2, y2 = spot_bbox 

    spot_area = (x2 - x1) * (y2 - y1) 

    for _, car in car_bboxes.iterrows(): 

        cx1, cy1, cx2, cy2 = car[['xmin', 'ymin', 'xmax', 'ymax']] 

        ix1, iy1 = max(x1, cx1), max(y1, cy1) 

        ix2, iy2 = min(x2, cx2), min(y2, cy2) 

        iw, ih = max(0, ix2 - ix1), max(0, iy2 - iy1) 

        intersection = iw * ih 

        if intersection / spot_area > 0.3:  # Threshold for overlap 

            return True 

    return False 

 

# Count unoccupied spots 

unoccupied_count = 0 

for spot in parking_spots: 

    if not is_occupied(spot['bbox'], cars): 

        unoccupied_count += 1 

 

print(f"Unoccupied parking spots: {unoccupied_count}") 

The results of the second option appear as: 

YOLOv5  2025-7-21 Python-3.13.1 torch-2.7.1+cpu CPU 

 

Fusing layers... 

YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs 

Adding AutoShape... 

C:\Users\ravib/.cache\torch\hub\ultralytics_yolov5_master\models\common.py:906: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead. 

  with amp.autocast(autocast): 

Unoccupied parking spots: 3 

The given input image for this run is an aerial view of a city street. 
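
Tying this back to the just-in-time framing at the top of this post, here is a minimal sketch of how such an analytic might be dispatched only when a query asks for it. The keyword matching is a placeholder assumption, and count_empty_parking_spots is the function from the first option: 

def answer_query(query, image_path): 

    # Hypothetical just-in-time dispatch: run the parking analytic 

    # only when the query actually asks about empty spots. 

    if "empty" in query.lower() or "unoccupied" in query.lower(): 

        return count_empty_parking_spots(image_path) 

    return None  # other analytics would be dispatched here 

answer_query("How many unoccupied parking spots are there?", "parking1.jpg") 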


Wednesday, October 1, 2025

 Calculating distance between scenes 

The distance between two different scenes captured in aerial drone images can be computed easily from their GPS coordinates. For example:

Scene 1: (42.3755, -71.1180)

Scene 2: (42.3770, -71.1167)

The distance between them is calculated with the Haversine formula, which computes the shortest distance over the Earth's surface between two latitude-longitude points.

If the two locations have GPS coordinates (lat1,lon1) and (lat2,lon2):

The steps in Python are:

 Convert latitudes and longitudes from degrees to radians.

 Compute differences Δlat=lat2−lat1 and Δlon=lon2−lon1.

 Use the Haversine formula:

a = \sin^2(\Delta\mathrm{lat}/2) + \cos(\mathrm{lat}_1) \cdot \cos(\mathrm{lat}_2) \cdot \sin^2(\Delta\mathrm{lon}/2)

c = 2 \cdot \operatorname{atan2}(\sqrt{a}, \sqrt{1-a})

d = R \cdot c

where R is the Earth's radius (mean radius = 6371 km).

import math

def haversine_distance(lat1, lon1, lat2, lon2):

    R = 6371 # Earth radius in km

    lat1_rad = math.radians(lat1)

    lat2_rad = math.radians(lat2)

    delta_lat = math.radians(lat2 - lat1)

    delta_lon = math.radians(lon2 - lon1)

    a = math.sin(delta_lat / 2) ** 2 + math.cos(lat1_rad) * math.cos(lat2_rad) * math.sin(delta_lon / 2) ** 2

    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

    distance = R * c

    return distance

# Example usage:

# Scene 1 GPS: lat1, lon1

# Scene 2 GPS: lat2, lon2

lat1, lon1 = 42.3755, -71.1180 # Scene 1 approx coords

lat2, lon2 = 42.3770, -71.1167 # Scene 2 approx coords

distance_km = haversine_distance(lat1, lon1, lat2, lon2)

print(f"Distance between scenes: {distance_km:.3f} km")

Distance between scenes: 0.198 km
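
As a cross-check, the same distance can be computed with the third-party geopy package (an assumption here, since it is not used elsewhere in this post); its geodesic routine uses an ellipsoidal Earth model, so its result can differ slightly from the spherical Haversine value:

from geopy.distance import geodesic

scene1 = (42.3755, -71.1180)

scene2 = (42.3770, -71.1167)

print(f"Geodesic distance: {geodesic(scene1, scene2).km:.3f} km")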