Tuesday, October 21, 2025

Here is a collection of Python code samples using OpenCV (cv2) that covers a wide range of aerial drone analytics use cases for urban areas. Each mini-snippet illustrates a different task typical of urban analytics from drone imagery:

1. Object Tracking (Vehicle or Person) 

import cv2 
import numpy as np 
 
cap = cv2.VideoCapture('drone_video.mp4') 
ret, frame = cap.read() 
x, y, w, h = 600, 400, 60, 60  # ROI coordinates to start with (tune manually) 
track_window = (x, y, w, h) 
 
roi = frame[y:y+h, x:x+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) 
mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180]) 
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 1) 
 
while True: 
    ret, frame = cap.read() 
    if not ret: break 
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) 
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1) 
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    x, y, w, h = track_window 
    cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2) 
    cv2.imshow('Tracking', frame) 
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
cap.release() 
cv2.destroyAllWindows() 
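If the meanShift window drifts on cluttered urban footage, a correlation-filter tracker is often steadier. The snippet below is a minimal alternative sketch using OpenCV's CSRT tracker; it assumes an opencv-contrib build (in some versions the factory lives under cv2.legacy.TrackerCSRT_create) and reuses the same hand-tuned starting box.

import cv2

cap = cv2.VideoCapture('drone_video.mp4')
ret, frame = cap.read()
bbox = (600, 400, 60, 60)           # same manually tuned starting ROI as above
tracker = cv2.TrackerCSRT_create()  # may be cv2.legacy.TrackerCSRT_create in some builds
tracker.init(frame, bbox)

while True:
    ret, frame = cap.read()
    if not ret: break
    ok, bbox = tracker.update(frame)  # ok is False when the target is lost
    if ok:
        x, y, w, h = map(int, bbox)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow('CSRT Tracking', frame)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()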

 

2. Parking Slot Occupancy Detection 

import cv2 
import numpy as np 
 
img = cv2.imread('drone_parkinglot.jpg') 
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) 
# Threshold for pavement/light regions (empty): low saturation, high value 
mask = cv2.inRange(hsv, (0, 0, 170), (180, 30, 255)) 
kernel = np.ones((9,9),np.uint8) 
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel) 
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 
empty_count = 0 
for cnt in contours: 
    area = cv2.contourArea(cnt) 
    x, y, w, h = cv2.boundingRect(cnt) 
    aspect = w/h if h else 0 
    if 450 < area < 2500 and 1.2 < aspect < 2.6: 
        empty_count += 1 
        cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0),2) 
print(f"Empty spots: {empty_count}") 
cv2.imshow('Parking', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
''' 
Result:  

Empty spots: 1 

''' 
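The contour heuristic above only finds bright empty pavement. If the slot layout is known, it is usually more reliable to check each slot rectangle directly. This is a rough sketch under the assumption that the slot coordinates have been marked up manually for the same image; the edge-density threshold (0.06) is a placeholder to tune.

import cv2
import numpy as np

img = cv2.imread('drone_parkinglot.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 60, 150)

# Hypothetical hand-labelled slot rectangles: (x, y, w, h)
slots = [(120, 80, 55, 110), (180, 80, 55, 110), (240, 80, 55, 110)]

for (x, y, w, h) in slots:
    patch = edges[y:y+h, x:x+w]
    density = cv2.countNonZero(patch) / float(w * h)  # parked cars add edges inside the slot
    occupied = density > 0.06                         # threshold to tune per dataset
    color = (0, 0, 255) if occupied else (0, 255, 0)
    cv2.rectangle(img, (x, y), (x+w, y+h), color, 2)

cv2.imshow('Slot occupancy', img)
cv2.waitKey(0)
cv2.destroyAllWindows()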

 

3. Road and Lane Detection 

import cv2 
import numpy as np 
 
img = cv2.imread('drone_urban_road.jpg') 
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
blur = cv2.GaussianBlur(gray, (7,7), 0) 
edges = cv2.Canny(blur, 80, 180) 
lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=80, minLineLength=80, maxLineGap=10) 
if lines is not None:
    print(f"Lines: {len(lines)}")
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 3)
cv2.imshow('Lanes', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

''' 
Result:  

Lines: 27 

''' 
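On full aerial frames the Hough transform also picks up building edges and rooftop lines. A common refinement is to mask the edge image to a road polygon before the transform; the polygon below is a rough placeholder and would need to be adapted to the actual frame.

import cv2
import numpy as np

img = cv2.imread('drone_urban_road.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(cv2.GaussianBlur(gray, (7, 7), 0), 80, 180)

# Hypothetical polygon roughly covering the road area (adjust to your frame)
h, w = edges.shape
roi = np.array([[(0, h), (0, int(h*0.55)), (w, int(h*0.55)), (w, h)]], dtype=np.int32)
mask = np.zeros_like(edges)
cv2.fillPoly(mask, roi, 255)
edges = cv2.bitwise_and(edges, mask)   # keep only edges inside the polygon

lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=80, minLineLength=80, maxLineGap=10)
if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 3)
cv2.imshow('Masked lanes', img)
cv2.waitKey(0)
cv2.destroyAllWindows()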

 

 

4. Building Footprint Segmentation 

import cv2 
import numpy as np 
 
img = cv2.imread('drone_buildings.jpg') 
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
_, thresh = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY) 
kernel = np.ones((11,11),np.uint8) 
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel) 
contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 
for c in contours: 
    if cv2.contourArea(c) > 3000: 
        cv2.drawContours(img, [c], -1, (255,0,0), 3) 
cv2.imshow('Buildings', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
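A fixed threshold of 160 assumes bright rooftops and even lighting, so shadows often break it. As a hedged variant, adaptive thresholding plus polygon approximation tends to give cleaner, roughly rectangular footprints; the block size and epsilon below are starting values, not validated settings.

import cv2
import numpy as np

img = cv2.imread('drone_buildings.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 51, -5)   # local threshold handles shadows
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, np.ones((11, 11), np.uint8))
contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    if cv2.contourArea(c) > 3000:
        poly = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)  # simplify the outline
        cv2.polylines(img, [poly], True, (255, 0, 0), 3)
cv2.imshow('Building polygons', img)
cv2.waitKey(0)
cv2.destroyAllWindows()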

 
 

 

5. Crowd Counting in Public Spaces 

import cv2 
import numpy as np 
 
img = cv2.imread('drone_crowd.jpg') 
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
_,thresh = cv2.threshold(gray,180,255,cv2.THRESH_BINARY_INV) 
kernel = np.ones((5,5),np.uint8) 
opened = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel) 
contours, _ = cv2.findContours(opened, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 
count = 0 
for c in contours: 
    area = cv2.contourArea(c) 
    if 60 < area < 400:  # people blobs 
        count += 1 
        x,y,w,h = cv2.boundingRect(c) 
        cv2.rectangle(img, (x,y), (x+w,y+h), (0,200,0), 2) 
print(f"Counted people: {count}") 
cv2.imshow('Crowd', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

'''
Result:

Counted people: 3

'''
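Thresholding plus contour areas is sensitive to how bright people appear against the pavement. An alternative sketch uses cv2.SimpleBlobDetector, which filters by area and circularity in one pass; the parameter values below are assumptions to tune against your own imagery.

import cv2

img = cv2.imread('drone_crowd.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea, params.maxArea = 60, 400   # same people-sized range as above
params.filterByCircularity = True
params.minCircularity = 0.5                # heads/bodies look roughly round from above
detector = cv2.SimpleBlobDetector_create(params)

keypoints = detector.detect(gray)
print(f"Counted people (blobs): {len(keypoints)}")
out = cv2.drawKeypoints(img, keypoints, None, (0, 200, 0),
                        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('Crowd blobs', out)
cv2.waitKey(0)
cv2.destroyAllWindows()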

6. QR Code or Marker Detection (for drone navigation) 

import cv2 
 
img = cv2.imread('drone_marker.jpg') 
detector = cv2.QRCodeDetector() 
retval, decoded, points, _ = detector.detectAndDecodeMulti(img) 
if points is not None: 
    for pt in points: 
        pts = pt.astype(int).reshape(-1,2) 
        for i in range(len(pts)): 
            cv2.line(img, tuple(pts[i]), tuple(pts[(i+1)%4]), (255,0,0), 2) 
print("Found QR codes:", decoded) 
cv2.imshow('QR Codes', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
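QR codes are dense and can be hard to resolve from altitude; fiducial ArUco markers are designed for exactly this kind of landing-pad or waypoint detection. A minimal sketch, assuming OpenCV 4.7+ (older builds expose a cv2.aruco.detectMarkers function instead and need the opencv-contrib-python package) and a DICT_4X4_50 dictionary:

import cv2

img = cv2.imread('drone_marker.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
detector = cv2.aruco.ArucoDetector(aruco_dict, cv2.aruco.DetectorParameters())
corners, ids, _ = detector.detectMarkers(gray)

if ids is not None:
    cv2.aruco.drawDetectedMarkers(img, corners, ids)
    print("Found ArUco IDs:", ids.flatten().tolist())
cv2.imshow('ArUco markers', img)
cv2.waitKey(0)
cv2.destroyAllWindows()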

 

7. Built-up/Impervious Surface Extraction 

import cv2 
import numpy as np 
 
img = cv2.imread('urban_aerial.jpg') 
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
# Otsu's threshold to separate built-up vs green/open areas 
_,mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 
kernel = np.ones((9,9),np.uint8) 
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) 
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 
for c in contours: 
    if cv2.contourArea(c) > 2000: 
        cv2.drawContours(img, [c], -1, (0,0,255), 2) 
cv2.imshow('Built-up', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
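Otsu only splits bright versus dark pixels, so shadows and water can end up in the "built-up" class. One hedged refinement is to remove obviously green vegetation with an HSV mask first and then report the impervious fraction; the hue range below is a rough assumption for typical RGB orthophotos.

import cv2
import numpy as np

img = cv2.imread('urban_aerial.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

_, built = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
green = cv2.inRange(hsv, (35, 40, 40), (90, 255, 255))   # rough vegetation range
built = cv2.bitwise_and(built, cv2.bitwise_not(green))   # drop vegetated pixels
built = cv2.morphologyEx(built, cv2.MORPH_CLOSE, np.ones((9, 9), np.uint8))

fraction = cv2.countNonZero(built) / float(built.size)
print(f"Approx. impervious surface fraction: {fraction:.1%}")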


 

These samples provide practical starting points for many common urban aerial analytics workflows with OpenCV and Python. For more advanced tasks (e.g. semantic segmentation, vehicle type recognition, change detection), deep learning models and integration with libraries such as TensorFlow or PyTorch are recommended for production use.
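As a pointer in that direction, OpenCV's dnn module can run exported models without pulling a full framework into the inference pipeline. A minimal sketch, assuming a hypothetical ONNX detection model ('urban_model.onnx') whose input size and output decoding you would replace with your own:

import cv2

net = cv2.dnn.readNetFromONNX('urban_model.onnx')   # hypothetical exported model
img = cv2.imread('urban_aerial.jpg')
blob = cv2.dnn.blobFromImage(img, 1/255.0, (640, 640), swapRB=True)  # input size depends on the model
net.setInput(blob)
out = net.forward()
print("Raw output shape:", out.shape)   # decoding boxes/classes is model-specific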
