A few months ago I wrote here about our work on "Real-Time Identification of Animals found in Domestic Areas of Europe", and also here about our success in powering this learning-based real-time system with solar energy. Below I present our paper titled "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System", which describes the entire work and our results.

Abstract: This paper presents a self-sufficient solar-powered real-time deep learning (DL) based system that runs 100% on solar energy and consists of an Nvidia Jetson TX2 board and a dual-axis solar tracker based on the cast-shadow principle. To achieve both higher energy generation by the solar tracker and lower energy consumption by the DL-based real-time system, we a) upgraded our solar tracker panel with a higher number of polycrystalline photovoltaic (PV) cells and connected it to a chain consisting of two inverters, a battery, and a solar charge controller; b) implemented a motion detection method that triggers the inference process only when there is significant motion in the webcam frame. Experimental results show that our solar tracker generates sufficient and constant solar energy for all 4 DL models (VGG-19, InceptionV3, ResNet-50 and MobileNetV2) running in real time on the Nvidia Jetson TX2 platform, which requires more than five times less energy than a laptop with an Nvidia GTX 1060 GPU, proving that real-time DL-based systems can be powered by solar trackers without needing a conventional wall plug or paying electricity bills.
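
As a minimal sketch of the motion gate from point b), distilled from the webcam_motion.py listing further below (the blur kernel, the binary threshold of 25, the contour-area threshold of 100 and the OpenCV 4.x findContours signature are all taken from that listing), inference is only triggered when the current frame differs noticeably from a reference frame:

import cv2

def significant_motion(reference_gray, frame, min_contour_area=100):
    # Gray and blur the frame, diff it against the reference frame,
    # threshold and dilate the difference, then check the contour sizes
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    delta = cv2.absdiff(reference_gray, gray)
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return any(cv2.contourArea(c) > min_contour_area for c in contours)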

You can read the paper here.

Here I have published all project files related to our Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System.

fps_batch_Testing.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""
import argparse
import datetime
import time

import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile

parser = argparse.ArgumentParser(description='fps testing')
parser.add_argument('--video', dest='video',
                    help='Path to video file to use instead of the webcam')
parser.add_argument('--gpu_mem', dest='gpu_mem', default='0.8',
                    help='Choose GPU memory allocation from 0 to 1 in fraction')
args = parser.parse_args()

# Read the whole video into memory, resized to the 224x224 network input size
cap = cv2.VideoCapture(args.video)
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(frameCount, frameWidth, frameHeight)

buffer = []
fc = 0
while True:
    ret, frame = cap.read()
    if ret:
        buffer.append(cv2.resize(frame, (224, 224)))
        fc += 1
    else:
        break

cap.release()
cv2.destroyAllWindows()

buffer = np.array(buffer)
print(buffer.shape)

# Limit GPU memory usage and load the frozen TensorFlow graph
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = float(args.gpu_mem)  # GPU utilization
sess = tf.Session(config=config)
f = gfile.FastGFile("./model/tf_model.pb", 'rb')
graph_def = tf.GraphDef()

# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
softmax_tensor = sess.graph.get_tensor_by_name('import/fc1000/Softmax:0')
sess.graph.as_default()

print('Model loaded')

# Batch sizes to benchmark
num_frames_array = [1, 2, 4, 8, 16, 24, 30, 60]

RESULT_SAVER = []
RESULT_SAVER.append(['No. of frames', 'Time',
                     'Total number of frames in video: ' + str(buffer.shape[0]),
                     'GPU memory utilization: ' + args.gpu_mem])

# Time how long the whole video takes to run through the model
# for each batch size
for num_frames in num_frames_array:
    start_time = time.time()
    for i in range(0, buffer.shape[0], num_frames):
        _ = sess.run(softmax_tensor, {'import/input_1:0': buffer[i:i + num_frames, :, :, :]})
        print(i)
    elapsed_time = time.time() - start_time
    print(str(num_frames) + ' fps taken: ' + str(elapsed_time) + ' sec')
    RESULT_SAVER.append([num_frames, elapsed_time])

file_name = str(datetime.datetime.now()).split('.')[0].replace(':', '_').replace(' ', '_').replace('-', '_')

print(RESULT_SAVER)
with open(file_name + '_frame_test.txt', 'w') as f:
    for item in RESULT_SAVER:
        f.write("%s\n" % ' | '.join(repr(str(n)) for n in item))
print('Results are saved in this file: ' + file_name + '_frame_test.txt')
sess.close()
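
For reference, a typical invocation of this benchmark could look as follows (test_video.mp4 is a hypothetical file name; the frozen graph is expected under ./model/tf_model.pb):

python fps_batch_Testing.py --video test_video.mp4 --gpu_mem 0.8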

inference_worker.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""

import queue
from multiprocessing import Process

import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.python.platform import gfile

# Run inference on the model given images
def run_inference(model, images):
    # Create the batch out of the list of preprocessed images
    batch = preprocess_input(images.astype('float'))

    # Run inference
    predictions = model.predict_on_batch(batch)
    # Take the average predictions across all images
    predictions_mean = np.mean(predictions, axis=0)
    # Find the predicted class (argmax)
    return np.argmax(predictions_mean), predictions_mean, predictions

# This will perform inference on the model in a separate process,
# so that we can continue playing the video/webcam in the main process
class InferenceWorker(Process):
    def __init__(self, data_q, result_q, ready_q, which_model, gpu_mem):
        Process.__init__(self, name='ModelProcessor')
        # Queues for sharing data with the main process
        self.data_q = data_q
        self.result_q = result_q
        self.ready_q = ready_q
        self.which_model = which_model
        self.gpu_mem = gpu_mem

    def run(self):
        # load model
        print('Loading model')
        if self.which_model == 'keras':
            print("Running keras model")
            from tensorflow import keras
            from tensorflow.keras import backend as k
            config = tf.ConfigProto()

            # Don't pre-allocate memory; allocate as-needed
            config.gpu_options.allow_growth = True

            # Only allow the requested fraction of GPU memory to be allocated
            config.gpu_options.per_process_gpu_memory_fraction = float(self.gpu_mem)

            # Create a session with the above options specified.
            k.set_session(tf.Session(config=config))

            model = keras.models.load_model('checkpoints/run7-epoch_51.hdf5')
            print('Model loaded')
            # Alert the main process that the model is ready for images
            self.ready_q.put('ready')
            # Process images until the main thread tells us to stop
            while True:
                try:
                    (time, images, original_images) = self.data_q.get(True)
                    # Signal from main process to exit
                    if time == "exit":
                        print('Stopping inference thread')
                        break
                    prediction, predictions, original_predictions = run_inference(model, images)
                    self.result_q.put((time, prediction, predictions, original_images, original_predictions))

                except queue.Empty:
                    continue
        if self.which_model == 'tf':
            print("Running Tensorflow model")

            config = tf.ConfigProto()
            config.gpu_options.per_process_gpu_memory_fraction = float(self.gpu_mem)
            sess = tf.Session(config=config)
            f = gfile.FastGFile("./model/tf_model.pb", 'rb')
            graph_def = tf.GraphDef()

            # Parses a serialized binary message into the current message.
            graph_def.ParseFromString(f.read())
            f.close()
            # Import a serialized TensorFlow `GraphDef` protocol buffer
            # and place into the current default `Graph`.
            tf.import_graph_def(graph_def)
            softmax_tensor = sess.graph.get_tensor_by_name('import/fc1000/Softmax:0')
            sess.graph.as_default()

            print('Model loaded')

            # Alert the main process that the model is ready for images
            self.ready_q.put('ready')
            # Process images until the main thread tells us to stop
            while True:
                try:
                    (time, images, original_images) = self.data_q.get(True)
                    # Signal from main process to exit
                    if time == "exit":
                        print('Stopping inference thread')
                        break

                    batch = preprocess_input(images.astype('float'))
                    # Run inference
                    predictions = sess.run(softmax_tensor, {'import/input_1:0': batch})
                    predictions_mean = np.mean(predictions, axis=0)
                    print("predictions_mean", predictions_mean.shape, np.argmax(predictions_mean))
                    # Find the predicted class (argmax)
                    prediction, predictions, original_predictions = np.argmax(predictions_mean), predictions_mean, predictions
                    self.result_q.put((time, prediction, predictions, original_images, original_predictions))

                except queue.Empty:
                    continue

        print('Inference thread done')

jetson_power_measurement.py

#!/usr/bin/env python3
"""
Convenient power measurement script for the Jetson TX2/Tegra X2. https://embeddeddl.wordpress.com/2018/04/25/convenient-power-measurements-on-the-jetson-tx2-tegra-x2-board/
relevant docs: http://developer2.download.nvidia.com/embedded/L4T/r27_Release_v1.0/Docs/Tegra_Linux_Driver_Package_Release_Notes_R27.1.pdf
@author: Lukas Cavigelli (cavigelli@iis.ee.ethz.ch)

Sorin Liviu Jurj only used this code in his experiments related to the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""
import os

# descr, i2c-addr, channel
_nodes = [('module/main', '0041', '0'),
          ('module/cpu', '0041', '1'),
          ('module/ddr', '0041', '2'),
          ('module/gpu', '0040', '0'),
          ('module/soc', '0040', '1'),
          ('module/wifi', '0040', '2'),

          ('board/main', '0042', '0'),
          ('board/5v0-io-sys', '0042', '1'),
          ('board/3v3-sys', '0042', '2'),
          ('board/3v3-io-sleep', '0043', '0'),
          ('board/1v8-io', '0043', '1'),
          ('board/3v3-m.2', '0043', '2'),
          ]

_valTypes = ['power', 'voltage', 'current']
_valTypesFull = ['power [mW]', 'voltage [mV]', 'current [mA]']

def getNodes():
    """Returns a list of all power measurement nodes, each a
    tuple of format (name, i2c-addr, channel)"""
    return _nodes

def getNodesByName(nameList=['module/main']):
    return [_nodes[[n[0] for n in _nodes].index(name)] for name in nameList]

def powerSensorsPresent():
    """Check whether we are on the TX2 platform/whether the sensors are present"""
    return os.path.isdir('/sys/bus/i2c/drivers/ina3221x/0-0041/iio_device/')

def getPowerMode():
    return os.popen("nvpmodel -q | grep 'Power Mode'").read()[15:-1]

def readValue(i2cAddr='0041', channel='0', valType='power'):
    """Reads a single value from the sensor"""
    fname = '/sys/bus/i2c/drivers/ina3221x/0-%s/iio_device/in_%s%s_input' % (i2cAddr, valType, channel)
    with open(fname, 'r') as f:
        return f.read()

def getModulePower():
    """Returns the current power consumption of the entire module in mW."""
    return float(readValue(i2cAddr='0041', channel='0', valType='power'))

def getAllValues(nodes=_nodes):
    """Returns all values (power, voltage, current) for a specific set of nodes."""
    return [[float(readValue(i2cAddr=node[1], channel=node[2], valType=valType))
             for valType in _valTypes]
            for node in nodes]

def printFullReport():
    """Prints a full report, i.e. (power, voltage, current) for all measurement nodes."""
    from tabulate import tabulate
    header = []
    header.append('description')
    for vt in _valTypesFull:
        header.append(vt)

    resultTable = []
    for descr, i2cAddr, channel in _nodes:
        row = []
        row.append(descr)
        for valType in _valTypes:
            row.append(readValue(i2cAddr=i2cAddr, channel=channel, valType=valType))
        resultTable.append(row)
    print(tabulate(resultTable, header))

import threading
import time

class PowerLogger:
    """This is an asynchronous power logger.
    Logging can be controlled using start(), stop().
    Special events can be marked using recordEvent().
    Results can be accessed through getDataTrace(), showDataTraces(),
    getTotalEnergy() and getAveragePower().
    """
    def __init__(self, interval=0.01, nodes=_nodes):
        """Constructs the power logger and sets a sampling interval (default: 0.01s)
        and fixes which nodes are sampled (default: all of them)"""
        self.interval = interval
        self._startTime = -1
        self.eventLog = []
        self.dataLog = []
        self._nodes = nodes

    def start(self):
        """Starts the logging activity"""
        # define the inner function called regularly by the thread to log the data
        def threadFun():
            # start next timer
            self.start()
            # log data
            t = self._getTime() - self._startTime
            self.dataLog.append((t, getAllValues(self._nodes)))
            # ensure long enough sampling interval
            t2 = self._getTime() - self._startTime
            assert(t2 - t < self.interval)

        # setup the timer and launch it
        self._tmr = threading.Timer(self.interval, threadFun)
        self._tmr.start()
        if self._startTime < 0:
            self._startTime = self._getTime()

    def _getTime(self):
        return time.clock_gettime(time.CLOCK_REALTIME)

    def recordEvent(self, name):
        """Records a marker for a specific event (with name)"""
        t = self._getTime() - self._startTime
        self.eventLog.append((t, name))

    def stop(self):
        """Stops the logging activity"""
        self._tmr.cancel()

    def getDataTrace(self, nodeName='module/main', valType='power'):
        """Return a list of sample values and time stamps for a specific measurement node and type"""
        pwrVals = [itm[1][[n[0] for n in self._nodes].index(nodeName)][_valTypes.index(valType)]
                   for itm in self.dataLog]
        timeVals = [itm[0] for itm in self.dataLog]
        return timeVals, pwrVals

    def showDataTraces(self, names=None, valType='power', showEvents=True):
        """creates a PyPlot figure showing all the measured power traces and event markers"""
        if names is None:
            names = [name for name, _, _ in self._nodes]

        # prepare data to display
        TPs = [self.getDataTrace(nodeName=name, valType=valType) for name in names]
        Ts, _ = TPs[0]
        Ps = [p for _, p in TPs]
        energies = [self.getTotalEnergy(nodeName=nodeName) for nodeName in names]
        Ps = list(map(list, zip(*Ps)))  # transpose list of lists

        # draw figure
        #import matplotlib.pyplot as plt
        #plt.plot(Ts, Ps)
        #plt.xlabel('time [s]')
        #plt.ylabel(_valTypesFull[_valTypes.index(valType)])
        #plt.grid(True)
        #plt.legend(['%s (%.2f J)' % (name, enrgy / 1e3) for name, enrgy in zip(names, energies)])
        #plt.title('power trace (NVPModel: %s)' % (os.popen("nvpmodel -q | grep 'Power Mode'").read()[15:-1],))
        #if showEvents:
        #    for t, _ in self.eventLog:
        #        plt.axvline(x=t, color='black')

    def showMostCommonPowerValue(self, nodeName='module/main', valType='power', numBins=100):
        """computes a histogram of power values and print most frequent bin"""
        import numpy as np
        _, pwrData = np.array(self.getDataTrace(nodeName=nodeName, valType=valType))
        count, center = np.histogram(pwrData, bins=numBins)
        #import matplotlib.pyplot as plt
        #plt.bar((center[:-1] + center[1:]) / 2.0, count, align='center')
        maxProbVal = center[np.argmax(count)]  # 0.5*(center[np.argmax(count)] + center[np.argmax(count)+1])
        print('max frequent power bin value [mW]: %f' % (maxProbVal,))

    def getTotalEnergy(self, nodeName='module/main', valType='power'):
        """Integrate the power consumption over time."""
        timeVals, dataVals = self.getDataTrace(nodeName=nodeName, valType=valType)
        assert(len(timeVals) == len(dataVals))
        tPrev, wgtdSum = 0.0, 0.0
        for t, d in zip(timeVals, dataVals):
            wgtdSum += d * (t - tPrev)
            tPrev = t
        return wgtdSum

    def getAveragePower(self, nodeName='module/main', valType='power'):
        energy = self.getTotalEnergy(nodeName=nodeName, valType=valType)
        timeVals, _ = self.getDataTrace(nodeName=nodeName, valType=valType)
        return energy / timeVals[-1]

if __name__ == "__main__":

    printFullReport()
    # print(getModulePower())
    # pl = PowerLogger(interval=0.05, nodes=getNodesByName(['module/main', 'board/main']))
    pl = PowerLogger(interval=0.05, nodes=list(filter(lambda n: n[0].startswith('module/'), getNodes())))
    pl.start()
    time.sleep(2)
    pl.recordEvent('ding! 3s')
    os.system('stress -c 12 -t 3')
    time.sleep(1.5)
    pl.recordEvent('ding! 2s')
    os.system('stress -c 1 -t 2')
    time.sleep(2)
    pl.recordEvent('ding! 1s')
    os.system('stress -c 2 -t 1')
    time.sleep(1.5)
    pl.stop()
    pl.showDataTraces()

preprocessing.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""

import cv2
import numpy as np

# Resize image
# by specifying the size of the smaller side
def resize_to(img, size=256):
    if img is None:
        return

    (h, w) = img.shape[:2]
    # Find smaller side
    if h < w:
        ratio = size / h
    else:
        ratio = size / w

    # Here we have width by height
    outsize = (int(w * ratio), int(h * ratio))
    return cv2.resize(img, outsize)

# Crop the center region
def crop_center(img, crop_size=224):
    y, x = img.shape[:2]
    startx = x // 2 - (crop_size // 2)
    starty = y // 2 - (crop_size // 2)
    return img[starty:starty + crop_size, startx:startx + crop_size, ...]

# Crop any random part of the image. This is useful if we are processing a batch
# of images
def random_crop(img, random_crop_size=224):
    height, width = img.shape[0], img.shape[1]
    dx = random_crop_size
    dy = random_crop_size
    x = np.random.randint(0, width - dx + 1)
    y = np.random.randint(0, height - dy + 1)
    return img[y:(y + dy), x:(x + dx), :]

# Preprocess the fullsize image.
def preprocess_image(image, do_random_crop=False, resize_size=256,
                     crop_size=224):
    # Resize the image
    resized = resize_to(image, size=resize_size)
    # Crop part of the resized image
    if do_random_crop:
        cropped = random_crop(resized, crop_size)
    else:
        cropped = crop_center(resized, crop_size)
    return cropped

# Preprocess a batch of images
def preprocess_all(images):
    # Preprocess images according to how many images we have:
    # for a single image we use the deterministic center crop,
    # for more than one image we can do random cropping
    if len(images) == 1:
        do_random_crop = False
        resize_size = 256
    else:
        do_random_crop = True
        resize_size = 256

    # Preprocess each image
    return np.asarray(
        [preprocess_image(image.copy(), do_random_crop, resize_size) for image
         in images])
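
A quick usage sketch (example.jpg is a hypothetical input image): a single frame is resized so that its smaller side is 256 px and then center-cropped to 224x224:

import cv2
from preprocessing import preprocess_all

frame = cv2.imread('example.jpg')  # hypothetical input image
batch = preprocess_all([frame])    # single image: deterministic center crop
print(batch.shape)                 # (1, 224, 224, 3)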

speed_test.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""

# USAGE
# python speed_test.py

# import the necessary packages
from __future__ import print_function

import argparse
import time

import cv2
import numpy as np
from tensorflow import keras

from inference_worker import run_inference
from preprocessing import preprocess_all
from webcamvideostream import WebcamVideoStream

# Test speed of processing by this computer
def test_inference_speed(num_frames):
    start_time = time.time()
    frames_with_times = vs.read_frames()
    frames = [v[0] for v in frames_with_times][:num_frames]
    _ = run_inference(model, preprocess_all(np.asarray(frames)))
    elapsed_time = time.time() - start_time
    return elapsed_time

# Find how many frames per second this computer can process:
# increase the batch size (1, 2, 4, 8, 16, 24) until a batch takes
# longer than one second, then return the last batch size that
# still fit into one second
def find_inference_parameters():
    prev_num_frames, prev_inference_time = None, None
    for num_frames in [1, 2, 4, 8, 16, 24]:
        print('Measuring inference speed for {} frames'.format(num_frames))
        # Warm model up too
        test_inference_speed(num_frames)
        inference_time = test_inference_speed(num_frames=num_frames)
        print('Measured inference time for {} frames: {:.3f}s'.format(num_frames, inference_time))
        if prev_num_frames is None:
            prev_num_frames, prev_inference_time = num_frames, inference_time
        if inference_time > 1:
            return prev_num_frames, prev_inference_time
        prev_num_frames, prev_inference_time = num_frames, inference_time

    return prev_num_frames, prev_inference_time

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Webcam demo')
    parser.add_argument('--video', dest='video',
                        default=0,
                        help='Path to video file to use instead of the webcam')
    args = parser.parse_args()

    # setup the model
    print('Loading model')
    model = keras.models.load_model('checkpoints/run7-epoch_51.hdf5')
    print('Warming cam up')
    # create a *threaded* video stream
    vs = WebcamVideoStream(src=args.video, max_frames=24).start()
    # Allow the cam to warm up
    time.sleep(2)

    frames_per_second, inference_time = find_inference_parameters()
    print('Use fps:', frames_per_second)

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
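
A typical run against a video file could look like this (my_clip.mp4 is a hypothetical file; without --video the webcam is used, and the Keras checkpoint under checkpoints/ must be present):

python speed_test.py --video my_clip.mp4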

webcam_demo.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""

# USAGE
# python webcam_demo.py

# import the necessary packages
from __future__ import print_function

import argparse
import csv
import os
import queue
import time
from datetime import datetime
from multiprocessing import Queue

import cv2
import numpy as np

from inference_worker import InferenceWorker
from preprocessing import preprocess_all
from webcamvideostream import WebcamVideoStream

# Display text on an image, used to show the video/webcam
def display_text(img, text, x=10, y=20):
    # prepare the text
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (x, y)
    fontScale = 0.5
    fontColor = (0, 0, 255)
    lineType = 1

    # Draw the text on the image
    cv2.putText(img, text,
                bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                lineType)
    # Show the image with text
    cv2.imshow("Frame", img)
    cv2.waitKey(1)

def process(video_source, num_frames, max_read_fps, output_file, which_model, gpu_mem):
    # Setup the worker and the queues for sharing data
    data_q = Queue()
    result_q = Queue()
    ready_q = Queue()

    inference_worker = InferenceWorker(data_q, result_q, ready_q, which_model, gpu_mem)
    inference_worker.start()

    # For analysis of predictions
    prev_predictions = []
    # Flag for if we are processing images at this time
    processing = False
    current_text = ""
    # Wait until the InferenceWorker is ready
    ready_q.get(True)

    # Start the video queue
    vs = WebcamVideoStream(src=video_source, max_frames=max_read_fps).start()

    # Allow the cam to warm up
    # The default video param of 0 means webcam
    if args.video == 0:
        time.sleep(1)

    while 1:
        try:
            frame = vs.read()
            if frame is None:
                print('DETECTED ANIMALS:', detections)
                # Save to CSV
                with open(output_file, 'w') as csvfile:
                    writer = csv.writer(csvfile)
                    writer.writerow(['Animal', 'From', 'To'])
                    for detection in detections:
                        animal, start, end = detection
                        writer.writerow(detection)

                # Signal to the inference process to quit
                data_q.put(("exit", False))
                # Finally, do a bit of cleanup
                cv2.destroyAllWindows()
                vs.stop()
                print('Stopping main thread')
                break
            display_text(frame.copy(), current_text)
            if not processing:
                frames_with_times = vs.read_frames()[:num_frames]
                frames = [v[0] for v in frames_with_times]
                data_q.put((time.time(), preprocess_all(frames), frames_with_times))
                processing = True

            try:
                scheduled_time, prediction, predictions, frames_with_times, original_predictions = result_q.get(False)

                # Save images
                save_images(frames_with_times, original_predictions)

                if prediction != imagenet_class:
                    current_text = f"{prediction_to_class[prediction]} detected. Confidence: ({predictions[prediction]:.3f})"
                else:
                    current_text = f"Nothing detected. Confidence: ({predictions[prediction]:.3f})"
                print(current_text)
                processing = False

                # record animal detections
                prev_predictions.append((scheduled_time, prediction))
                analyze_predictions(prev_predictions)
            except queue.Empty:
                pass

        except KeyboardInterrupt:
            print('DETECTED ANIMALS:', detections)
            # Save to CSV
            with open(output_file, 'w') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['Animal', 'From', 'To'])
                for detection in detections:
                    animal, start, end = detection
                    writer.writerow(detection)

            # Signal to the inference process to quit
            data_q.put(("exit", False))
            # Finally, do a bit of cleanup
            cv2.destroyAllWindows()
            vs.stop()
            print('Stopping main thread')
            break

# Find detected animal from last few seconds
def find_most_common_prediction(current_time, predictions):
    # Find predictions in last N seconds
    lookback_period = float(args.lookback_seconds)  # seconds
    lookback_predictions = []
    for prediction_time, prediction in predictions[::-1]:
        if current_time - lookback_period < prediction_time:
            lookback_predictions.append(prediction)
        else:
            # We have reached older times that don't interest us
            break
    return np.max(np.asarray(lookback_predictions))

# Convert UNIX timestamp to human date
def timestamp_to_date(timestamp):
    return datetime.utcfromtimestamp(timestamp).strftime(
        '%Y-%m-%d %H:%M:%S UTC')

# Analyze previous detections to record them
def analyze_predictions(all_predictions):
    global detections
    global start_time
    global current_class

    prediction_time, _ = all_predictions[-1]
    detected = find_most_common_prediction(prediction_time, all_predictions)
    print(detected)

    if detected != current_class:
        # We have detected something new

        if current_class != imagenet_class:
            # We have been detecting an animal,
            # so we have finished detecting this animal
            print('Finished detecting', prediction_to_class[current_class])

            end_time = prediction_time
            detections.append(
                (prediction_to_class[current_class],
                 timestamp_to_date(start_time),
                 timestamp_to_date(end_time)))

        start_time = prediction_time
        current_class = detected
        print('Started detecting', prediction_to_class[current_class])

# Save predictions as images
def save_images(frames_with_times, original_predictions):
    for image_with_time, prediction in zip(frames_with_times, original_predictions):
        # Figure out what the prediction was
        predicted_class = np.argmax(prediction)
        # Only save positive predictions
        if predicted_class == imagenet_class:
            continue

        class_name = prediction_to_class[predicted_class]
        confidence = prediction[predicted_class]
        print(str(class_name) + ' ' + str(confidence))

        image = image_with_time[0]
        detection_time = str(datetime.fromtimestamp(image_with_time[1]).isoformat()).replace(':', "-").replace('/', "-")

        image_hash = hash(str(image))
        # if this image has been saved already, don't save it
        if image_hash in saved_images:
            continue
        # Mark this image as saved
        saved_images[image_hash] = True

        # Make sure the class dir exists
        os.makedirs(save_images_dir + class_name, exist_ok=True)

        # Let's save the image
        save_name = save_images_dir + class_name + '/' + class_name + "_" + detection_time + '.jpg'
        print(save_name)
        cv2.imwrite(save_name, image)

if __name__ == '__main__':
    # Keep a cache of images saved, so we don't save them multiple times
    saved_images = {}

    parser = argparse.ArgumentParser(description='Webcam demo')
    parser.add_argument('--video', dest='video',
                        # The default video param of 0 means webcam
                        default=0,
                        help='Path to video file to use instead of the webcam')

    parser.add_argument('--fps', dest='fps',
                        default=2,
                        help='How many frames to pass to the model for inference. The more the better. Suggested value is from 1 to 24. Default 2')

    parser.add_argument('--video_read_frames', dest='video_read_frames',
                        default=24,
                        help='How many frames of video to read/analyze/play per second (default is 24)')

    parser.add_argument('--output', dest='output',
                        default="output.csv",
                        help='File to output animal detections to. Default is output.csv')

    parser.add_argument('--lookback_seconds', dest='lookback_seconds',
                        default=3,
                        help='How many seconds back to analyze detections for inclusion in CSV file')

    parser.add_argument('--model_type', dest='which_model',
                        default='keras',
                        help='Choose which model you want to run, keras or tf')

    parser.add_argument('--gpu_mem', dest='gpu_mem',
                        default=1,
                        help='Choose GPU memory allocation from 0 to 1 in fraction')

    args = parser.parse_args()
    print('Using FPS:', args.fps)
    print('Using video read frames per second:', int(args.video_read_frames))

    # For analysis of detections
    detections = []
    imagenet_class = 34
    current_class = imagenet_class
    start_time = False

    # Create directory to store detections
    current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    save_images_dir = './animals_and_birds/' + current_time + '/'
    os.makedirs(save_images_dir, exist_ok=True)

    # Mapping of classes to inference indexes
    prediction_to_class = {0: 'Bat', 1: 'Bear', 2: 'Canary',
                           3: 'Cat', 4: 'Cattle', 5: 'Chicken',
                           6: 'Deer', 7: 'Dog', 8: 'Donkey',
                           9: 'Duck', 10: 'Fox', 11: 'Frog',
                           12: 'Goat', 13: 'Goose', 14: 'Hamster',
                           15: 'Hedgehog', 16: 'Horse',
                           17: 'Lizard', 18: 'Magpie', 19: 'Mole',
                           20: 'Owl', 21: 'Parrot', 22: 'Pig',
                           23: 'Pigeon', 24: 'Rabbit', 25: 'Raven',
                           26: 'Sheep', 27: 'Snake', 28: 'Sparrow',
                           29: 'Squirrel', 30: 'Stork',
                           31: 'Tortoise', 32: 'Turkey',
                           33: 'Woodpecker',
                           34: 'imagenet_resized_256'}

    # Process the video/webcam
    process(args.video, int(args.fps), int(args.video_read_frames), args.output, args.which_model, args.gpu_mem)
    cv2.destroyAllWindows()
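
For example, to run the frozen TensorFlow graph against the webcam with half of the GPU memory (the flag values are only illustrative):

python webcam_demo.py --model_type tf --fps 2 --gpu_mem 0.5 --output output.csv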

webcam_motion.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import cv2
from webcamvideostream import WebcamVideoStream
import time

import numpy as np
from preprocessing import preprocess_all
import matplotlib.pyplot as plt

import tensorflow as tf
import argparse
from tensorflow.python.platform import gfile
from datetime import datetime
import csv
import os

# In[ ]:

parser = argparse.ArgumentParser(description='Webcam motion demo')
parser.add_argument('--video', dest='video',
                    # The default video param of 0 means webcam
                    default=0,
                    help='Path to video file to use instead of the webcam')

parser.add_argument('--fps', dest='fps',
                    default=2,
                    help='How many frames to pass to the model for inference. The more the better. Suggested value is from 1 to 24. Default 2')

parser.add_argument('--output', dest='output',
                    default="output.csv",
                    help='File to output animal detections to. Default is output.csv')

parser.add_argument('--lookback_seconds', dest='lookback_seconds',
                    default=3,
                    help='How many seconds back to analyze detections for inclusion in CSV file')

parser.add_argument('--gpu_mem', dest='gpu_mem',
                    default=1,
                    help='Choose GPU memory allocation from 0 to 1 in fraction')

parser.add_argument('--save_pic', dest='save_pic',
                    default=False,
                    help='Set to True to save a picture of every detection')

args = parser.parse_args()
print('Using FPS:', args.fps)

# In[2]:

# Mapping of classes to inference indexes
prediction_to_class = {0: 'Bat', 1: 'Bear', 2: 'Canary',
                       3: 'Cat', 4: 'Cattle', 5: 'Chicken',
                       6: 'Deer', 7: 'Dog', 8: 'Donkey',
                       9: 'Duck', 10: 'Fox', 11: 'Frog',
                       12: 'Goat', 13: 'Goose', 14: 'Hamster',
                       15: 'Hedgehog', 16: 'Horse',
                       17: 'Lizard', 18: 'Magpie', 19: 'Mole',
                       20: 'Owl', 21: 'Parrot', 22: 'Pig',
                       23: 'Pigeon', 24: 'Rabbit', 25: 'Raven',
                       26: 'Sheep', 27: 'Snake', 28: 'Sparrow',
                       29: 'Squirrel', 30: 'Stork',
                       31: 'Tortoise', 32: 'Turkey',
                       33: 'Woodpecker',
                       34: 'imagenet_resized_256'}

# In[3]:

# Find detected animal from last few seconds
def find_most_common_prediction(current_time, predictions):
    # Find predictions in last N seconds
    lookback_period = float(args.lookback_seconds)  # seconds
    lookback_predictions = []
    for prediction_time, prediction in predictions[::-1]:
        if current_time - lookback_period < prediction_time:
            lookback_predictions.append(prediction)
        else:
            # We have reached older times that don't interest us
            break
    return np.max(np.asarray(lookback_predictions))

# Convert UNIX timestamp to human date
def timestamp_to_date(timestamp):
    return datetime.utcfromtimestamp(timestamp).strftime(
        '%Y-%m-%d %H:%M:%S UTC')

# Analyze previous detections to record them
def analyze_predictions(all_predictions):
    global detections
    global start_time
    global current_class

    prediction_time, _ = all_predictions[-1]
    detected = find_most_common_prediction(prediction_time, all_predictions)
    print(detected)

    if detected != current_class:
        # We have detected something new

        if current_class != imagenet_class:
            # We have been detecting an animal,
            # so we have finished detecting this animal
            print('Finished detecting', prediction_to_class[current_class])

            end_time = prediction_time
            detections.append(
                (prediction_to_class[current_class],
                 timestamp_to_date(start_time),
                 timestamp_to_date(end_time)))

        start_time = prediction_time
        current_class = detected
        print('Started detecting', prediction_to_class[current_class])

# Display text on an image, used to show the video/webcam
def display_text(img, text, x=10, y=20):
    # prepare the text
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (x, y)
    fontScale = 0.5
    fontColor = (0, 0, 255)
    lineType = 1

    # Draw the text on the image
    cv2.putText(img, text,
                bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                lineType)
    # Show the image with text
    cv2.imshow("Frame", img)
    cv2.waitKey(1)

# In[4]:

print('Model loading....')
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = float(args.gpu_mem)  # GPU utilization
sess = tf.Session(config=config)
f = gfile.FastGFile("./model/tf_model.pb", 'rb')
graph_def = tf.GraphDef()

# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
softmax_tensor = sess.graph.get_tensor_by_name('import/fc1000/Softmax:0')
sess.graph.as_default()
print('Model loaded')

# In[ ]:

output_file = args.output

current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
save_images_dir = './animals_and_birds/' + current_time + '/'
os.makedirs(save_images_dir, exist_ok=True)

firstFrame = None
imagenet_class = 34
current_class = imagenet_class
current_text = " "
detections = []
prev_predictions = []
start_time_full = time.time()

video_source = args.video
if video_source == '0':
    video_source = 0
vs = WebcamVideoStream(src=video_source, max_frames=int(args.fps)).start()

while 1:

    try:
        frame = vs.read()
        if frame is None:
            print('no frame')

            print('DETECTED ANIMALS:', detections)
            # Save to CSV
            with open(output_file, 'w') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['Animal', 'From', 'To'])
                for detection in detections:
                    animal, start, end = detection
                    writer.writerow(detection)

            # Finally, do a bit of cleanup
            cv2.destroyAllWindows()
            vs.stop()
            print('Stopping main thread')
            break

        full_image_Frame = frame.copy()
        display_text(full_image_Frame, current_text)
        frame = cv2.resize(frame, (224, 224))
        gray = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        if firstFrame is None:
            firstFrame = gray.copy()
            continue

        # compute the absolute difference between the current frame and the reference frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours on the thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)

        # OpenCV 4.x returns (contours, hierarchy); the TX2 version below uses the OpenCV 3.x signature
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cont_Array = [cv2.contourArea(c) for c in contours]
        if len(cont_Array) > 0:
            # Only run inference when there is significant motion in the frame
            if max(cont_Array) > 100:
                predictions_array = sess.run(softmax_tensor, {'import/input_1:0': frame.reshape(1, frame.shape[0], frame.shape[1], 3)})
                prediction = np.argmax(predictions_array)

                if prediction != imagenet_class:
                    current_text = f"{prediction_to_class[prediction]} detected. Confidence: ({predictions_array.reshape(-1,)[prediction]:.3f})"

                    if args.save_pic == 'True':
                        class_name = prediction_to_class[prediction]
                        os.makedirs(save_images_dir + class_name, exist_ok=True)

                        confidence = predictions_array.reshape(-1,)[prediction]
                        print(str(class_name) + ' ' + str(confidence))
                        detection_time = str(datetime.fromtimestamp(time.time()).isoformat()).replace(':', "-").replace('/', "-")

                        save_name = save_images_dir + class_name + '/' + class_name + "_" + detection_time + '.jpg'
                        print(save_name)
                        cv2.imwrite(save_name, full_image_Frame)

                else:
                    current_text = f"Nothing detected. Confidence: ({predictions_array.reshape(-1,)[prediction]:.3f})"

                prev_predictions.append((time.time(), prediction))
                analyze_predictions(prev_predictions)
        firstFrame = gray.copy()

    except KeyboardInterrupt:
        print('DETECTED ANIMALS:', detections)
        # Save to CSV
        with open(output_file, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Animal', 'From', 'To'])
            for detection in detections:
                animal, start, end = detection
                writer.writerow(detection)

        # Finally, do a bit of cleanup
        cv2.destroyAllWindows()
        vs.stop()
        print('Stopping main thread')

        elapsed_time = time.time() - start_time_full
        vs.stop()
        sess.close()

        break
cv2.destroyAllWindows()
elapsed_time = time.time() - start_time_full

vs.stop()
sess.close()
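
A possible invocation of the motion-gated demo (flag values are illustrative; --save_pic True additionally stores a JPEG of every detection):

python webcam_motion.py --video 0 --fps 2 --gpu_mem 0.5 --save_pic True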

webcam_motion_tx2.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import cv2
from webcamvideostream import WebcamVideoStream
import time

import numpy as np
from preprocessing import preprocess_all
#import matplotlib.pyplot as plt

import tensorflow as tf
import argparse
from tensorflow.python.platform import gfile
from datetime import datetime
import csv
import os

# In[ ]:

parser = argparse.ArgumentParser(description='Webcam motion demo')
parser.add_argument('--video', dest='video',
                    # The default video param of 0 means webcam
                    default=0,
                    help='Path to video file to use instead of the webcam')

parser.add_argument('--fps', dest='fps',
                    default=2,
                    help='How many frames to pass to the model for inference. The more the better. Suggested value is from 1 to 24. Default 2')

parser.add_argument('--output', dest='output',
                    default="output.csv",
                    help='File to output animal detections to. Default is output.csv')

parser.add_argument('--lookback_seconds', dest='lookback_seconds',
                    default=3,
                    help='How many seconds back to analyze detections for inclusion in CSV file')

parser.add_argument('--gpu_mem', dest='gpu_mem',
                    default=1,
                    help='Choose GPU memory allocation from 0 to 1 in fraction')

parser.add_argument('--save_pic', dest='save_pic',
                    default=False,
                    help='Set to True to save a picture of every detection')

args = parser.parse_args()
print('Using FPS:', args.fps)

# In[2]:

# Mapping of classes to inference indexes
prediction_to_class = {0: 'Bat', 1: 'Bear', 2: 'Canary',
                       3: 'Cat', 4: 'Cattle', 5: 'Chicken',
                       6: 'Deer', 7: 'Dog', 8: 'Donkey',
                       9: 'Duck', 10: 'Fox', 11: 'Frog',
                       12: 'Goat', 13: 'Goose', 14: 'Hamster',
                       15: 'Hedgehog', 16: 'Horse',
                       17: 'Lizard', 18: 'Magpie', 19: 'Mole',
                       20: 'Owl', 21: 'Parrot', 22: 'Pig',
                       23: 'Pigeon', 24: 'Rabbit', 25: 'Raven',
                       26: 'Sheep', 27: 'Snake', 28: 'Sparrow',
                       29: 'Squirrel', 30: 'Stork',
                       31: 'Tortoise', 32: 'Turkey',
                       33: 'Woodpecker',
                       34: 'imagenet_resized_256'}

# In[3]:

# Find detected animal from last few seconds
def find_most_common_prediction(current_time, predictions):
    # Find predictions in last N seconds
    lookback_period = float(args.lookback_seconds)  # seconds
    lookback_predictions = []
    for prediction_time, prediction in predictions[::-1]:
        if current_time - lookback_period < prediction_time:
            lookback_predictions.append(prediction)
        else:
            # We have reached older times that don't interest us
            break
    return np.max(np.asarray(lookback_predictions))

# Convert UNIX timestamp to human date
def timestamp_to_date(timestamp):
    return datetime.utcfromtimestamp(timestamp).strftime(
        '%Y-%m-%d %H:%M:%S UTC')

# Analyze previous detections to record them
def analyze_predictions(all_predictions):
    global detections
    global start_time
    global current_class

    prediction_time, _ = all_predictions[-1]
    detected = find_most_common_prediction(prediction_time, all_predictions)
    print(detected)

    if detected != current_class:
        # We have detected something new

        if current_class != imagenet_class:
            # We have been detecting an animal,
            # so we have finished detecting this animal
            print('Finished detecting', prediction_to_class[current_class])

            end_time = prediction_time
            detections.append(
                (prediction_to_class[current_class],
                 timestamp_to_date(start_time),
                 timestamp_to_date(end_time)))

        start_time = prediction_time
        current_class = detected
        print('Started detecting', prediction_to_class[current_class])

# Display text on an image, used to show the video/webcam
def display_text(img, text, x=10, y=20):
    # prepare the text
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (x, y)
    fontScale = 0.5
    fontColor = (0, 0, 255)
    lineType = 1

    # Draw the text on the image
    cv2.putText(img, text,
                bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                lineType)
    # Show the image with text
    cv2.imshow("Frame", img)
    cv2.waitKey(1)

# In[4]:

print('Model loading....')
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = float(args.gpu_mem)  # GPU utilization
sess = tf.Session(config=config)
f = gfile.FastGFile("./model/tf_model.pb", 'rb')
graph_def = tf.GraphDef()

# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
softmax_tensor = sess.graph.get_tensor_by_name('import/fc1000/Softmax:0')
sess.graph.as_default()
print('Model loaded')

# In[ ]:

output_file = args.output

current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
save_images_dir = './animals_and_birds/' + current_time + '/'
os.makedirs(save_images_dir, exist_ok=True)

firstFrame = None
imagenet_class = 34
current_class = imagenet_class
current_text = " "
detections = []
prev_predictions = []
start_time_full = time.time()

video_source = args.video
if video_source == '0':
    video_source = 0
vs = WebcamVideoStream(src=video_source, max_frames=int(args.fps)).start()

while 1:

    try:
        frame = vs.read()
        if frame is None:
            print('no frame')

            print('DETECTED ANIMALS:', detections)
            # Save to CSV
            with open(output_file, 'w') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['Animal', 'From', 'To'])
                for detection in detections:
                    animal, start, end = detection
                    writer.writerow(detection)

            # Finally, do a bit of cleanup
            cv2.destroyAllWindows()
            vs.stop()
            print('Stopping main thread')
            break

        full_image_Frame = frame.copy()
        display_text(full_image_Frame, current_text)
        frame = cv2.resize(frame, (224, 224))
        gray = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        if firstFrame is None:
            firstFrame = gray.copy()
            continue

        # compute the absolute difference between the current frame and the reference frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours on the thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)

        # OpenCV 3.x (as installed on the TX2) returns (image, contours, hierarchy)
        _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cont_Array = [cv2.contourArea(c) for c in contours]
        if len(cont_Array) > 0:
            # Only run inference when there is significant motion in the frame
            if max(cont_Array) > 100:
                predictions_array = sess.run(softmax_tensor, {'import/input_1:0': frame.reshape(1, frame.shape[0], frame.shape[1], 3)})
                prediction = np.argmax(predictions_array)

                if prediction != imagenet_class:
                    current_text = (str(prediction_to_class[prediction]) + " detected. Confidence: " + str(predictions_array.reshape(-1,)[prediction]))

                    if args.save_pic == 'True':
                        class_name = prediction_to_class[prediction]
                        os.makedirs(save_images_dir + class_name, exist_ok=True)

                        confidence = predictions_array.reshape(-1,)[prediction]
                        print(str(class_name) + ' ' + str(confidence))
                        detection_time = str(datetime.fromtimestamp(time.time()).isoformat()).replace(':', "-").replace('/', "-")

                        save_name = save_images_dir + class_name + '/' + class_name + "_" + detection_time + '.jpg'
                        print(save_name)
                        cv2.imwrite(save_name, full_image_Frame)

                else:
                    current_text = ("Nothing detected. Confidence: " + str(predictions_array.reshape(-1,)[prediction]))

                prev_predictions.append((time.time(), prediction))
                analyze_predictions(prev_predictions)
        firstFrame = gray.copy()

    except KeyboardInterrupt:
        print('DETECTED ANIMALS:', detections)
        # Save to CSV
        with open(output_file, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Animal', 'From', 'To'])
            for detection in detections:
                animal, start, end = detection
                writer.writerow(detection)

        # Finally, do a bit of cleanup
        cv2.destroyAllWindows()
        vs.stop()
        print('Stopping main thread')

        elapsed_time = time.time() - start_time_full
        vs.stop()
        sess.close()

        break
cv2.destroyAllWindows()
elapsed_time = time.time() - start_time_full

vs.stop()
sess.close()

webcamvideostream.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""

# import the necessary packages
from threading import Thread
import cv2
import time

class WebcamVideoStream:
    def __init__(self, src=0, name="WebcamVideoStream", max_frames=24):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()

        self.frames = []
        self.max_frames = max_frames
        self.last_saved_frame = time.time()

        # initialize the thread name
        self.name = name

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, name=self.name, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        while True:
            started_reading = time.time()

            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                print('Video/webcam thread stopping')
                return

            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
            self._add_frame(self.frame)

            ended_reading = time.time()

            # Calculate how long to wait until getting the next frame
            delay_seconds = (1. / self.max_frames) - (
                ended_reading - started_reading)
            # no delay of less than 1 ms
            if delay_seconds < 0.001:
                delay_seconds = 0.001
            time.sleep(delay_seconds)

    def _add_frame(self, frame):
        self.last_saved_frame = time.time()
        self.frames.append((frame, time.time()))
        # Only keep so many frames in the stack
        if len(self.frames) > self.max_frames:
            self.frames.pop(0)

    def read_frames(self):
        return self.frames

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
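
A short usage sketch of this class (source 0 means the default webcam; the two-second warm-up mirrors how the demos above use it):

from webcamvideostream import WebcamVideoStream
import time

vs = WebcamVideoStream(src=0, max_frames=24).start()
time.sleep(2)                  # let the camera warm up
frame = vs.read()              # most recently captured frame
frames = vs.read_frames()      # up to max_frames (frame, timestamp) pairs
vs.stop()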

I also managed to set up an SMS alert for when a dangerous animal is identified on my property. This is especially helpful for farmers, for example when a bear attacks the sheep or a fox attacks the chickens (the user can very easily specify the animal class for which he wants to receive an SMS alert).

The idea of implementing this SMS feature came to me after seeing the amazing work that Gautam Kumar did here.

How it works: if the same animal has been detected for the entire duration of 3 seconds and it is a fox or a bear, the system tries to send a message (but only if it has not sent a message for this animal in the last 5 minutes). This is because we do not want to receive an SMS every second while a wild animal is identified, but only once every 5 minutes.
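
As a minimal sketch of this per-animal rate limiting (the helper name should_send_sms and the dictionary are mine for illustration; the actual logic lives in analyze_predictions in the file below):

import time

SMS_COOLDOWN_SECONDS = 5 * 60
last_message_time = {}  # per-animal timestamp of the last SMS sent

def should_send_sms(animal, now=None):
    # True only if no SMS was sent for this animal in the last 5 minutes
    now = time.time() if now is None else now
    if now - last_message_time.get(animal, 0.0) > SMS_COOLDOWN_SECONDS:
        last_message_time[animal] = now
        return True
    return False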

You can see my code for this file below:

webcam_motion_tx2_with_twilio_sms_new.py

"""
Code implemented in Python by Sorin Liviu Jurj for the paper called "Efficient Implementation of a Self-Sufficient Solar-Powered Real-Time Deep Learning-Based System". More information about this project you can find here: https://www.jurj.de/efficient-implementation-of-a-self-sufficient-solar-powered-real-time-deep-learning-based-system
"""

#!/usr/bin/env python
# coding: utf-8

# In[1]:

import cv2
from webcamvideostream import WebcamVideoStream
import time

import numpy as np
from preprocessing import preprocess_all
#import matplotlib.pyplot as plt

import tensorflow as tf
import argparse
from tensorflow.python.platform import gfile
from datetime import datetime
import csv
import os

####### --------------------------------------------------------------------------------
# Import the Twilio client
from twilio.rest import Client

# Python code to receive a message on your mobile phone using the Twilio API. I followed the same steps as mentioned here.
# The following line needs your Twilio Account SID and Auth Token.
# SID and authentication token can be generated by making a trial/license account on Twilio.
# To make a trial account and generate the account SID and token, follow the steps as shown at https://www.fullstackpython.com/blog/send-sms-text-messages-python.html
client = Client("ACd851d351a8b52566cd6ead4b0ce7dc07", "bff3c0fb9b3c7c86a219c060cc64e7d1")  # example: client = Client("ACea4cecca40ebb1bf4594098d5ceXXXX", "32789639585561088d5937514694XXXX")
# change the "from_" number to your Twilio number and the "to" number
# to the phone number you signed up for Twilio with, or upgrade your
# account to send SMS to any phone number
client.messages.create(to="+491733830498",  # replace with your mobile number or the one on which you want to get the SMS
                       from_="+12565703390",
                       body="This is a message to inform you that the Twilio SMS alert works!")
# If everything is okay, you will get an SMS on your mobile phone using the above 4 lines of code.
# Write the above block of code at the section/block of code where you want to get an SMS.
# For example, if no animal is detected by the webcam, you can simply label the output frame as 'No animal detected' (no need to generate an SMS).
# If there is a change in label, generate an alert message with the predicted class label as the message body.

# In my case, I defined a threshold: if the predicted probability is below the threshold, I don't generate an alert message and label the output frame as 'Normal'
# using the variable 'ok'.
# However, when the probability of the predicted class label is > threshold, I generate an alert message using Twilio with the predicted class and probability as the message body.
# There is a change in class label (if label != prelabel:) and I am generating the message next to that line of code.

"""
client = Client("ACd851d351a8b52566cd6ead4b0ce7dc07", "bff3c0fb9b3c7c86a219c060cc64e7d1")
prelabel = ''
ok = 'imagenet_resized_256'

if (preds[prediction]) < th:
    text = "Alert : {} - {:.2f}%".format((ok), 100 - (preds[prediction] * 100))
    cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.25, (0, 255, 0), 5)
else:
    text = "Alert : {} - {:.2f}%".format((label), preds[prediction] * 100)
    cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.25, (0, 255, 0), 5)
    if label != prelabel:
        client.messages.create(to="+491733830498",
                               from_="+12565703390",
                               body='\n' + str(text) + '\n Camera ID: ' + str(camid) + '\n Address: ' + location)
        prelabel = label
"""

# If you are facing difficulty generating the SMS you can use another API too, such as
# https://www.geeksforgeeks.org/send-sms-updates-mobile-phone-using-python/
####### --------------------------------------------------------------------------------

parser = argparse.ArgumentParser(description='Webcam motion demo')
parser.add_argument('--video', dest='video',
                    # The default video param of 0 means webcam
                    default=0,
                    help='Path to video file to use instead of the webcam')

parser.add_argument('--fps', dest='fps',
                    default=2,
                    help='How many frames to pass to the model for inference. The more the better. Suggested value is from 1 to 24. Default 2')

parser.add_argument('--output', dest='output',
                    default="output.csv",
                    help='File to output animal detections to. Default is output.csv')

parser.add_argument('--lookback_seconds', dest='lookback_seconds',
                    default=3,
                    help='How many seconds back to analyze detections for inclusion in CSV file')

parser.add_argument('--gpu_mem', dest='gpu_mem',
                    default=1,
                    help='Choose GPU memory allocation from 0 to 1 in fraction')

parser.add_argument('--save_pic', dest='save_pic',
                    default=False,
                    help='Set to True to save a picture of every detection')

args = parser.parse_args()
print('Using FPS:', args.fps)

# In[2]:

# Mapping of classes to inference indexes
prediction_to_class = {0: 'Bat', 1: 'Bear', 2: 'Canary',
                       3: 'Cat', 4: 'Cattle', 5: 'Chicken',
                       6: 'Deer', 7: 'Dog', 8: 'Donkey',
                       9: 'Duck', 10: 'Fox', 11: 'Frog',
                       12: 'Goat', 13: 'Goose', 14: 'Hamster',
                       15: 'Hedgehog', 16: 'Horse',
                       17: 'Lizard', 18: 'Magpie', 19: 'Mole',
                       20: 'Owl', 21: 'Parrot', 22: 'Pig',
                       23: 'Pigeon', 24: 'Rabbit', 25: 'Raven',
                       26: 'Sheep', 27: 'Snake', 28: 'Sparrow',
                       29: 'Squirrel', 30: 'Stork',
                       31: 'Tortoise', 32: 'Turkey',
                       33: 'Woodpecker',
                       34: 'imagenet_resized_256'}

# In[3]:

# Find detected animal from last few seconds
def find_most_common_prediction(current_time, predictions):
    # Find predictions in last N seconds
    lookback_period = float(args.lookback_seconds)  # seconds
    lookback_predictions = []
    for prediction_time, prediction in predictions[::-1]:
        if current_time - lookback_period < prediction_time:
            lookback_predictions.append(prediction)
        else:
            # We have reached older times that don't interest us
            break
    return max(set(lookback_predictions), key=lookback_predictions.count)

def consistent_prediction(current_time, predictions):
    # Check whether the same class was predicted for the whole lookback period
    lookback_period = 3  # seconds
    lookback_predictions = []
    for prediction_time, prediction in predictions[::-1]:
        if current_time - lookback_period < prediction_time:
            lookback_predictions.append(prediction)
        else:
            # We have reached older times that don't interest us
            break
    return all(p == lookback_predictions[0] for p in lookback_predictions)

# Convert UNIX timestamp to human date
def timestamp_to_date(timestamp):
    return datetime.utcfromtimestamp(timestamp).strftime(
        '%Y-%m-%d %H:%M:%S UTC')

# Analyze previous detections to record them
def analyze_predictions(all_predictions):
    global detections
    global start_time
    global current_class
    global last_bear_message_time
    global last_fox_message_time

    prediction_time, _ = all_predictions[-1]
    detected = find_most_common_prediction(prediction_time, all_predictions)
    print(detected)

    if detected != current_class:
        # We have detected something new

        if current_class != imagenet_class:
            # We have been detecting an animal,
            # so we have finished detecting this animal
            print('Finished detecting', prediction_to_class[current_class])

            end_time = prediction_time
            detections.append(
                (prediction_to_class[current_class],
                 timestamp_to_date(start_time),
                 timestamp_to_date(end_time)))

        start_time = prediction_time
        current_class = detected
        print('Started detecting', prediction_to_class[current_class])

    elif consistent_prediction(prediction_time, all_predictions):
        # Class 1 is 'Bear': send at most one SMS every 5 minutes
        if detected == 1:
            if time.time() - last_bear_message_time > 5 * 60:
                last_bear_message_time = time.time()
                client.messages.create(to="+491733830498",
                                       from_="+12565703390",
                                       body="I detected a bear")
        # Class 10 is 'Fox': send at most one SMS every 5 minutes
        if detected == 10:
            if time.time() - last_fox_message_time > 5 * 60:
                last_fox_message_time = time.time()
                client.messages.create(to="+491733830498",
                                       from_="+12565703390",
                                       body="I detected a fox")

# Display text on an image, used to show the video/webcam
def display_text(img, text, x=10, y=20):
    # prepare the text
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (x, y)
    fontScale = 0.5
    fontColor = (0, 0, 255)
    lineType = 1

    # Draw the text on the image
    cv2.putText(img, text,
                bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                lineType)
    # Show the image with text
    cv2.imshow("Frame", img)
    cv2.waitKey(1)

# In[ ]:

# Initialize the cooldowns so that the very first detection can already send an SMS
last_bear_message_time = time.time() - 6 * 60
last_fox_message_time = time.time() - 6 * 60

# In[4]:

print('Model loading....')
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = float(args.gpu_mem)  # GPU utilization
sess = tf.Session(config=config)
f = gfile.FastGFile("./model/tf_model.pb", 'rb')
graph_def = tf.GraphDef()

# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
softmax_tensor = sess.graph.get_tensor_by_name('import/fc1000/Softmax:0')
sess.graph.as_default()
print('Model loaded')

# In[ ]:

output_file = args.output

current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
save_images_dir = './animals_and_birds/' + current_time + '/'
os.makedirs(save_images_dir, exist_ok=True)

firstFrame = None
imagenet_class = 34
current_class = imagenet_class
current_text = " "
detections = []
prev_predictions = []
start_time_full = time.time()

video_source = args.video
if video_source == '0':
    video_source = 0
vs = WebcamVideoStream(src=video_source, max_frames=int(args.fps)).start()

while 1:

    try:
        frame = vs.read()
        if frame is None:
            print('no frame')

            print('DETECTED ANIMALS:', detections)
            # Save to CSV
            with open(output_file, 'w') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['Animal', 'From', 'To'])
                for detection in detections:
                    animal, start, end = detection
                    writer.writerow(detection)

            # Finally, do a bit of cleanup
            cv2.destroyAllWindows()
            vs.stop()
            print('Stopping main thread')
            break

        full_image_Frame = frame.copy()
        display_text(full_image_Frame, current_text)
        frame = cv2.resize(frame, (224, 224))
        gray = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        if firstFrame is None:
            firstFrame = gray.copy()
            continue

        # compute the absolute difference between the current frame and the reference frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours on the thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)

        # OpenCV 3.x (as installed on the TX2) returns (image, contours, hierarchy)
        _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cont_Array = [cv2.contourArea(c) for c in contours]
        if len(cont_Array) > 0:
            # Only run inference when there is significant motion in the frame
            if max(cont_Array) > 100:
                predictions_array = sess.run(softmax_tensor, {'import/input_1:0': frame.reshape(1, frame.shape[0], frame.shape[1], 3)})
                prediction = np.argmax(predictions_array)

                if prediction != imagenet_class:
                    current_text = (str(prediction_to_class[prediction]) + " detected. Confidence: " + str(predictions_array.reshape(-1,)[prediction]))

                    if args.save_pic == 'True':
                        class_name = prediction_to_class[prediction]
                        os.makedirs(save_images_dir + class_name, exist_ok=True)

                        confidence = predictions_array.reshape(-1,)[prediction]
                        print(str(class_name) + ' ' + str(confidence))
                        detection_time = str(datetime.fromtimestamp(time.time()).isoformat()).replace(':', "-").replace('/', "-")

                        save_name = save_images_dir + class_name + '/' + class_name + "_" + detection_time + '.jpg'
                        print(save_name)
                        cv2.imwrite(save_name, full_image_Frame)

                else:
                    current_text = ("Nothing detected. Confidence: " + str(predictions_array.reshape(-1,)[prediction]))

                prev_predictions.append((time.time(), prediction))
                analyze_predictions(prev_predictions)
        firstFrame = gray.copy()

    except KeyboardInterrupt:
        print('DETECTED ANIMALS:', detections)
        # Save to CSV
        with open(output_file, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Animal', 'From', 'To'])
            for detection in detections:
                animal, start, end = detection
                writer.writerow(detection)

        # Finally, do a bit of cleanup
        cv2.destroyAllWindows()
        vs.stop()
        print('Stopping main thread')

        elapsed_time = time.time() - start_time_full
        vs.stop()
        sess.close()

        break
cv2.destroyAllWindows()
elapsed_time = time.time() - start_time_full

vs.stop()
sess.close()
