實時姿態辨識,完整程式碼如下
In [1]:
!pip install openvino
!pip install jupyter_ui_poll
import collections
import os
import sys
import time
# import cv2
import datetime
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
from pytorchyolo import detect, models
# print("setup KMP_DUPLICATE_LIB_OK=TRUE")
yolo_path = "./old"
cfg_path = "{}/my-yolov3-tiny.cfg".format(yolo_path)
model_path = "{}/my-yolov3-tiny_20000.weights".format(yolo_path)
import cv2
import numpy as np
import ipywidgets.widgets as widgets
from jupyter_ui_poll import ui_events
from openvino.runtime import Core
prefix_path = "../utils"
sys.path.append(prefix_path)
print(sys.path)
import notebook_utils as utils
# self define function
from object_detect import DetectTracker
from PoseDetect import PostAdapter
from utils import bgr8_to_jpeg, omz_model, _is_boxes_pose
model = models.load_model(cfg_path, model_path)
Requirement already satisfied: openvino in c:\users\user\anaconda3\envs\myenv\lib\site-packages (2023.1.0) Requirement already satisfied: openvino-telemetry>=2023.1.0 in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from openvino) (2024.1.0) Requirement already satisfied: singledispatchmethod in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from openvino) (1.0) Requirement already satisfied: numpy>=1.16.6 in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from openvino) (1.21.6) Collecting jupyter_ui_poll Using cached jupyter_ui_poll-0.2.2-py2.py3-none-any.whl (9.0 kB) Requirement already satisfied: ipython in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from jupyter_ui_poll) (7.34.0) Requirement already satisfied: traitlets>=4.2 in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (5.9.0) Requirement already satisfied: backcall in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (0.2.0) Requirement already satisfied: colorama in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (0.4.6) Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (3.0.39) Requirement already satisfied: pickleshare in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (0.7.5) Requirement already satisfied: decorator in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (5.1.1) Requirement already satisfied: jedi>=0.16 in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (0.19.0) Requirement already satisfied: pygments in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (2.16.1) Requirement already satisfied: setuptools>=18.5 in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) 
(65.6.3) Requirement already satisfied: matplotlib-inline in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from ipython->jupyter_ui_poll) (0.1.6) Requirement already satisfied: parso<0.9.0,>=0.8.3 in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from jedi>=0.16->ipython->jupyter_ui_poll) (0.8.3) Requirement already satisfied: wcwidth in c:\users\user\anaconda3\envs\myenv\lib\site-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->jupyter_ui_poll) (0.2.7) Installing collected packages: jupyter_ui_poll Successfully installed jupyter_ui_poll-0.2.2 ['C:\\Users\\USER\\openvino_project_last\\All_in_one', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\python37.zip', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\DLLs', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\lib', 'C:\\Users\\USER\\anaconda3\\envs\\myenv', '', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\lib\\site-packages', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\lib\\site-packages\\win32', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\lib\\site-packages\\win32\\lib', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\lib\\site-packages\\Pythonwin', 'C:\\Users\\USER\\anaconda3\\envs\\myenv\\lib\\site-packages\\IPython\\extensions', 'C:\\Users\\USER\\.ipython', '../utils']
../utils\notebook_utils.py:23: FutureWarning: OpenVINO Inference Engine Python API is deprecated and will be removed in 2024.0 release.For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html import openvino.inference_engine
In [2]:
# Object-detection model from Open Model Zoo: download it into
# `base_model_dir` and convert it to OpenVINO IR at the chosen precision.
base_model_dir = "model"
model_name = "ssdlite_mobilenet_v2"
precision = "FP16"
converted_model_path = omz_model(model_name, base_model_dir, precision, prefix_path)
print("output path:{}".format(converted_model_path))
download command:omz_downloader --name ssdlite_mobilenet_v2 --precisions FP16 --output_dir model --cache_dir model output path:model/public/ssdlite_mobilenet_v2/FP16/ssdlite_mobilenet_v2.xml
In [3]:
# Pose-estimation model from Open Model Zoo.
# NOTE(review): this rebinds `model_path`, which earlier held the YOLO
# weights path — safe because the YOLO model is already loaded by now,
# but a distinct name (e.g. pose_model_path) would be clearer.
base_model_dir = "model"
model_name = "human-pose-estimation-0001"
precision = "FP16-INT8"  # available precisions: FP32, FP16, FP16-INT8
model_path = omz_model(model_name, base_model_dir, precision, prefix_path)
print("output path:{}".format(model_path))
download command:omz_downloader --name human-pose-estimation-0001 --precisions FP16-INT8 --output_dir model --cache_dir model output path:model/intel/human-pose-estimation-0001/FP16-INT8/human-pose-estimation-0001.xml
In [4]:
# Set up OpenVINO Runtime and compile both networks for inference.
ie_core = Core()

# Read the networks and their weights from the downloaded IR files.
objectDetect_model = ie_core.read_model(model=converted_model_path)
openPose_model = ie_core.read_model(model=model_path)

# Compile for CPU; other devices (GPU, MYRIAD) or "AUTO" also work.
compiled_model_O = ie_core.compile_model(model=objectDetect_model, device_name="CPU")
compiled_model_P = ie_core.compile_model(model=openPose_model, device_name="CPU")

# Wrap the compiled models in the project helper classes.
DetectObj = DetectTracker(compiled_model_O)
PoseObj = PostAdapter(compiled_model_P)
print(DetectObj.get_my_labels())
['painkiller', 'vitamin c', 'heart medicine', 'bottle', 'waterbottle']
In [5]:
def labels_string(boxes, labels):
    """Return a comma-separated string of label names for detected boxes.

    Each box is a tuple whose first element is the class index into
    ``labels``.  Returns the sentinel string "沒有物品正拿在手上"
    ("nothing is being held") when ``boxes`` is empty.
    """
    if not boxes:
        return "沒有物品正拿在手上"
    # Join with commas instead of accumulating "x," fragments, which left
    # a dangling trailing comma in the original implementation.
    return ",".join(labels[box[0]] for box in boxes)
def process_results(predictions):
    """Convert raw YOLO predictions [x1, y1, x2, y2, conf, cls] into
    (class_id, confidence, (x1, y1, x2, y2)) tuples with integer coords."""
    return [
        (int(p[5]), p[4], (int(p[0]), int(p[1]), int(p[2]), int(p[3])))
        for p in predictions
    ]
In [6]:
def calculate_dist(a, b):
    """Euclidean distance between two 2-D keypoints.

    Coordinates are truncated to int before subtraction (keypoints arrive
    as floats; the truncation matches the original behavior).
    """
    dx = int(b[0]) - int(a[0])
    dy = int(b[1]) - int(a[1])
    return np.sqrt(dx * dx + dy * dy)
In [7]:
# UI widgets: the live video frame plus the status text fields beside it.
w_image = widgets.Image(format='jpeg', width=640, height=480)
left_arm = widgets.Text(value='', placeholder='是否有手機?', description='左手:', disabled=False)
right_arm = widgets.Text(value='', placeholder='是否有手機?', description='右手:', disabled=False)
left_hand = widgets.Text(value='', placeholder='吃藥?', description='左手:', disabled=False)
right_hand = widgets.Text(value='', placeholder='吃藥?', description='右手:', disabled=False)
mouth = widgets.Text(value='', placeholder='吃藥?', description='嘴巴:', disabled=False)
medicine = widgets.Text(value='', placeholder='', description='有吃藥嗎:', disabled=False)
timeC = widgets.Text(value='', placeholder='', description='時間:', disabled=False)
alarm = widgets.Text(value='', placeholder='', description='鬧鐘:', disabled=False)

# Dropdown choosing which object class gets its last-seen box highlighted.
dlist = widgets.Dropdown(
    options=[('bottle'), ('waterbottle'), ('heart_medicine'), ('vitamin_c'), ('painkiller')],
    value='bottle',
    description='Number:',
    disabled=False,
)

# Currently selected object class; updated by the dropdown callback below
# and read by run_object_detection.
obj_type = 'bottle'


def headler(change):
    """Dropdown callback: remember the newly selected object class."""
    global obj_type
    print("change to:{}".format(change.new))
    obj_type = change.new
    print(obj_type)


dlist.observe(headler, names='value')

# Layout: image on the left, stacked text fields + dropdown on the right.
text_Contain = widgets.VBox([left_hand, right_hand, mouth, medicine, timeC, alarm, dlist])
Contain = widgets.HBox([w_image, text_Contain])
# Main processing function: run object detection + pose estimation and
# update the dashboard widgets until the source ends or the user interrupts.
def run_object_detection(source=0, flip=False, skip_first_frames=0, alarmH=20, alarmM=0):
    """Capture frames, detect medicines/bottles and body pose, and decide
    whether the user has taken their medicine after the alarm time.

    Parameters:
        source: video source for the player (device index or file path).
        flip: mirror the frames horizontally.
        skip_first_frames: frames to drop at startup.
        alarmH, alarmM: hour/minute after which the reminder fires.
    """
    # Sentinel that labels_string returns when nothing is held.
    NO_ITEM = "沒有物品正拿在手上"
    player = None
    eat_medicine = 0
    try:
        # Create a video player to play with the target fps.
        player = utils.VideoPlayer(
            source=source, flip=flip, fps=30, skip_first_frames=skip_first_frames
        )
        player.start()
        processing_times = collections.deque()
        # Last seen box per object class, so the selected class stays drawn
        # even on frames where it is momentarily not detected.
        saved_pos = {'bottle': [], 'waterbottle': [], 'heart_medicine': [], 'vitamin_c': [], 'painkiller': []}
        with ui_events() as poll:
            while True:
                poll(10)  # keep widget events (e.g. the dropdown) responsive
                frame = player.next()
                if frame is None:
                    print("Source ended")
                    break
                boxes = detect.detect_image(model, frame)
                boxes = process_results(boxes)
                poses, scores = PoseObj.get_results(frame)
                frame = DetectObj.draw_boxes(frame, boxes)
                frame = PoseObj.draw_poses(frame, poses, 0.1)
                if len(boxes) > 0:
                    # Map YOLO class ids to the saved_pos keys.
                    class_keys = {0: 'painkiller', 1: 'vitamin_c', 2: 'heart_medicine',
                                  3: 'bottle', 4: 'waterbottle'}
                    for id, conf, pos in boxes:
                        if id in class_keys:
                            saved_pos[class_keys[id]] = [(id, conf, pos)]
                frame = DetectObj.draw_boxes(frame=frame, boxes=saved_pos[obj_type])
                # Arm-level proximity check (first detected pose only).
                for pose in poses:
                    left_arm.value = labels_string(_is_boxes_pose(boxes, pose[10], 200), DetectObj.get_my_labels())
                    right_arm.value = labels_string(_is_boxes_pose(boxes, pose[9], 200), DetectObj.get_my_labels())
                    break
                for pose in poses:
                    # Keypoint indices: 0 = face/mouth area, 9 = right wrist,
                    # 10 = left wrist (assumed from usage — TODO confirm with PoseDetect).
                    left_hand.value = labels_string(_is_boxes_pose(boxes, pose[10], 250, 0.25), DetectObj.get_my_labels())
                    right_hand.value = labels_string(_is_boxes_pose(boxes, pose[9], 250, 0.25), DetectObj.get_my_labels())
                    mouth.value = labels_string(_is_boxes_pose(boxes, pose[0], 250, 0.25), DetectObj.get_my_labels())
                    # BUGFIX: compare against the actual "nothing held" sentinel —
                    # the original compared to "沒有", which labels_string never
                    # returns, so the check was always true.
                    if left_hand.value != NO_ITEM and calculate_dist(pose[10], pose[0]) < 250:
                        medicine.value = "你有吃藥耶你好棒"
                        eat_medicine = 1
                    # BUGFIX: use the right wrist (pose[9]) here — the original
                    # reused pose[10] (left wrist) for the right-hand check.
                    if right_hand.value != NO_ITEM and calculate_dist(pose[9], pose[0]) < 250:
                        medicine.value = "你有吃藥耶你好棒"
                        eat_medicine = 1
                    if mouth.value != NO_ITEM:
                        medicine.value = "你有吃藥耶你好棒"
                        eat_medicine = 1
                    tonow = datetime.datetime.now()
                    timeC.value = tonow.strftime('%Y/%m/%d %H:%M:%S')
                    # BUGFIX: compare (hour, minute) lexicographically — the
                    # original `hour >= alarmH and minute >= alarmM` failed for
                    # e.g. 21:00 with an alarm of 20:56.
                    if (tonow.hour, tonow.minute) >= (alarmH, alarmM) and eat_medicine == 0:
                        alarm.value = "吃藥啦 阿北"
                        medicine.value = "沒有"
                    else:
                        alarm.value = ""
                # Single image update per frame (the original wrote it twice).
                w_image.value = bgr8_to_jpeg(frame)
    # Ctrl-C stops the loop cleanly.
    except KeyboardInterrupt:
        print("Interrupted")
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # Stop capturing.
            player.stop()
In [10]:
# Show the dashboard and start the detection loop (alarm set to 08:56).
display(Contain)
run_object_detection(source=0, flip=True, alarmH=8, alarmM=56)
HBox(children=(Image(value=b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xdb\x00C…
change to:waterbottle waterbottle Interrupted
In [9]:
# Standalone widget demo: a combobox plus a button that prints the selection.
# (Removed the duplicate, unused `import time` — time is already imported in
# the setup cell at the top of the notebook.)
def show_result(args):
    """Button callback: print the current combobox selection."""
    print(combobox_t.value)


button_t = widgets.Button(
    description='Click me',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
    icon='check'
)
combobox_t = widgets.Combobox(
    value='water_bottle',
    placeholder='Choose an item',
    options=['water_bottle', 'bottle', 'vitamin_c', 'painkiller', 'heart_medicine'],
    description='Combobox:',
    ensure_option=True,
    disabled=False
)
display(combobox_t, button_t)
button_t.on_click(show_result)
Combobox(value='water_bottle', description='Combobox:', ensure_option=True, options=('water_bottle', 'bottle',…
Button(description='Click me', icon='check', style=ButtonStyle(), tooltip='Click me')
In [ ]:
In [ ]: