ultralytics 8.2.46 Results, DFL and AIGym fixes (#14074)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: AAOMM <52826299+Chayanonjackal@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Muhammad Rizwan Munawar <muhammadrizwanmunawar123@gmail.com>
Co-authored-by: zzzer <48149018+zhixuwei@users.noreply.github.com>
Co-authored-by: Abirami Vina <abirami.vina@gmail.com>
Francesco Mattioli 2024-06-29 18:10:30 +02:00 committed by GitHub
parent f5ccddf5df
commit 645c83671f
10 changed files with 28 additions and 26 deletions

docs/en/guides/workouts-monitoring.md

@@ -55,16 +55,14 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
    kpts_to_check=[6, 8, 10],
)

-frame_count = 0
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
-    frame_count += 1
    results = model.track(im0, verbose=False) # Tracking recommended
    # results = model.predict(im0) # Prediction also supported
-    im0 = gym_object.start_counting(im0, results, frame_count)
+    im0 = gym_object.start_counting(im0, results)

cv2.destroyAllWindows()
```
@@ -90,16 +88,14 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
    kpts_to_check=[6, 8, 10],
)

-frame_count = 0
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
-    frame_count += 1
    results = model.track(im0, verbose=False) # Tracking recommended
    # results = model.predict(im0) # Prediction also supported
-    im0 = gym_object.start_counting(im0, results, frame_count)
+    im0 = gym_object.start_counting(im0, results)
    video_writer.write(im0)

cv2.destroyAllWindows()
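
Taken together, the two documentation hunks drop the manual frame counter from the guide's example loop. For readers who want the full picture, a complete version of the updated snippet might look like the sketch below; the pose weights, the video path, and the AIGym constructor keywords other than kpts_to_check are assumptions inferred from the surrounding guide, not part of this diff.

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n-pose.pt")  # assumed pose weights; AIGym needs keypoints
cap = cv2.VideoCapture("workout.mp4")  # hypothetical demo clip
assert cap.isOpened(), "Error reading video file"

gym_object = solutions.AIGym(
    line_thickness=2,
    view_img=True,
    pose_type="pushup",  # assumed; other pose types are also supported
    kpts_to_check=[6, 8, 10],  # shoulder, elbow and wrist keypoint indices
)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
    results = model.track(im0, verbose=False)  # Tracking recommended
    im0 = gym_object.start_counting(im0, results)  # frame_count argument is gone

cap.release()
cv2.destroyAllWindows()
```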

docs/mkdocs_github_authors.yaml

@@ -11,6 +11,7 @@
47978446+sergiuwaxmann@users.noreply.github.com: sergiuwaxmann
48149018+zhixuwei@users.noreply.github.com: zhixuwei
49699333+dependabot[bot]@users.noreply.github.com: dependabot
+52826299+Chayanonjackal@users.noreply.github.com: Chayanonjackal
61612323+Laughing-q@users.noreply.github.com: Laughing-q
62214284+Burhan-Q@users.noreply.github.com: Burhan-Q
68285002+Kayzwer@users.noreply.github.com: Kayzwer

ultralytics/cfg/default.yaml

@@ -11,7 +11,7 @@ epochs: 100 # (int) number of epochs to train for
time: # (float, optional) number of hours to train for, overrides epochs if supplied
patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
batch: 16 # (int) number of images per batch (-1 for AutoBatch)
-imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes
+imgsz: 640 # (int | list) input images size as int for train and val modes, or list[h,w] for predict and export modes
save: True # (bool) save train checkpoints and predict results
save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
cache: False # (bool) True/ram, disk or False. Use cache for data loading
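
This one-character comment fix is load-bearing for users of rectangular inputs: a list-valued imgsz is read as height first, then width, in predict and export modes. A minimal sketch of the convention, with illustrative weights, dataset, and image URL:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # illustrative weights

# Train and val take a single int; images are letterboxed to a square.
model.train(data="coco8.yaml", epochs=1, imgsz=640)

# Predict and export accept a list, interpreted as [height, width].
model.predict("https://ultralytics.com/images/bus.jpg", imgsz=[640, 480])
model.export(format="onnx", imgsz=[640, 480])
```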

ultralytics/cfg/models/v10/yolov10b.yaml

@@ -3,7 +3,7 @@
# Parameters
nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
  # [depth, width, max_channels]
  b: [0.67, 1.00, 512]
@@ -21,7 +21,7 @@ backbone:
  - [-1, 1, SPPF, [1024, 5]] # 9
  - [-1, 1, PSA, [1024]] # 10

-# YOLOv8.0n head
+# YOLOv10.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
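
This two-line fix recurs verbatim in the five sibling scale files below; only the row under scales differs. The corrected comment also documents how scale selection works: the letter suffix of the requested YAML name picks one [depth, width, max_channels] row from this shared template. A short sketch of that mechanism, assuming the bundled configs resolve by name:

```python
from ultralytics import YOLO

# "yolov10b.yaml" resolves against this template with scale "b", so the model
# builder applies depth=0.67 and width=1.00 and caps layer channels at 512.
model = YOLO("yolov10b.yaml")
model.info()  # prints the layer table and parameter count
```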

ultralytics/cfg/models/v10/yolov10l.yaml

@@ -3,7 +3,7 @@
# Parameters
nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
  # [depth, width, max_channels]
  l: [1.00, 1.00, 512]
@@ -21,7 +21,7 @@ backbone:
  - [-1, 1, SPPF, [1024, 5]] # 9
  - [-1, 1, PSA, [1024]] # 10

-# YOLOv8.0n head
+# YOLOv10.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10m.yaml

@@ -3,7 +3,7 @@
# Parameters
nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
  # [depth, width, max_channels]
  m: [0.67, 0.75, 768]
@@ -21,7 +21,7 @@ backbone:
  - [-1, 1, SPPF, [1024, 5]] # 9
  - [-1, 1, PSA, [1024]] # 10

-# YOLOv8.0n head
+# YOLOv10.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10n.yaml

@@ -3,7 +3,7 @@
# Parameters
nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024]
@@ -21,7 +21,7 @@ backbone:
  - [-1, 1, SPPF, [1024, 5]] # 9
  - [-1, 1, PSA, [1024]] # 10

-# YOLOv8.0n head
+# YOLOv10.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10s.yaml

@@ -3,7 +3,7 @@
# Parameters
nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
  # [depth, width, max_channels]
  s: [0.33, 0.50, 1024]
@@ -21,7 +21,7 @@ backbone:
  - [-1, 1, SPPF, [1024, 5]] # 9
  - [-1, 1, PSA, [1024]] # 10

-# YOLOv8.0n head
+# YOLOv10.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10x.yaml

@@ -3,7 +3,7 @@
# Parameters
nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
  # [depth, width, max_channels]
  x: [1.00, 1.25, 512]
@@ -21,7 +21,7 @@ backbone:
  - [-1, 1, SPPF, [1024, 5]] # 9
  - [-1, 1, PSA, [1024]] # 10

-# YOLOv8.0n head
+# YOLOv10.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/solutions/ai_gym.py

@@ -53,24 +53,29 @@ class AIGym:
        # Check if environment supports imshow
        self.env_check = check_imshow(warn=True)
+        self.count = list()
+        self.angle = list()
+        self.stage = list()

-    def start_counting(self, im0, results, frame_count):
+    def start_counting(self, im0, results):
        """
        Function used to count the gym steps.

        Args:
            im0 (ndarray): Current frame from the video stream.
            results (list): Pose estimation data.
-            frame_count (int): Current frame count.
        """
        self.im0 = im0

-        # Initialize count, angle, and stage lists on the first frame
-        if frame_count == 1:
-            self.count = [0] * len(results[0])
-            self.angle = [0] * len(results[0])
-            self.stage = ["-" for _ in results[0]]
+        if not len(results[0]):
+            return self.im0
+
+        if len(results[0]) > len(self.count):
+            new_human = len(results[0]) - len(self.count)
+            self.count += [0] * new_human
+            self.angle += [0] * new_human
+            self.stage += ["-"] * new_human

        self.keypoints = results[0].keypoints.data
        self.annotator = Annotator(im0, line_width=self.tf)
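
The net effect of this final hunk: AIGym no longer sizes its per-person state on frame one through a caller-supplied frame_count. The count, angle, and stage lists now start empty in __init__, start_counting returns early when a frame has no detections, and the lists grow whenever more people appear than have been seen so far. A standalone sketch of that bookkeeping (illustrative only, not the class itself):

```python
# Per-person state persists across frames and is extended on demand.
count, angle, stage = [], [], []  # as initialized in AIGym.__init__

for people in [2, 2, 4, 0, 3]:  # hypothetical detection counts per frame
    if not people:
        continue  # mirrors the early return on empty results
    if people > len(count):
        new_human = people - len(count)
        count += [0] * new_human
        angle += [0] * new_human
        stage += ["-"] * new_human
    print(len(count))  # prints 2, 2, 4, 4: state is never reset mid-video
```

Because the lists only ever grow, per-slot totals survive frames where people temporarily drop out of view, which is also why the documentation hunks above remove frame_count from the example loop.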