diff --git a/export.py b/export.py
index 531fa91..e167b20 100644
--- a/export.py
+++ b/export.py
@@ -669,4 +669,4 @@ def main(opt):
 
 if __name__ == '__main__':
     opt = parse_opt()
-    main(opt)
\ No newline at end of file
+    main(opt)
diff --git a/main.py b/main.py
index d3c4ea8..04b808b 100644
--- a/main.py
+++ b/main.py
@@ -28,7 +28,7 @@ def main():
     aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.5
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"
@@ -113,7 +113,7 @@ def main():
         npImg = np.array(camera.get_latest_frame())
 
         # Normalizing Data
-        im = torch.from_numpy(npImg).to('cuda')
+        im = torch.from_numpy(npImg)
         im = torch.movedim(im, 2, 0)
         im = im.half()
         im /= 255
@@ -125,7 +125,7 @@ def main():
 
         # Suppressing results that dont meet thresholds
         pred = non_max_suppression(
-            results, 0.25, 0.25, 0, False, max_det=1000)
+            results, confidence, confidence, 0, False, max_det=1000)
 
         # Converting output to usable cords
         targets = []
@@ -139,11 +139,11 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
-
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
+
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
-
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
+
         # If there are people in the center bounding box
         if len(targets) > 0:
             # Get the last persons mid coordinate if it exists
@@ -187,12 +187,10 @@ def main():
                 (startX, startY, endX, endY) = int(
                     midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)
 
-                confidence = .5
-
                 idx = 0
 
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15
diff --git a/main_onnx_amd.py b/main_onnx_amd.py
index 2d6c4ff..aa6d9fb 100644
--- a/main_onnx_amd.py
+++ b/main_onnx_amd.py
@@ -27,10 +27,10 @@ def main():
     aaRightShift = 0
 
     # Autoaim mouse movement amplifier
-    aaMovementAmp = 0.6
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.35
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"
@@ -105,9 +105,16 @@ def main():
     last_mid_coord = None
     while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
 
-        npImg = cp.array([camera.get_latest_frame()]) / 255
-        npImg = npImg.astype(cp.half)
-        npImg = cp.moveaxis(npImg, 3, 1)
+        # Getting Frame
+        npImg = np.array(camera.get_latest_frame())
+
+        # Normalizing Data
+        im = torch.from_numpy(npImg)
+        im = torch.movedim(im, 2, 0)
+        im = im.half()
+        im /= 255
+        if len(im.shape) == 3:
+            im = im[None]
 
         outputs = ort_sess.run(None, {'images': npImg})
 
@@ -127,10 +134,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:
@@ -177,7 +184,7 @@ def main():
                 idx = 0
 
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15
diff --git a/main_onnx_cpu.py b/main_onnx_cpu.py
index 3420a34..20b5259 100644
--- a/main_onnx_cpu.py
+++ b/main_onnx_cpu.py
@@ -26,10 +26,10 @@ def main():
     aaRightShift = 0
 
    # Autoaim mouse movement amplifier
-    aaMovementAmp = 0.6
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.35
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"
@@ -106,9 +106,16 @@ def main():
     last_mid_coord = None
     while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
 
-        npImg = np.array([camera.get_latest_frame()]) / 255
-        npImg = npImg.astype(np.half)
-        npImg = np.moveaxis(npImg, 3, 1)
+        # Getting Frame
+        npImg = np.array(camera.get_latest_frame())
+
+        # Normalizing Data
+        im = torch.from_numpy(npImg)
+        im = torch.movedim(im, 2, 0)
+        im = im.half()
+        im /= 255
+        if len(im.shape) == 3:
+            im = im[None]
 
         outputs = ort_sess.run(None, {'images': npImg})
 
@@ -128,10 +135,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:
@@ -178,7 +185,7 @@ def main():
                 idx = 0
 
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15
diff --git a/main_onnx_nvidia copy.py b/main_onnx_nvidia.py
similarity index 91%
rename from main_onnx_nvidia copy.py
rename to main_onnx_nvidia.py
index 13aca23..8335ff3 100644
--- a/main_onnx_nvidia copy.py
+++ b/main_onnx_nvidia.py
@@ -27,10 +27,10 @@ def main():
     aaRightShift = 0
 
     # Autoaim mouse movement amplifier
-    aaMovementAmp = 0.6
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.35
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"
@@ -42,7 +42,7 @@ def main():
     cpsDisplay = True
 
     # Set to True if you want to get the visuals
-    visuals = False
+    visuals = True
 
     # Selecting the correct game window
     try:
@@ -105,11 +105,18 @@ def main():
     last_mid_coord = None
     while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
 
-        npImg = cp.array([camera.get_latest_frame()]) / 255
-        npImg = npImg.astype(cp.half)
-        npImg = cp.moveaxis(npImg, 3, 1)
+        # Getting Frame
+        npImg = np.array(camera.get_latest_frame())
 
-        outputs = ort_sess.run(None, {'images': cp.asnumpy(npImg)})
+        # Normalizing Data
+        im = torch.from_numpy(npImg).to('cuda')
+        im = torch.movedim(im, 2, 0)
+        im = im.half()
+        im /= 255
+        if len(im.shape) == 3:
+            im = im[None]
+
+        outputs = ort_sess.run(None, {'images': cp.asnumpy(im)})
 
         im = torch.from_numpy(outputs[0]).to('cpu')
 
@@ -119,7 +126,7 @@ def main():
         targets = []
         for i, det in enumerate(pred):
             s = ""
-            gn = torch.tensor(npImg.shape)[[0, 0, 0, 0]]
+            gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
             if len(det):
                 for c in det[:, -1].unique():
                     n = (det[:, -1] == c).sum()  # detections per class
@@ -127,10 +134,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
        if len(targets) > 0:
@@ -177,7 +184,7 @@ def main():
                 idx = 0
 
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15
diff --git a/main_tensorrt_gpu.py b/main_tensorrt_gpu.py
index d6e8b71..05c29c3 100644
--- a/main_tensorrt_gpu.py
+++ b/main_tensorrt_gpu.py
@@ -94,7 +94,7 @@ def main():
     sTime = time.time()
 
     # Loading Yolo5 Small AI Model
-    model = DetectMultiBackend('yolov5s.engine', device=torch.device(
+    model = DetectMultiBackend('yolov5s320Half.engine', device=torch.device(
         'cuda'), dnn=False, data='', fp16=True)
     stride, names, pt = model.stride, model.names, model.pt
 
@@ -106,17 +106,18 @@ def main():
 
     with torch.no_grad():
         while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
-            npImg = cp.array([camera.get_latest_frame()]) / 255
-            npImg = npImg.astype(cp.half)
+            npImg = cp.array([camera.get_latest_frame()])
+            im = npImg / 255
+            im = im.astype(cp.half)
 
-            im = cp.moveaxis(npImg, 3, 1)
+            im = cp.moveaxis(im, 3, 1)
             im = torch.from_numpy(cp.asnumpy(im)).to('cuda')
 
-            # Converting to numpy for visuals
-            im0 = im[0].permute(1, 2, 0) * 255
-            im0 = im0.cpu().numpy().astype(np.uint8)
-            # Image has to be in BGR for visualization
-            im0 = cv2.cvtColor(im0, cv2.COLOR_RGB2BGR)
+            # # Converting to numpy for visuals
+            # im0 = im[0].permute(1, 2, 0) * 255
+            # im0 = im0.cpu().numpy().astype(np.uint8)
+            # # Image has to be in BGR for visualization
+            # im0 = cv2.cvtColor(im0, cv2.COLOR_RGB2BGR)
 
             # Detecting all the objects
             results = model(im)
@@ -135,10 +136,10 @@ def main():
 
                     for *xyxy, conf, cls in reversed(det):
                         targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                            1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                            1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
             targets = pd.DataFrame(
-                targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+                targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
             # If there are people in the center bounding box
             if len(targets) > 0:
@@ -174,6 +175,7 @@ def main():
 
             # See what the bot sees
             if visuals:
+                npImg = cp.asnumpy(npImg[0])
                 # Loops over every item identified and draws a bounding box
                 for i in range(0, len(targets)):
                     halfW = round(targets["width"][i] / 2)
@@ -185,11 +187,11 @@ def main():
                     idx = 0
 
                     # draw the bounding box and label on the frame
-                    label = "{}: {:.2f}%".format("Human", confidence * 100)
-                    cv2.rectangle(im0, (startX, startY), (endX, endY),
+                    label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
+                    cv2.rectangle(npImg, (startX, startY), (endX, endY),
                                   COLORS[idx], 2)
                     y = startY - 15 if startY - 15 > 15 else startY + 15
-                    cv2.putText(im0, label, (startX, y),
+                    cv2.putText(npImg, label, (startX, y),
                                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
 
             # Forced garbage cleanup every second
@@ -205,7 +207,7 @@ def main():
 
             # See visually what the Aimbot sees
             if visuals:
-                cv2.imshow('Live Feed', im0)
+                cv2.imshow('Live Feed', npImg)
                 if (cv2.waitKey(1) & 0xFF) == ord('q'):
                     exit()
     camera.stop()
diff --git a/main_torch_gpu.py b/main_torch_gpu.py
index f654aac..a76ba94 100644
--- a/main_torch_gpu.py
+++ b/main_torch_gpu.py
@@ -25,10 +25,10 @@ def main():
     aaRightShift = 0
 
     # Autoaim mouse movement amplifier
-    aaMovementAmp = 1.0
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.25
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"
@@ -136,10 +136,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:
@@ -186,7 +186,7 @@ def main():
                 idx = 0
 
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 7687a2b..28d5b79 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -531,13 +531,14 @@ class LoadImagesAndLabels(Dataset):
 
         # Update labels
        include_class = []  # filter labels to include only these classes (optional)
+        self.segments = list(self.segments)
         include_class_array = np.array(include_class).reshape(1, -1)
         for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
             if include_class:
                 j = (label[:, 0:1] == include_class_array).any(1)
                 self.labels[i] = label[j]
                 if segment:
-                    self.segments[i] = segment[j]
+                    self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]
             if single_cls:  # single-class training, merge all classes into 0
                 self.labels[i][:, 0] = 0
 
diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile
index b5d2af9..811ad4a 100644
--- a/utils/docker/Dockerfile
+++ b/utils/docker/Dockerfile
@@ -2,9 +2,8 @@
 # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference
 
-# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
-# FROM docker.io/pytorch/pytorch:latest
-FROM pytorch/pytorch:latest
+# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch
+FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime
 
 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
diff --git a/utils/downloads.py b/utils/downloads.py
index 643b529..88f5237 100644
--- a/utils/downloads.py
+++ b/utils/downloads.py
@@ -118,8 +118,8 @@ def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'):
             except Exception:
                 tag = release
 
-        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
         if name in assets:
+            file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
             safe_download(file,
                           url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                           min_bytes=1E5,
diff --git a/utils/general.py b/utils/general.py
index 7462046..adb9242 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -1119,13 +1119,13 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False):
 imshow_ = cv2.imshow  # copy to avoid recursion errors
 
 
-def imread(path, flags=cv2.IMREAD_COLOR):
-    return cv2.imdecode(np.fromfile(path, np.uint8), flags)
+def imread(filename, flags=cv2.IMREAD_COLOR):
+    return cv2.imdecode(np.fromfile(filename, np.uint8), flags)
 
 
-def imwrite(path, im):
+def imwrite(filename, img):
     try:
-        cv2.imencode(Path(path).suffix, im)[1].tofile(path)
+        cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
         return True
     except Exception:
         return False
diff --git a/utils/segment/general.py b/utils/segment/general.py
index 9da8945..f1b2f1d 100644
--- a/utils/segment/general.py
+++ b/utils/segment/general.py
@@ -10,7 +10,7 @@ def crop_mask(masks, boxes):
     Vectorized by Chong (thanks Chong).
 
     Args:
-        - masks should be a size [h, w, n] tensor of masks
+        - masks should be a size [n, h, w] tensor of masks
         - boxes should be a size [n, 4] tensor of bbox coords in relative point form
     """
 
diff --git a/utils/segment/loss.py b/utils/segment/loss.py
index 2a8a4c6..caeff3c 100644
--- a/utils/segment/loss.py
+++ b/utils/segment/loss.py
@@ -16,7 +16,6 @@ class ComputeLoss:
         self.overlap = overlap
         device = next(model.parameters()).device  # get model device
         h = model.hyp  # hyperparameters
-        self.device = device
 
         # Define criteria
         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
diff --git a/utils/segment/plots.py b/utils/segment/plots.py
index 3ba0976..1b22ec8 100644
--- a/utils/segment/plots.py
+++ b/utils/segment/plots.py
@@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg'
         x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
         annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
         if paths:
-            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
+            annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
         if len(targets) > 0:
             idx = targets[:, 0] == i
             ti = targets[idx]  # image targets