import onnxruntime as ort
import numpy as np
import gc
import cv2
import time
import win32api
import win32con
import pandas as pd
from utils.general import non_max_suppression, xyxy2xywh
from mouse_driver.MouseMove import mouse_move as ghub_move
import torch

# Could be done with
# from config import *
# But we are writing it out for clarity for new devs
from config import aaMovementAmp, useMask, maskHeight, maskWidth, aaQuitKey, confidence, headshot_mode, cpsDisplay, visuals, onnxChoice, centerOfScreen
import gameSelection


def main():
    # External function for running the game selection menu (gameSelection.py)
    camera, cWidth, cHeight = gameSelection.gameSelection()

    # Used for forcing garbage collection
    count = 0
    sTime = time.time()

    # Choosing the correct ONNX provider based on config.py
    onnxProvider = ""
    if onnxChoice == 1:
        onnxProvider = "CPUExecutionProvider"
    elif onnxChoice == 2:
        onnxProvider = "DmlExecutionProvider"
    elif onnxChoice == 3:
        import cupy as cp
        onnxProvider = "CUDAExecutionProvider"

    so = ort.SessionOptions()
    so.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
    ort_sess = ort.InferenceSession('RRRR.onnx', sess_options=so,
                                    providers=[onnxProvider])

    # Used for colors drawn on bounding boxes
    COLORS = np.random.uniform(0, 255, size=(1500, 3))

    # Main loop; quit when the aaQuitKey from config.py is pressed
    last_mid_coord = None
    while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:

        # Getting Frame
        npImg = np.array(camera.get_latest_frame())

        from config import maskSide  # "temporary" workaround for bad syntax
        if useMask:
            maskSide = maskSide.lower()
            if maskSide == "right":
                npImg[-maskHeight:, -maskWidth:, :] = 0
            elif maskSide == "left":
                npImg[-maskHeight:, :maskWidth, :] = 0
            else:
                raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')

        # If Nvidia, do this
        if onnxChoice == 3:
            # Normalizing Data
            im = torch.from_numpy(npImg).to('cuda')
            if im.shape[2] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :3]
            im = torch.movedim(im, 2, 0)
            im = im.half()
            im /= 255
            if len(im.shape) == 3:
                im = im[None]
        # If AMD or CPU, do this
        else:
            # Normalizing Data
            im = np.array([npImg])
            if im.shape[3] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :, :3]
            im = im / 255
            im = im.astype(np.half)
            im = np.moveaxis(im, 3, 1)

        # If Nvidia, do this
        if onnxChoice == 3:
            outputs = ort_sess.run(None, {'images': cp.asnumpy(im)})
        # If AMD or CPU, do this
        else:
            outputs = ort_sess.run(None, {'images': np.array(im)})

        im = torch.from_numpy(outputs[0]).to('cpu')

        pred = non_max_suppression(
            im, confidence, confidence, 0, False, max_det=10)

        targets = []
        for i, det in enumerate(pred):
            s = ""
            # gn is built from im.shape[0] repeated, i.e. all ones here,
            # so the division below leaves the coordinates in pixels
            gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
            if len(det):
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {int(c)}, "  # add to string
                for *xyxy, conf, cls in reversed(det):
                    targets.append((xyxy2xywh(torch.tensor(xyxy).view(
                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # xywh in pixels

        targets = pd.DataFrame(
            targets, columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])

        center_screen = [cWidth, cHeight]

        # If there are people in the center bounding box
        if len(targets) > 0:
            if centerOfScreen:
                # Compute the distance from the center
                targets["dist_from_center"] = np.sqrt(
                    (targets.current_mid_x - center_screen[0])**2
                    + (targets.current_mid_y - center_screen[1])**2)

                # Sort the data frame by distance from center
                targets = targets.sort_values("dist_from_center")
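            # Target stickiness: when we were locked onto someone last frame,
            # re-rank this frame's detections by how close they are to that
            # previous aim point, so the bot keeps following the same person
            # instead of flicking between targets every frame.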
            # Get the last person's mid coordinate if it exists
            if last_mid_coord:
                targets['last_mid_x'] = last_mid_coord[0]
                targets['last_mid_y'] = last_mid_coord[1]
                # Take distance between current person mid coordinate and last person mid coordinate
                targets['dist'] = np.linalg.norm(
                    targets[['current_mid_x', 'current_mid_y']].values
                    - targets[['last_mid_x', 'last_mid_y']].values, axis=1)
                # Nearest to the last target first, so we keep tracking the same person
                targets = targets.sort_values(by="dist")

            # Take the first person that shows up in the dataframe
            # (Recall that we sort based on Euclidean distance)
            xMid = targets.iloc[0].current_mid_x
            yMid = targets.iloc[0].current_mid_y

            box_height = targets.iloc[0].height
            if headshot_mode:
                headshot_offset = box_height * 0.38
            else:
                headshot_offset = box_height * 0.2

            mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]

            # Moving the mouse while the right mouse button is held,
            # scaled by the aaMovementAmp sensitivity from config.py
            # imagine recalculating everything to find out you have a drop in replacement
            if win32api.GetKeyState(win32con.VK_RBUTTON) < 0:
                ghub_move(mouseMove[0] * aaMovementAmp, mouseMove[1] * aaMovementAmp)
            last_mid_coord = [xMid, yMid]

        else:
            last_mid_coord = None

        # See what the bot sees
        if visuals:
            # Loop over every item identified and draw a bounding box
            for i in range(0, len(targets)):
                halfW = round(targets["width"][i] / 2)
                halfH = round(targets["height"][i] / 2)
                midX = targets['current_mid_x'][i]
                midY = targets['current_mid_y'][i]
                (startX, startY, endX, endY) = int(midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)

                idx = 0
                # draw the bounding box and label on the frame
                label = "{}: {:.2f}%".format(
                    "Character", targets["confidence"][i] * 100)
                cv2.rectangle(npImg, (startX, startY),
                              (endX, endY), COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(npImg, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # Forced garbage cleanup every second
        count += 1
        if (time.time() - sTime) > 1:
            if cpsDisplay:
                print("CPS: {}".format(count))
            count = 0
            sTime = time.time()

            # Uncomment if you keep running into memory issues
            # gc.collect(generation=0)

        # See visually what the Aimbot sees
        if visuals:
            cv2.imshow('Live Feed', npImg)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                exit()
    camera.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exc()
        print("ERROR: " + str(e))
        print("Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")
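# For reference, a sketch of the config.py knobs this script imports. The
# values below are illustrative assumptions only, NOT the project's shipped
# defaults -- always check the real config.py that comes with the repo:
#
#   aaMovementAmp  = 0.8      # mouse movement multiplier (lower = smoother)
#   aaQuitKey      = "Q"      # key that exits the main loop
#   confidence     = 0.4      # minimum detection confidence fed to NMS
#   headshot_mode  = True     # aim 38% up the box instead of 20%
#   cpsDisplay     = True     # print captures-per-second once a second
#   visuals        = False    # show the 'Live Feed' debug window
#   useMask        = False    # black out a corner of the frame (e.g. your own gun)
#   maskSide       = "left"   # which side to mask: "left" or "right"
#   maskWidth      = 80       # mask size in pixels
#   maskHeight     = 200
#   onnxChoice     = 3        # 1 = CPU, 2 = AMD/DirectML, 3 = Nvidia/CUDA
#   centerOfScreen = True     # prefer targets nearest the crosshair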