diff --git a/README.md b/README.md
index a4dc089..3328bc7 100644
--- a/README.md
+++ b/README.md
@@ -34,20 +34,9 @@ ANYTHING dealing with Machine Learning can be funky with your computer. So if yo
 4. To install `PyTorch` go to this website, https://pytorch.org/get-started/locally/, and select the stable build, your OS, Pip, Python and CUDA 11.3. Then select the text that is generated and run that command.
 
-6. Copy and past the commands below into your terminal. This will install the Open Source packages needed to run the program.
+6. Copy and paste the command below into your terminal. This will install the Open Source packages needed to run the program.
 
 ```
-pip install PyAutoGUI
-pip install PyDirectInput
-pip install Pillow
-pip install opencv-python
-pip install mss
-pip install numpy
-pip install pandas
-pip install win32api
-pip install yaml
-pip install tqdm
-pip install matplotlib
-pip install seaborn
+pip install -r requirements.txt
 ```
 
 ### Run
diff --git a/main.py b/main.py
index ce845fe..c489e8f 100644
--- a/main.py
+++ b/main.py
@@ -14,16 +14,20 @@ def main():
     # Window title to go after and the height of the screenshots
     videoGameWindowTitle = "Counter"
 
-    screenShotHeight = 500
+    # Portion of the screen to capture (forms a square/rectangle around the center of the screen)
+    screenShotHeight = 320
+    screenShotWidth = 320
 
     # How big the Autoaim box should be around the center of the screen
-    aaDetectionBox = 300
+    aaDetectionBox = 320
 
     # Autoaim speed
-    aaMovementAmp = 2
+    aaMovementAmp = 1.1
 
-    # 0 will point center mass, 40 will point around the head in CSGO
-    aaAimExtraVertical = 40
+    # Person class confidence threshold
+    confidence = 0.5
+
+    headshot_mode = True
 
     # Set to True if you want to get the visuals
     visuals = False
@@ -41,7 +45,10 @@ def main():
     videoGameWindow.activate()
 
     # Setting up the screen shots
-    sctArea = {"mon": 1, "top": videoGameWindow.top + round((videoGameWindow.height - screenShotHeight) / 2), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": screenShotHeight}
+    sctArea = {"mon": 1, "top": videoGameWindow.top + (videoGameWindow.height - screenShotHeight) // 2,
+               "left": ((videoGameWindow.left + videoGameWindow.right) // 2) - (screenShotWidth // 2),
+               "width": screenShotWidth,
+               "height": screenShotHeight}
 
     #! Uncomment if you want to view the entire screen
     # sctArea = {"mon": 1, "top": 0, "left": 0, "width": 1920, "height": 1080}
@@ -58,21 +65,24 @@ def main():
     sTime = time.time()
 
     # Loading Yolo5 Small AI Model
-    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
-
+    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, force_reload=True)
+    model.classes = [0]
+
     # Used for colors drawn on bounding boxes
     COLORS = np.random.uniform(0, 255, size=(1500, 3))
 
     # Main loop Quit if Q is pressed
+    last_mid_coord = None
+    aimbot = False
     while win32api.GetAsyncKeyState(ord('Q')) == 0:
 
         # Getting the screenshot, making it into an np.array and dropping the alpha dimension.
         npImg = np.delete(np.array(sct.grab(sctArea)), 3, axis=2)
 
         # Detecting all the objects
-        results = model(npImg).pandas().xyxy[0]
+        results = model(npImg, size=320).pandas().xyxy[0]
 
         # Filtering out everything that isn't a person
-        filteredResults = results[results['class']==0]
+        filteredResults = results[(results['class']==0) & (results['confidence']>confidence)]
 
         # Returns an array of trues/falses depending if it is in the center Autoaim box or not
         cResults = ((filteredResults["xmin"] > cWidth - aaDetectionBox) & (filteredResults["xmax"] < cWidth + aaDetectionBox)) & \
@@ -83,15 +93,36 @@ def main():
 
         # If there are people in the center bounding box
         if len(targets) > 0:
-            # All logic is just done on the random person that shows up first in the list
+            targets['current_mid_x'] = (targets['xmax'] + targets['xmin']) // 2
+            targets['current_mid_y'] = (targets['ymax'] + targets['ymin']) // 2
+            # Get the last person's mid coordinate if it exists
+            if last_mid_coord:
+                targets['last_mid_x'] = last_mid_coord[0]
+                targets['last_mid_y'] = last_mid_coord[1]
+                # Take the distance between the current and last person mid coordinates
+                targets['dist'] = np.linalg.norm(targets.iloc[:, [7,8]].values - targets.iloc[:, [9,10]].values, axis=1)
+                targets = targets.sort_values(by="dist")
+
+            # Take the first person in the dataframe (recall that we sort by Euclidean distance, closest first)
             xMid = round((targets.iloc[0].xmax + targets.iloc[0].xmin) / 2)
             yMid = round((targets.iloc[0].ymax + targets.iloc[0].ymin) / 2)
 
-            mouseMove = [xMid - cWidth, yMid - (cHeight + aaAimExtraVertical)]
+            box_height = targets.iloc[0].ymax - targets.iloc[0].ymin
+            if headshot_mode:
+                headshot_offset = box_height * 0.38
+            else:
+                headshot_offset = box_height * 0.2
+            mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]
+            cv2.circle(npImg, (int(xMid), int(yMid - headshot_offset)), 3, (0, 0, 255))
 
             # Moving the mouse
-            win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), 0, 0)
-
+            if win32api.GetKeyState(0x14):
+                win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int(mouseMove[0] * aaMovementAmp), int(mouseMove[1] * aaMovementAmp), 0, 0)
+            last_mid_coord = [xMid, yMid]
+
+        else:
+            last_mid_coord = None
+
         # See what the bot sees
         if visuals:
             # Loops over every item identified and draws a bounding box
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..a813336
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+PyAutoGUI
+PyDirectInput
+Pillow
+opencv-python
+mss
+numpy
+pandas
+pywin32
+pyyaml
+tqdm
+matplotlib
+seaborn
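A few notes on the patch follow, each with a small self-contained Python sketch. The concrete numbers and helper names in these sketches are illustrative assumptions, not code from the repo.

First, the new `sctArea` arithmetic centers a `screenShotWidth` x `screenShotHeight` box on the game window instead of capturing a full-width strip. A quick sanity check of that math, assuming a game window covering a 1920x1080 primary monitor:

```python
# Sanity check of the centered-capture math; the window geometry here is an
# assumption for the example, not taken from the repo.
screenShotWidth = screenShotHeight = 320

# Pretend the game window spans the full 1920x1080 primary monitor.
win_top, win_left, win_right, win_height = 0, 0, 1920, 1080

sctArea = {"mon": 1,
           "top": win_top + (win_height - screenShotHeight) // 2,           # 380
           "left": ((win_left + win_right) // 2) - (screenShotWidth // 2),  # 800
           "width": screenShotWidth,
           "height": screenShotHeight}
print(sctArea)  # {'mon': 1, 'top': 380, 'left': 800, 'width': 320, 'height': 320}
```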
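Second, the target-locking logic: each frame computes the midpoint of every detected person, measures the Euclidean distance to the previous frame's target, and aims at the nearest one. Note that `sort_values` returns a new DataFrame, so the result must be reassigned, and ascending order is what puts the closest detection in row 0. A minimal standalone sketch of the same idea, using hypothetical detections:

```python
import numpy as np
import pandas as pd

def pick_target(targets: pd.DataFrame, last_mid_coord):
    """Return the (xMid, yMid) of the detection nearest the previous target."""
    targets = targets.copy()
    targets['current_mid_x'] = (targets['xmax'] + targets['xmin']) // 2
    targets['current_mid_y'] = (targets['ymax'] + targets['ymin']) // 2
    if last_mid_coord is not None:
        deltas = targets[['current_mid_x', 'current_mid_y']].values - np.array(last_mid_coord)
        targets['dist'] = np.linalg.norm(deltas, axis=1)
        # sort_values returns a new frame: it must be reassigned, and
        # ascending order puts the closest detection in row 0.
        targets = targets.sort_values(by='dist')
    row = targets.iloc[0]
    return round((row.xmax + row.xmin) / 2), round((row.ymax + row.ymin) / 2)

# Two hypothetical person boxes; the second is nearer the last aim point.
boxes = pd.DataFrame({'xmin': [10.0, 150.0], 'ymin': [20.0, 140.0],
                      'xmax': [60.0, 200.0], 'ymax': [120.0, 240.0]})
print(pick_target(boxes, last_mid_coord=[170, 190]))  # -> (175, 190)
```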
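Third, the headshot offset: rather than the old fixed `aaAimExtraVertical = 40`, the aim point is now raised by a fraction of the detected box's height (38% in `headshot_mode`, 20% otherwise), so the offset scales with how large the target appears on screen. A worked example with assumed numbers:

```python
# Worked example of the aim-point math; all numbers are assumptions.
cWidth = cHeight = 160              # crosshair sits at the center of a 320x320 capture
xMid, yMid = 180.0, 90.0            # midpoint of a hypothetical person box
box_height = 100.0

headshot_mode = True
headshot_offset = box_height * (0.38 if headshot_mode else 0.2)  # 38.0 px

# Pixel delta from the crosshair to a point ~38 px above the box center.
mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]
print(mouseMove)  # [20.0, -108.0]: right 20 px, up 108 px (before aaMovementAmp scaling)
```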
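Finally, `win32api.GetKeyState(0x14)` gates mouse movement on Caps Lock (0x14 is `VK_CAPITAL`). A bare truthiness check also fires while the key is physically held down, because the high-order bit of the return value reports the pressed state; a stricter toggle-only test masks the low-order bit, as in this sketch (Windows-only, like the rest of main.py):

```python
import win32api

def aim_enabled() -> bool:
    # For lock keys, the low-order bit of GetKeyState reflects the toggle
    # state; masking it ignores the "currently held down" high-order bit.
    return bool(win32api.GetKeyState(0x14) & 0x0001)
```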