Squashed commit of the following:

commit 10957c41487023dc0b996a100b9ca11024ac7ea6
Merge: 3e54f7b edce98a
Author: Elijah Harmon <elijahharmon@gmail.com>
Date:   Thu Apr 14 16:45:07 2022 -0400

    Merge pull request #7 from wrp5031/feature/wade

    Faster analysis and softening of the aiming jitters

commit edce98a36982f83461c4569f4033401ea9a2c546
Merge: 05c9b17 3e54f7b
Author: Elijah Harmon <elijahharmon@gmail.com>
Date:   Thu Apr 14 16:44:21 2022 -0400

    Merge branch 'main' into feature/wade

commit 05c9b17ca50cbd1bdfe5a8d5a283d1b2c32ae4d3
Author: wade <wpines@clarityinnovates.com>
Date:   Thu Apr 14 16:39:26 2022 -0400

    Screen capture area is now centered on the screen. Added headshot mode. If multiple people are detected, the logic targets the person whose mid-coordinate is closest to the last recorded coordinate.
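
    A minimal sketch of the centered capture-area math described above, assuming a
    window object with `left`, `right`, `top`, and `height` attributes like the
    pygetwindow handle used in main.py (the wrapper function itself is illustrative,
    not code from this commit):

    ```
    def centered_capture_area(win, shot_width, shot_height):
        """Build an mss-style capture region centered on the game window."""
        return {
            "mon": 1,
            "top": win.top + (win.height - shot_height) // 2,        # vertical centering
            "left": (win.left + win.right) // 2 - shot_width // 2,   # horizontal centering
            "width": shot_width,
            "height": shot_height,
        }
    ```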

commit 3e54f7ba975b4691fed2f9f61b163fed0f7d54df
Author: Elijah Harmon <elijahharmon@gmail.com>
Date:   Sun Apr 10 00:02:39 2022 -0400

    Create requirements.txt

commit f1fa560e56ac6ed92e645b0b4c78dac08861459f
Author: Elijah Harmon <elijahharmon@gmail.com>
Date:   Sat Apr 2 19:54:37 2022 -0400

    Changed readme title

commit a84ac9a238d47518bb45d64e27354f3fe65073ec
Author: TazMatic <31835653+TazMatic@users.noreply.github.com>
Date:   Thu Mar 31 16:52:02 2022 -0400

    Fix win32api and yaml package names

commit 8e32d8bd309c9e6de926213499d766bdf3d10fc8
Author: Elijah Harmon <elijahharmon@gmail.com>
Date:   Tue Mar 15 16:54:36 2022 -0400

    Update about pressing Q

commit 3dc6835a9b33d7d27e9878b72550416b86fa2406
Author: Elijah Harmon <elijahharmon@gmail.com>
Date:   Tue Mar 15 16:48:20 2022 -0400

    Update Readme

commit ae24cc3f496e2a9c2810f2876c262c3bef46df09
Merge: 21d431d 42954e0
Author: Elijah Harmon <elijahharmon@gmail.com>
Date:   Tue Mar 15 16:42:44 2022 -0400

    Merge pull request #3 from RootKit-Org/dev

    Now using YOLO

Committed by: Qfc9
Date: 2022-04-16 13:29:40 -04:00
Parent: 42954e02be
Commit: bc2247dd72

3 changed files with 60 additions and 27 deletions

README.md

@@ -34,20 +34,9 @@ ANYTHING dealing with Machine Learning can be funky with your computer. So if yo
 4. To install `PyTorch` go to this website, https://pytorch.org/get-started/locally/, and Select the stable build, your OS, Pip, Python and CUDA 11.3. Then select the text that is generated and run that command.
-6. Copy and past the commands below into your terminal. This will install the Open Source packages needed to run the program.
+6. Copy and past the command below into your terminal. This will install the Open Source packages needed to run the program.
 ```
-pip install PyAutoGUI
-pip install PyDirectInput
-pip install Pillow
-pip install opencv-python
-pip install mss
-pip install numpy
-pip install pandas
-pip install win32api
-pip install yaml
-pip install tqdm
-pip install matplotlib
-pip install seaborn
+pip install -r requirements.txt
 ```
 ### Run
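
For reference, the command that selector generates for Pip + CUDA 11.3 looked roughly like the following in early 2022 (reconstructed from memory of the PyTorch site, so treat the exact wheel index URL as an assumption rather than part of this commit):

```
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
```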

main.py

@@ -1,3 +1,4 @@
+from unittest import result
 import torch
 import pyautogui

@@ -14,16 +15,20 @@ def main():
     # Window title to go after and the height of the screenshots
     videoGameWindowTitle = "Counter"
-    screenShotHeight = 500
+    # Portion of screen to be captured (This forms a square/rectangle around the center of screen)
+    screenShotHeight = 320
+    screenShotWidth = 320

     # How big the Autoaim box should be around the center of the screen
-    aaDetectionBox = 300
+    aaDetectionBox = 320

     # Autoaim speed
-    aaMovementAmp = 2
+    aaMovementAmp = 1.1

-    # 0 will point center mass, 40 will point around the head in CSGO
-    aaAimExtraVertical = 40
+    # Person Class Confidence
+    confidence = 0.5
+    headshot_mode = True

     # Set to True if you want to get the visuals
     visuals = False

@@ -41,7 +46,10 @@ def main():
     videoGameWindow.activate()

     # Setting up the screen shots
-    sctArea = {"mon": 1, "top": videoGameWindow.top + round((videoGameWindow.height - screenShotHeight) / 2), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": screenShotHeight}
+    sctArea = {"mon": 1, "top": videoGameWindow.top + (videoGameWindow.height - screenShotHeight) // 2,
+               "left": ((videoGameWindow.left + videoGameWindow.right) // 2) - (screenShotWidth // 2),
+               "width": screenShotWidth,
+               "height": screenShotHeight}

     #! Uncomment if you want to view the entire screen
     # sctArea = {"mon": 1, "top": 0, "left": 0, "width": 1920, "height": 1080}

@@ -58,21 +66,24 @@ def main():
     sTime = time.time()

     # Loading Yolo5 Small AI Model
-    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
+    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, force_reload=True)
+    model.classes = [0]

     # Used for colors drawn on bounding boxes
     COLORS = np.random.uniform(0, 255, size=(1500, 3))

     # Main loop Quit if Q is pressed
+    last_mid_coord = None
+    aimbot=False
     while win32api.GetAsyncKeyState(ord('Q')) == 0:

         # Getting screenshop, making into np.array and dropping alpha dimention.
         npImg = np.delete(np.array(sct.grab(sctArea)), 3, axis=2)

         # Detecting all the objects
-        results = model(npImg).pandas().xyxy[0]
+        results = model(npImg, size=320).pandas().xyxy[0]

         # Filtering out everything that isn't a person
-        filteredResults = results[results['class']==0]
+        filteredResults = results[(results['class']==0) & (results['confidence']>confidence)]

         # Returns an array of trues/falses depending if it is in the center Autoaim box or not
         cResults = ((filteredResults["xmin"] > cWidth - aaDetectionBox) & (filteredResults["xmax"] < cWidth + aaDetectionBox)) & \

@@ -83,14 +94,35 @@ def main():
         # If there are people in the center bounding box
         if len(targets) > 0:
-            # All logic is just done on the random person that shows up first in the list
+            targets['current_mid_x'] = (targets['xmax'] + targets['xmin']) // 2
+            targets['current_mid_y'] = (targets['ymax'] + targets['ymin']) // 2
+
+            # Get the last persons mid coordinate if it exists
+            if last_mid_coord:
+                targets['last_mid_x'] = last_mid_coord[0]
+                targets['last_mid_y'] = last_mid_coord[1]
+                # Take distance between current person mid coordinate and last person mid coordinate
+                targets['dist'] = np.linalg.norm(targets.iloc[:, [7,8]].values - targets.iloc[:, [9,10]], axis=1)
+                targets.sort_values(by="dist", ascending=False)
+
+            # Take the first person that shows up in the dataframe (Recall that we sort based on Euclidean distance)
             xMid = round((targets.iloc[0].xmax + targets.iloc[0].xmin) / 2)
             yMid = round((targets.iloc[0].ymax + targets.iloc[0].ymin) / 2)
-            mouseMove = [xMid - cWidth, yMid - (cHeight + aaAimExtraVertical)]
+
+            box_height = targets.iloc[0].ymax - targets.iloc[0].ymin
+            if headshot_mode:
+                headshot_offset = box_height * 0.38
+            else:
+                headshot_offset = box_height * 0.2
+            mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]
+            cv2.circle(npImg, (int(mouseMove[0] + xMid), int(mouseMove[1] + yMid - headshot_offset)), 3, (0, 0, 255))

             # Moving the mouse
-            win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), 0, 0)
+            if win32api.GetKeyState(0x14):
+                win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int(mouseMove[0] * aaMovementAmp), int(mouseMove[1] * aaMovementAmp), 0, 0)
+            last_mid_coord = [xMid, yMid]
+        else:
+            last_mid_coord = None

         # See what the bot sees
         if visuals:
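
A note on the nearest-target hunk above: pandas' `sort_values` returns a new DataFrame rather than sorting in place, so the result must be reassigned for the distance ordering to take effect (and nearest-first ordering means `ascending=True`). A self-contained sketch of the presumably intended selection, using a hypothetical helper rather than the commit's exact code:

```
import numpy as np
import pandas as pd

def nearest_target(targets: pd.DataFrame, last_mid_coord):
    """Return the detection row whose mid-point is closest to the last target."""
    targets = targets.copy()
    targets['current_mid_x'] = (targets['xmax'] + targets['xmin']) // 2
    targets['current_mid_y'] = (targets['ymax'] + targets['ymin']) // 2
    if last_mid_coord is not None:
        diffs = targets[['current_mid_x', 'current_mid_y']].to_numpy() - np.asarray(last_mid_coord)
        targets['dist'] = np.linalg.norm(diffs, axis=1)
        # sort_values returns a copy: reassign it, nearest first
        targets = targets.sort_values(by='dist', ascending=True)
    return targets.iloc[0]
```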

requirements.txt (new file)

@@ -0,0 +1,12 @@
+PyAutoGUI
+PyDirectInput
+Pillow
+opencv-python
+mss
+numpy
+pandas
+pywin32
+pyyaml
+tqdm
+matplotlib
+seaborn