mirror of
https://github.com/RootKit-Org/AI-Aimbot.git
synced 2025-06-21 02:41:01 +08:00
Merge pull request #7 from wrp5031/feature/wade
Faster analysis and softening of the aiming jitters
This commit is contained in:
commit
10957c4148
21
README.md
21
README.md
@ -1,6 +1,6 @@
|
||||
[](http://makeapullrequest.com)
|
||||
# AI Aimbot
|
||||
**Adhere to our GNU licence!!!**<br />
|
||||
# Ultimate Aimbot
|
||||
**Adhere to our GNU licence or else we WILL come after you legally.**<br />
|
||||
- free to use, sell, profit from, literally anything you want to do with it
|
||||
- **credit MUST be given to RootKit for the underlying base code**
|
||||
|
||||
@ -34,20 +34,9 @@ ANYTHING dealing with Machine Learning can be funky with your computer. So if yo
|
||||
|
||||
4. To install `PyTorch` go to this website, https://pytorch.org/get-started/locally/, and Select the stable build, your OS, Pip, Python and CUDA 11.3. Then select the text that is generated and run that command.
|
||||
|
||||
6. Copy and paste the commands below into your terminal. This will install the Open Source packages needed to run the program.
|
||||
6. Copy and paste the command below into your terminal. This will install the Open Source packages needed to run the program.
|
||||
```
|
||||
pip install PyAutoGUI
|
||||
pip install PyDirectInput
|
||||
pip install Pillow
|
||||
pip install opencv-python
|
||||
pip install mss
|
||||
pip install numpy
|
||||
pip install pandas
|
||||
pip install pywin32
|
||||
pip install pyyaml
|
||||
pip install tqdm
|
||||
pip install matplotlib
|
||||
pip install seaborn
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Run
|
||||
@ -61,7 +50,7 @@ If you have python and the packages you are good to go. Load up any game on your
|
||||
|
||||
4. Press the enter key
|
||||
|
||||
5. Type `python main.py`, press enter and that is it! **IF YOU PRESS Q IT WILL STOP THE PROGRAM**
|
||||
5. Type `python main.py`, press enter and that is it!
|
||||
|
||||
## Community Based
|
||||
We are a community based nonprofit. We are always open to pull requests on any of our repos. You will always be given credit for all of your work. Depending on what you contribute, we will give you any revenue earned on your contributions 💰💰💰!
|
||||
|
56
main.py
56
main.py
@ -1,3 +1,4 @@
|
||||
from unittest import result
|
||||
import torch
|
||||
|
||||
import pyautogui
|
||||
@ -14,16 +15,20 @@ def main():
|
||||
# Window title to go after and the height of the screenshots
|
||||
videoGameWindowTitle = "Counter"
|
||||
|
||||
screenShotHeight = 500
|
||||
# Portion of screen to be captured (This forms a square/rectangle around the center of screen)
|
||||
screenShotHeight = 320
|
||||
screenShotWidth = 320
|
||||
|
||||
# How big the Autoaim box should be around the center of the screen
|
||||
aaDetectionBox = 300
|
||||
aaDetectionBox = 320
|
||||
|
||||
# Autoaim speed
|
||||
aaMovementAmp = 2
|
||||
aaMovementAmp = 1.1
|
||||
|
||||
# 0 will point center mass, 40 will point around the head in CSGO
|
||||
aaAimExtraVertical = 40
|
||||
# Person Class Confidence
|
||||
confidence = 0.5
|
||||
|
||||
headshot_mode = True
|
||||
|
||||
# Set to True if you want to get the visuals
|
||||
visuals = False
|
||||
@ -41,7 +46,10 @@ def main():
|
||||
videoGameWindow.activate()
|
||||
|
||||
# Setting up the screen shots
|
||||
sctArea = {"mon": 1, "top": videoGameWindow.top + round((videoGameWindow.height - screenShotHeight) / 2), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": screenShotHeight}
|
||||
sctArea = {"mon": 1, "top": videoGameWindow.top + (videoGameWindow.height - screenShotHeight) // 2,
|
||||
"left": ((videoGameWindow.left + videoGameWindow.right) // 2) - (screenShotWidth // 2),
|
||||
"width": screenShotWidth,
|
||||
"height": screenShotHeight}
|
||||
|
||||
#! Uncomment if you want to view the entire screen
|
||||
# sctArea = {"mon": 1, "top": 0, "left": 0, "width": 1920, "height": 1080}
|
||||
@ -58,21 +66,24 @@ def main():
|
||||
sTime = time.time()
|
||||
|
||||
# Loading Yolo5 Small AI Model
|
||||
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
|
||||
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, force_reload=True)
|
||||
model.classes = [0]
|
||||
|
||||
# Used for colors drawn on bounding boxes
|
||||
COLORS = np.random.uniform(0, 255, size=(1500, 3))
|
||||
|
||||
# Main loop Quit if Q is pressed
|
||||
last_mid_coord = None
|
||||
aimbot=False
|
||||
while win32api.GetAsyncKeyState(ord('Q')) == 0:
|
||||
# Getting screenshop, making into np.array and dropping alpha dimention.
|
||||
npImg = np.delete(np.array(sct.grab(sctArea)), 3, axis=2)
|
||||
|
||||
# Detecting all the objects
|
||||
results = model(npImg).pandas().xyxy[0]
|
||||
results = model(npImg, size=320).pandas().xyxy[0]
|
||||
|
||||
# Filtering out everything that isn't a person
|
||||
filteredResults = results[results['class']==0]
|
||||
filteredResults = results[(results['class']==0) & (results['confidence']>confidence)]
|
||||
|
||||
# Returns an array of trues/falses depending if it is in the center Autoaim box or not
|
||||
cResults = ((filteredResults["xmin"] > cWidth - aaDetectionBox) & (filteredResults["xmax"] < cWidth + aaDetectionBox)) & \
|
||||
@ -83,14 +94,35 @@ def main():
|
||||
|
||||
# If there are people in the center bounding box
|
||||
if len(targets) > 0:
|
||||
# All logic is just done on the random person that shows up first in the list
|
||||
targets['current_mid_x'] = (targets['xmax'] + targets['xmin']) // 2
|
||||
targets['current_mid_y'] = (targets['ymax'] + targets['ymin']) // 2
|
||||
# Get the last persons mid coordinate if it exists
|
||||
if last_mid_coord:
|
||||
targets['last_mid_x'] = last_mid_coord[0]
|
||||
targets['last_mid_y'] = last_mid_coord[1]
|
||||
# Take distance between current person mid coordinate and last person mid coordinate
|
||||
targets['dist'] = np.linalg.norm(targets.iloc[:, [7,8]].values - targets.iloc[:, [9,10]], axis=1)
|
||||
targets.sort_values(by="dist", ascending=False)
|
||||
|
||||
# Take the first person that shows up in the dataframe (Recall that we sort based on Euclidean distance)
|
||||
xMid = round((targets.iloc[0].xmax + targets.iloc[0].xmin) / 2)
|
||||
yMid = round((targets.iloc[0].ymax + targets.iloc[0].ymin) / 2)
|
||||
|
||||
mouseMove = [xMid - cWidth, yMid - (cHeight + aaAimExtraVertical)]
|
||||
box_height = targets.iloc[0].ymax - targets.iloc[0].ymin
|
||||
if headshot_mode:
|
||||
headshot_offset = box_height * 0.38
|
||||
else:
|
||||
headshot_offset = box_height * 0.2
|
||||
mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]
|
||||
cv2.circle(npImg, (int(mouseMove[0] + xMid), int(mouseMove[1] + yMid - headshot_offset)), 3, (0, 0, 255))
|
||||
|
||||
# Moving the mouse
|
||||
win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), 0, 0)
|
||||
if win32api.GetKeyState(0x14):
|
||||
win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int(mouseMove[0] * aaMovementAmp), int(mouseMove[1] * aaMovementAmp), 0, 0)
|
||||
last_mid_coord = [xMid, yMid]
|
||||
|
||||
else:
|
||||
last_mid_coord = None
|
||||
|
||||
# See what the bot sees
|
||||
if visuals:
|
||||
|
@ -10,4 +10,3 @@ pyyaml
|
||||
tqdm
|
||||
matplotlib
|
||||
seaborn
|
||||
torch
|
||||
|
Loading…
x
Reference in New Issue
Block a user