Mirror of https://github.com/RootKit-Org/AI-Aimbot.git (synced 2025-06-21 02:41:01 +08:00)

Commit ae24cc3f49

README.md (19 lines changed)
@@ -9,11 +9,6 @@ Watch the shorts video! - https://youtu.be/EEgspHlU_H0
 
 Join the Discord - https://discord.gg/rootkit
 
-## V2 - Coming soon
-
-We have already finished the V2 bot BUT will be releasing it on the 6th. Patreon subs will get it right now (https://www.patreon.com/rootkit)!
-
-V2 bot runs about 5-8x faster. Additionally, V2 will NOT need detectron2, so it will run on more computers.
 ## Current Stats
 
 This bot's speed is VERY dependent on your hardware. We will update the model it uses for detection later with a faster one.
@@ -22,14 +17,12 @@ Bot was tested on a:
 - 64 GB DDR4
 - Nvidia RTX 2080
 
-We got anywhere from 5-15 corrections per second, which is pretty slow. All games were run at 1280x720 or close to it.
-The main slow down is the model's prediction speed, averaging anywhere from .09-.29s.
-The second biggest slow down is the garbage collection. It runs only once per second and takes about .05 seconds to run in generation 0.
+We got anywhere from 15-35 corrections per second. All games were run at 1280x720 or close to it when testing.
 
 ANYTHING dealing with Machine Learning can be funky with your computer. So if you keep getting CUDA errors, you may want to restart your PC in order to make sure everything resets properly.
 
 ### REQUIREMENTS
-- Nvidia RTX 2080/3070 or higher
+- Nvidia RTX 2050 or higher
 - Nvidia CUDA Toolkit 11.3 (https://developer.nvidia.com/cuda-11.3.0-download-archive)
 
 ### Pre-setup
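The corrections-per-second and garbage-collection figures in the old stats text come from the same counter pattern main.py uses below. A minimal standalone sketch of that pattern (my reconstruction, not repo code) that counts loop iterations and forces a generation-0 collection once per second:

```
import gc
import time

count = 0
sTime = time.time()
while True:
    time.sleep(0.03)  # stand-in for one screenshot + detection + correction
    count += 1
    if (time.time() - sTime) > 1:
        print(count, "iterations in the last second")
        count = 0
        sTime = time.time()

        t0 = time.perf_counter()
        gc.collect(generation=0)  # collect only the youngest generation
        print("gen-0 collect took", round(time.perf_counter() - t0, 4), "s")
```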
@@ -41,8 +34,6 @@ ANYTHING dealing with Machine Learning can be funky with your computer. So if yo
 
 4. To install `PyTorch` go to this website, https://pytorch.org/get-started/locally/, and select the stable build, your OS, Pip, Python, and CUDA 11.3. Then run the command it generates.
 
-5. To install `detectron2` go to this website, https://detectron2.readthedocs.io/en/latest/tutorials/install.html, and follow the instructions. **They don't officially support Windows but it will work on Windows**. You will either need to install it on the WSL or you will need the underlying compiler.
-
 6. Copy and paste the commands below into your terminal. This will install the Open Source packages needed to run the program.
 ```
 pip install PyAutoGUI
@@ -51,6 +42,12 @@ pip install Pillow
 pip install opencv-python
 pip install mss
 pip install numpy
+pip install pandas
+pip install pywin32
+pip install PyYAML
+pip install tqdm
+pip install matplotlib
+pip install seaborn
 ```
 
 ### Run
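A note on step 4 above: it hinges on selecting CUDA 11.3 in the PyTorch selector, which at the time generated a command along the lines of `pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113` (the selector's output is authoritative). A quick check, not from the repo, to confirm the install is actually GPU-enabled before running the bot:

```
import torch

# True plus a CUDA version string (e.g. 11.3) means the GPU build installed correctly
print(torch.cuda.is_available())
print(torch.version.cuda)
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
```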
main.py (109 lines changed)
@@ -1,24 +1,20 @@
+import torch
 import pyautogui
 import gc
-import pydirectinput
 
 import numpy as np
 import os, json, cv2, random
 from PIL import Image
 import time
 import mss
+import win32api, win32con
-from detectron2 import model_zoo
-from detectron2.engine import DefaultPredictor
-from detectron2.config import get_cfg
-from detectron2.utils.visualizer import Visualizer
-from detectron2.data import MetadataCatalog, DatasetCatalog
 
 def main():
     # Window title to go after and the height of the screenshots
-    videoGameWindowTitle = "Counter-Strike"
-    videoGameWindowTitle = "Valorant"
-    screenShotHeight = 250
+    videoGameWindowTitle = "Counter"
+    screenShotHeight = 500
 
     # How big the Autoaim box should be around the center of the screen
     aaDetectionBox = 300
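The new `win32api, win32con` import (provided by the pywin32 package) is what the rewritten loop uses both to poll for the quit key and to emit relative mouse motion. A self-contained sketch of those two calls, separate from the bot (Windows only; the 10/5 offsets are arbitrary illustration values):

```
import time
import win32api, win32con

# Loop until Q has been pressed (GetAsyncKeyState returns nonzero once it has)
while win32api.GetAsyncKeyState(ord('Q')) == 0:
    # Relative move: nudge the cursor 10 px right and 5 px down
    win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, 10, 5, 0, 0)
    time.sleep(0.5)
```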
@@ -29,22 +25,28 @@ def main():
     # 0 will point center mass, 40 will point around the head in CSGO
     aaAimExtraVertical = 40
 
-    # Loading up the object detection model
-    cfg = get_cfg()
-    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"))
-    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
-    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
-    predictor = DefaultPredictor(cfg)
+    # Set to True if you want to get the visuals
+    visuals = False
 
     # Selecting the correct game window
-    videoGameWindows = pyautogui.getWindowsWithTitle(videoGameWindowTitle)
-    videoGameWindow = videoGameWindows[0]
+    try:
+        videoGameWindows = pyautogui.getWindowsWithTitle(videoGameWindowTitle)
+        videoGameWindow = videoGameWindows[0]
+    except:
+        print("The game window you are trying to select doesn't exist.")
+        print("Check variable videoGameWindowTitle (typically on line 15)")
+        exit()
 
     # Select that Window
     videoGameWindow.activate()
 
     # Setting up the screen shots
     sctArea = {"mon": 1, "top": videoGameWindow.top + round((videoGameWindow.height - screenShotHeight) / 2), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": screenShotHeight}
 
+    #! Uncomment if you want to view the entire screen
+    # sctArea = {"mon": 1, "top": 0, "left": 0, "width": 1920, "height": 1080}
+
+    # Starting the screenshotting engine
     sct = mss.mss()
 
     # Calculating the center Autoaim box
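The `sctArea` dict above is the capture region handed to mss. A standalone sketch (region values are placeholders) of why the loop's `np.delete` call exists: `sct.grab` returns BGRA pixels, so the alpha channel has to be dropped before the detector sees the frame:

```
import mss
import numpy as np

sct = mss.mss()
region = {"top": 0, "left": 0, "width": 640, "height": 250}  # placeholder region

frame = np.array(sct.grab(region))  # BGRA screenshot, shape (250, 640, 4)
bgr = np.delete(frame, 3, axis=2)   # drop alpha -> shape (250, 640, 3)
print(frame.shape, bgr.shape)
```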
@@ -55,48 +57,73 @@ def main():
     count = 0
     sTime = time.time()
 
-    # Main loop
-    while True:
+    # Loading the YOLOv5 Small AI model
+    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
+
+    # Used for colors drawn on bounding boxes
+    COLORS = np.random.uniform(0, 255, size=(1500, 3))
+
+    # Main loop. Quit if Q is pressed
+    while win32api.GetAsyncKeyState(ord('Q')) == 0:
         # Getting the screenshot, making it into an np.array and dropping the alpha dimension
         npImg = np.delete(np.array(sct.grab(sctArea)), 3, axis=2)
 
         # Detecting all the objects
-        predictions = predictor(npImg)
+        results = model(npImg).pandas().xyxy[0]
 
-        # Removing anything that isn't a human and getting the center of those object boxes
-        predCenters = predictions['instances'][predictions['instances'].pred_classes == 0].pred_boxes.get_centers()
+        # Filtering out everything that isn't a person
+        filteredResults = results[results['class'] == 0]
 
         # Returns an array of trues/falses depending on if it is in the center Autoaim box or not
-        cResults = ((predCenters[::,0] > cWidth - aaDetectionBox) & (predCenters[::,0] < cWidth + aaDetectionBox)) & \
-            ((predCenters[::,1] > cHeight - aaDetectionBox) & (predCenters[::,1] < cHeight + aaDetectionBox))
+        cResults = ((filteredResults["xmin"] > cWidth - aaDetectionBox) & (filteredResults["xmax"] < cWidth + aaDetectionBox)) & \
+            ((filteredResults["ymin"] > cHeight - aaDetectionBox) & (filteredResults["ymax"] < cHeight + aaDetectionBox))
 
-        # Moves variable from the GPU to CPU
-        predCenters = predCenters.to("cpu")
-
-        # Removes all predictions that aren't closest to the center
-        targets = np.array(predCenters[cResults])
-
-        # If there are targets in the center box
+        # Removes persons that aren't in the center bounding box
+        targets = filteredResults[cResults]
+
+        # If there are people in the center bounding box
         if len(targets) > 0:
-            # Get the first target
-            mouseMove = targets[0] - [cWidth, cHeight + aaAimExtraVertical]
-
-            # Move the mouse
-            pydirectinput.move(round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), relative=True)
+            # All logic is just done on the random person that shows up first in the list
+            xMid = round((targets.iloc[0].xmax + targets.iloc[0].xmin) / 2)
+            yMid = round((targets.iloc[0].ymax + targets.iloc[0].ymin) / 2)
+
+            mouseMove = [xMid - cWidth, yMid - (cHeight + aaAimExtraVertical)]
+
+            # Moving the mouse
+            win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), 0, 0)
+
+        # See what the bot sees
+        if visuals:
+            # Loops over every item identified and draws a bounding box
+            for i in range(0, len(results)):
+                (startX, startY, endX, endY) = int(results["xmin"][i]), int(results["ymin"][i]), int(results["xmax"][i]), int(results["ymax"][i])
+
+                confidence = results["confidence"][i]
+
+                idx = int(results["class"][i])
+
+                # Draw the bounding box and label on the frame
+                label = "{}: {:.2f}%".format(results["name"][i], confidence * 100)
+                cv2.rectangle(npImg, (startX, startY), (endX, endY), COLORS[idx], 2)
+                y = startY - 15 if startY - 15 > 15 else startY + 15
+                cv2.putText(npImg, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
 
         # Forced garbage cleanup every second
         count += 1
         if (time.time() - sTime) > 1:
+            print(count)
             count = 0
            sTime = time.time()
 
            gc.collect(generation=0)
 
-        #! Uncomment to see visually what the Aimbot sees
-        # v = Visualizer(npImg[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
-        # out = v.draw_instance_predictions(predictions["instances"].to("cpu"))
-        # cv2.imshow('sample image', out.get_image()[:, :, ::-1])
-        # if (cv2.waitKey(1) & 0xFF) == ord('q'):
-        #     exit()
+        # See visually what the Aimbot sees
+        if visuals:
+            cv2.imshow('Live Feed', npImg)
+            if (cv2.waitKey(1) & 0xFF) == ord('q'):
+                exit()
 
 if __name__ == "__main__":
     main()
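To summarize the new detection path: torch.hub pulls YOLOv5s from the ultralytics repo, and `results.pandas().xyxy[0]` yields a DataFrame with one row per detection and columns xmin, ymin, xmax, ymax, confidence, class, name, where COCO class 0 is 'person'. A standalone sketch of that flow, using Ultralytics' demo image instead of a live screenshot:

```
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Any image path, URL, or numpy array works; this is Ultralytics' demo image
results = model('https://ultralytics.com/images/zidane.jpg')

df = results.pandas().xyxy[0]  # one row per detection
people = df[df['class'] == 0]  # keep only 'person' boxes
for _, row in people.iterrows():
    # Box center, the same math the bot aims with
    xMid = round((row.xmax + row.xmin) / 2)
    yMid = round((row.ymax + row.ymin) / 2)
    print(row['name'], round(row.confidence, 2), (xMid, yMid))
```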