mirror of
https://github.com/RootKit-Org/AI-Aimbot.git
synced 2025-06-21 02:41:01 +08:00
Ready for release to Patreon
This commit is contained in:
parent
6ca82dc752
commit
fd035cff00
60
README.md
Normal file
60
README.md
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
# Base Undetectable Aimbot
|
||||||
|
Watch the video! - Coming Soon
|
||||||
|
|
||||||
|
Join the Discord - https://discord.gg/rootkit
|
||||||
|
|
||||||
|
## Current Stats
|
||||||
|
This bot's speed is VERY dependent on your hardware. We will update the model it uses for detection later with a faster one.
|
||||||
|
|
||||||
|
Bot was tested on a:
|
||||||
|
- AMD Ryzen 7 2700
|
||||||
|
- 64 GB DDR4
|
||||||
|
- Nvidia RTX 2080
|
||||||
|
|
||||||
|
We got anywhere from 5-15 corrections per second, which is pretty slow. All games were run at 1280x720 or close to it.
|
||||||
|
The main slow down is the model's prediction speed averaging anywhere from .09-.29s.
|
||||||
|
The second biggest slow down is the garbage collection. It runs only once per second and takes about .05 seconds to run in generation 0.
|
||||||
|
|
||||||
|
ANYTHING dealing with Machine Learning can be funky with your computer. So if you keep getting CUDA errors, you may want to restart your PC in order to make sure everything resets properly.
|
||||||
|
|
||||||
|
### REQUIREMENTS
|
||||||
|
- Nvidia RTX 2080/3070 or higher
|
||||||
|
- Nvidia CUDA Toolkit 11.3 (https://developer.nvidia.com/cuda-11.3.0-download-archive)
|
||||||
|
|
||||||
|
### Pre-setup
|
||||||
|
1. Unzip the file and place the folder somewhere easy to access
|
||||||
|
|
||||||
|
2. Make sure you have a pet Python (aka install python) - https://www.python.org/
|
||||||
|
|
||||||
|
3. (Windows Users) Open up either `PowerShell` or `Command Prompt`. This can be done by pressing the Windows Key and searching for one of those applications.
|
||||||
|
|
||||||
|
4. To install `PyTorch` go to this website, https://pytorch.org/get-started/locally/, and Select the stable build, your OS, Pip, Python and CUDA 11.3. Then select the text that is generated and run that command.
|
||||||
|
|
||||||
|
5. To install `detectron2` go to this website, https://detectron2.readthedocs.io/en/latest/tutorials/install.html and follow the instructions. They don't officially support Windows but it will work on Windows.
|
||||||
|
|
||||||
|
6. Copy and paste the commands below into your terminal. This will install the Open Source packages needed to run the program.
|
||||||
|
```
|
||||||
|
pip install PyAutoGUI
|
||||||
|
pip install PyDirectInput
|
||||||
|
pip install Pillow
|
||||||
|
pip install opencv-python
|
||||||
|
pip install mss
|
||||||
|
pip install numpy
|
||||||
|
```
|
||||||
|
***IF YOU GET THE FOLLOWING ERROR `pip is not recognized as an internal or external command, operable program, or batch file` Ask someone in the discord to help you out. We will be releasing a video about this soon since all the videos available for it are garbage.***
|
||||||
|
|
||||||
|
### Run
|
||||||
|
If you have python and the packages you are good to go. Load up any game on your MAIN monitor and load into a game.
|
||||||
|
|
||||||
|
1. (Windows Users) Open up either `PowerShell` or `Command Prompt`. This can be done by pressing the Windows Key and searching for one of those applications.
|
||||||
|
|
||||||
|
2. Type `cd ` (make sure you add the space after the cd or else I will call you a monkey)
|
||||||
|
|
||||||
|
3. Drag and drop the folder that has the bot code onto the terminal
|
||||||
|
|
||||||
|
4. Press the enter key
|
||||||
|
|
||||||
|
5. Type `python main.py`, press enter and that is it!
|
||||||
|
|
||||||
|
**We are always looking for new Volunteers to join our Champions!
|
||||||
|
If you have any ideas for videos or programs, let us know!**
|
91
main.py
91
main.py
@ -15,77 +15,84 @@ from detectron2.config import get_cfg
|
|||||||
from detectron2.utils.visualizer import Visualizer
|
from detectron2.utils.visualizer import Visualizer
|
||||||
from detectron2.data import MetadataCatalog, DatasetCatalog
|
from detectron2.data import MetadataCatalog, DatasetCatalog
|
||||||
|
|
||||||
|
# Window title to go after and the height of the screenshots
|
||||||
|
videoGameWindowTitle = "Counter-Strike"
|
||||||
|
screenShotHeight = 250
|
||||||
|
|
||||||
|
# How big the Autoaim box should be around the center of the screen
|
||||||
|
aaDetectionBox = 300
|
||||||
|
|
||||||
|
# Autoaim speed
|
||||||
|
aaMovementAmp = 2
|
||||||
|
|
||||||
|
# 0 will point center mass, 40 will point around the head in CSGO
|
||||||
|
aaAimExtraVertical = 40
|
||||||
|
|
||||||
|
# Loading up the object detection model
|
||||||
cfg = get_cfg()
|
cfg = get_cfg()
|
||||||
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"))
|
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"))
|
||||||
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
|
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
|
||||||
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
|
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
|
||||||
predictor = DefaultPredictor(cfg)
|
predictor = DefaultPredictor(cfg)
|
||||||
|
|
||||||
# videoGameWindows = pyautogui.getWindowsWithTitle("New World")
|
# Selecting the correct game window
|
||||||
# videoGameWindows = pyautogui.getWindowsWithTitle("Halo Infinite")
|
videoGameWindows = pyautogui.getWindowsWithTitle(videoGameWindowTitle)
|
||||||
videoGameWindows = pyautogui.getWindowsWithTitle("Counter-")
|
|
||||||
videoGameWindow = videoGameWindows[0]
|
videoGameWindow = videoGameWindows[0]
|
||||||
|
|
||||||
# Find the Window titled exactly "New World" (typically the actual game)
|
|
||||||
# for window in videoGameWindows:
|
|
||||||
# if window.title == "New World":
|
|
||||||
# # if window.title == "Halo Infinite":
|
|
||||||
# videoGameWindow = window
|
|
||||||
# break
|
|
||||||
|
|
||||||
# Select that Window
|
# Select that Window
|
||||||
videoGameWindow.activate()
|
videoGameWindow.activate()
|
||||||
# mssRegion = {"mon": 1, "top": videoGameWindow.top, "left": videoGameWindow.left, "width": videoGameWindow.width, "height": videoGameWindow.height}
|
|
||||||
# mssRegion = {"mon": 1, "top": videoGameWindow.top + (round(videoGameWindow.height/64) * 20), "left": videoGameWindow.left + (round(videoGameWindow.width/64) * 8), "width": round(videoGameWindow.width/64) * 48, "height": round(videoGameWindow.height/64) * 16}
|
# Setting up the screen shots
|
||||||
# mssRegion = {"mon": 1, "top": videoGameWindow.top + round(videoGameWindow.height/3), "left": videoGameWindow.left + round(videoGameWindow.width/3), "width": round(videoGameWindow.width/3), "height": round(videoGameWindow.height/3)}
|
sctArea = {"mon": 1, "top": videoGameWindow.top + round((videoGameWindow.height - screenShotHeight) / 2), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": screenShotHeight}
|
||||||
# mssRegion = {"mon": 1, "top": videoGameWindow.top, "left": videoGameWindow.left + round(videoGameWindow.width/3), "width": round(videoGameWindow.width/3), "height": videoGameWindow.height}
|
sct = mss.mss()
|
||||||
mssRegion = {"mon": 1, "top": videoGameWindow.top+300, "left": videoGameWindow.left, "width": 1280, "height": 250}
|
|
||||||
# mssRegion = {"mon": 1, "top": videoGameWindow.top + round(videoGameWindow.height/3), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": round(videoGameWindow.height/3)}
|
# Calculating the center Autoaim box
|
||||||
# mssRegion = {"mon": 1, "top": videoGameWindow.top, "left": videoGameWindow.left + round(videoGameWindow.width/3), "width": round(videoGameWindow.width/3), "height": videoGameWindow.height}
|
cWidth = sctArea["width"] / 2
|
||||||
|
cHeight = sctArea["height"] / 2
|
||||||
|
|
||||||
|
# Used for forcing garbage collection
|
||||||
count = 0
|
count = 0
|
||||||
sTime = time.time()
|
sTime = time.time()
|
||||||
cWidth = mssRegion["width"] / 2
|
|
||||||
cHeight = mssRegion["height"] / 2
|
|
||||||
cMargin = 300
|
|
||||||
print(mssRegion)
|
|
||||||
|
|
||||||
|
# Main loop
|
||||||
sct = mss.mss()
|
|
||||||
while True:
|
while True:
|
||||||
npImg = np.delete(np.array(sct.grab(mssRegion)), 3, axis=2)
|
# Getting screenshop, making into np.array and dropping alpha dimention.
|
||||||
|
npImg = np.delete(np.array(sct.grab(sctArea)), 3, axis=2)
|
||||||
|
|
||||||
outputs = predictor(npImg)
|
# Detecting all the objects
|
||||||
# print(time.time()-aTime)
|
predictions = predictor(npImg)
|
||||||
|
|
||||||
|
# Removing anything that isn't a human and getting the center of those object boxes
|
||||||
|
predCenters = predictions['instances'][predictions['instances'].pred_classes== 0].pred_boxes.get_centers()
|
||||||
|
|
||||||
|
# Returns an array of trues/falses depending if it is in the center Autoaim box or not
|
||||||
|
cResults = ((predCenters[::,0] > cWidth - aaDetectionBox) & (predCenters[::,0] < cWidth + aaDetectionBox)) & \
|
||||||
|
((predCenters[::,1] > cHeight - aaDetectionBox) & (predCenters[::,1] < cHeight + aaDetectionBox))
|
||||||
|
|
||||||
allCenters = outputs['instances'][outputs['instances'].pred_classes== 0].pred_boxes.get_centers()
|
# Moves variable from the GPU to CPU
|
||||||
cResults = ((allCenters[::,0] > cWidth - cMargin) & (allCenters[::,0] < cWidth + cMargin)) & \
|
predCenters = predCenters.to("cpu")
|
||||||
((allCenters[::,1] > cHeight - cMargin) & (allCenters[::,1] < cHeight + cMargin))
|
|
||||||
|
|
||||||
allCenters = allCenters.to("cpu")
|
# Removes all predictions that aren't closest to the center
|
||||||
targets = np.array(allCenters[cResults])
|
targets = np.array(predCenters[cResults])
|
||||||
|
|
||||||
# print(len(target))
|
# If there are targets in the center box
|
||||||
if len(targets) > 0:
|
if len(targets) > 0:
|
||||||
# print(target)
|
# Get the first target
|
||||||
asdf = targets[0] - [cWidth, cHeight+40]
|
mouseMove = targets[0] - [cWidth, cHeight + aaAimExtraVertical]
|
||||||
# print(asdf)
|
|
||||||
|
|
||||||
pydirectinput.move(round(asdf[0]*1), round(asdf[1]*1), relative=True)
|
# Move the mouse
|
||||||
|
pydirectinput.move(round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), relative=True)
|
||||||
|
|
||||||
|
# Forced garbage cleanup every second
|
||||||
count += 1
|
count += 1
|
||||||
|
|
||||||
if (time.time() - sTime) > 1:
|
if (time.time() - sTime) > 1:
|
||||||
# print(time.time()-sTime)
|
|
||||||
print(count)
|
|
||||||
count = 0
|
count = 0
|
||||||
sTime = time.time()
|
sTime = time.time()
|
||||||
gc.collect(generation=0)
|
gc.collect(generation=0)
|
||||||
|
|
||||||
|
# Uncomment to see visually what the Aimbot sees
|
||||||
# v = Visualizer(npImg[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
|
# v = Visualizer(npImg[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
|
||||||
# out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
|
# out = v.draw_instance_predictions(predictions["instances"].to("cpu"))
|
||||||
#
|
|
||||||
# cv2.imshow('sample image',out.get_image()[:, :, ::-1])
|
# cv2.imshow('sample image',out.get_image()[:, :, ::-1])
|
||||||
#
|
|
||||||
# if (cv2.waitKey(1) & 0xFF) == ord('q'):
|
# if (cv2.waitKey(1) & 0xFF) == ord('q'):
|
||||||
# exit()
|
# exit()
|
||||||
|
Loading…
x
Reference in New Issue
Block a user