Ready for release to Patreon

This commit is contained in:
Qfc9 2022-01-09 20:01:40 -05:00
parent 6ca82dc752
commit fd035cff00
2 changed files with 109 additions and 42 deletions

README.md (new file)

@@ -0,0 +1,60 @@
# Base Undetectable Aimbot
Watch the video! - Coming Soon
Join the Discord - https://discord.gg/rootkit
## Current Stats
This bot's speed is VERY dependent on your hardware. We will update the model it uses for detection with a faster one later.
The bot was tested on:
- AMD Ryzen 7 2700
- 64 GB DDR4
- Nvidia RTX 2080
We got anywhere from 5-15 corrections per second, which is pretty slow. All games were run at 1280x720 or close to it.
The main slowdown is the model's prediction speed, which averages anywhere from 0.09-0.29 seconds per frame.
The second biggest slowdown is garbage collection. It is forced to run only once per second and a generation-0 collection takes about 0.05 seconds.
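If you want to check the prediction speed on your own hardware, a minimal timing sketch looks like the one below. It assumes the `predictor` and `npImg` objects that `main.py` sets up (a detectron2 `DefaultPredictor` and an mss screenshot converted to a numpy array) already exist.
```
import time

# Rough per-frame timing; `predictor` and `npImg` are assumed to come from main.py
start = time.time()
predictions = predictor(npImg)
print(f"prediction took {time.time() - start:.3f} s")
```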
ANYTHING dealing with machine learning can be funky on your computer, so if you keep getting CUDA errors, you may want to restart your PC to make sure everything resets properly.
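Before blaming the bot, it can also help to confirm that PyTorch actually sees your GPU. This is just a diagnostic sketch using standard PyTorch calls, not part of the bot itself:
```
import torch

# False means PyTorch was installed without CUDA support or the driver/toolkit is broken
print(torch.cuda.is_available())
# The CUDA version this PyTorch build was compiled against (should be 11.3 for this setup)
print(torch.version.cuda)
# Name of the GPU PyTorch will use, if one is available
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
```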
### REQUIREMENTS
- Nvidia RTX 2080/3070 or higher
- Nvidia CUDA Toolkit 11.3 (https://developer.nvidia.com/cuda-11.3.0-download-archive)
### Pre-setup
1. Unzip the file and place the folder somewhere easy to access
2. Make sure you have a pet Python (aka install python) - https://www.python.org/
3. (Windows Users) Open up either `PowerShell` or `Command Prompt`. This can be done by pressing the Windows Key and searching for one of those applications.
4. To install `PyTorch`, go to https://pytorch.org/get-started/locally/ and select the Stable build, your OS, Pip, Python and CUDA 11.3. Then copy the command it generates and run it (an example of what the generated commands look like is shown at the end of this section).
5. To install `detectron2`, go to https://detectron2.readthedocs.io/en/latest/tutorials/install.html and follow the instructions. They don't officially support Windows, but it will work on Windows (also covered in the example at the end of this section).
6. Copy and paste the commands below into your terminal. This will install the open-source packages needed to run the program.
```
pip install PyAutoGUI
pip install PyDirectInput
pip install Pillow
pip install opencv-python
pip install mss
pip install numpy
```
***IF YOU GET THE FOLLOWING ERROR: `pip is not recognized as an internal or external command, operable program or batch file`, ask someone in the Discord to help you out. We will be releasing a video about this soon, since all the videos currently available for it are garbage.***
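For reference, the commands generated in steps 4 and 5 looked roughly like the ones below at the time of writing (Stable build, Pip, CUDA 11.3, detectron2 installed from source). Treat these as an example only and prefer whatever the two websites generate for you, since exact versions change over time.
```
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
pip install git+https://github.com/facebookresearch/detectron2.git
```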
### Run
If you have Python and the packages installed, you are good to go. Load up any game on your MAIN monitor and load into a match.
1. (Windows Users) Open up either `PowerShell` or `Command Prompt`. This can be done by pressing the Windows Key and searching for one of those applications.
2. Type `cd ` (make sure you add the space after the cd or else I will call you a monkey)
3. Drag and drop the folder that has the bot code onto the terminal
4. Press the enter key
5. Type `python main.py`, press enter and that is it!
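Putting steps 1-5 together, the whole thing looks something like this (the folder path is just an example; yours is wherever you put the bot folder):
```
cd C:\Users\you\Desktop\base-aimbot
python main.py
```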
**We are always looking for new Volunteers to join our Champions!
If you have any ideas for videos or programs, let us know!**

main.py

@@ -15,77 +15,84 @@ from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
# Window title to go after and the height of the screenshots
videoGameWindowTitle = "Counter-Strike"
screenShotHeight = 250
# How big the Autoaim box should be around the center of the screen
aaDetectionBox = 300
# Autoaim speed
aaMovementAmp = 2
# 0 will point center mass, 40 will point around the head in CSGO
aaAimExtraVertical = 40
# Loading up the object detection model
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
predictor = DefaultPredictor(cfg)
# Selecting the correct game window
videoGameWindows = pyautogui.getWindowsWithTitle(videoGameWindowTitle)
videoGameWindow = videoGameWindows[0]
# Select that Window
videoGameWindow.activate()
# mssRegion = {"mon": 1, "top": videoGameWindow.top, "left": videoGameWindow.left, "width": videoGameWindow.width, "height": videoGameWindow.height}
# mssRegion = {"mon": 1, "top": videoGameWindow.top + (round(videoGameWindow.height/64) * 20), "left": videoGameWindow.left + (round(videoGameWindow.width/64) * 8), "width": round(videoGameWindow.width/64) * 48, "height": round(videoGameWindow.height/64) * 16}
# mssRegion = {"mon": 1, "top": videoGameWindow.top + round(videoGameWindow.height/3), "left": videoGameWindow.left + round(videoGameWindow.width/3), "width": round(videoGameWindow.width/3), "height": round(videoGameWindow.height/3)}
# mssRegion = {"mon": 1, "top": videoGameWindow.top, "left": videoGameWindow.left + round(videoGameWindow.width/3), "width": round(videoGameWindow.width/3), "height": videoGameWindow.height}
mssRegion = {"mon": 1, "top": videoGameWindow.top+300, "left": videoGameWindow.left, "width": 1280, "height": 250}
# mssRegion = {"mon": 1, "top": videoGameWindow.top + round(videoGameWindow.height/3), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": round(videoGameWindow.height/3)}
# mssRegion = {"mon": 1, "top": videoGameWindow.top, "left": videoGameWindow.left + round(videoGameWindow.width/3), "width": round(videoGameWindow.width/3), "height": videoGameWindow.height}
# Setting up the screen shots
sctArea = {"mon": 1, "top": videoGameWindow.top + round((videoGameWindow.height - screenShotHeight) / 2), "left": videoGameWindow.left, "width": videoGameWindow.width, "height": screenShotHeight}
sct = mss.mss()
# Calculating the center Autoaim box
cWidth = sctArea["width"] / 2
cHeight = sctArea["height"] / 2
# Used for forcing garbage collection
count = 0
sTime = time.time()
# Main loop
while True:
    # Getting the screenshot, making it into an np.array and dropping the alpha dimension.
    npImg = np.delete(np.array(sct.grab(sctArea)), 3, axis=2)

    # Detecting all the objects
    predictions = predictor(npImg)

    # Removing anything that isn't a human and getting the centers of those object boxes
    predCenters = predictions['instances'][predictions['instances'].pred_classes == 0].pred_boxes.get_centers()

    # Returns an array of Trues/Falses depending on whether each center is inside the central Autoaim box
    cResults = ((predCenters[::,0] > cWidth - aaDetectionBox) & (predCenters[::,0] < cWidth + aaDetectionBox)) & \
        ((predCenters[::,1] > cHeight - aaDetectionBox) & (predCenters[::,1] < cHeight + aaDetectionBox))

    # Moves the variable from the GPU to the CPU
    predCenters = predCenters.to("cpu")

    # Removes all predictions that aren't inside the central Autoaim box
    targets = np.array(predCenters[cResults])

    # If there are targets in the center box
    if len(targets) > 0:
        # Get the first target's offset from the screen center (aaAimExtraVertical shifts the aim above center mass)
        mouseMove = targets[0] - [cWidth, cHeight + aaAimExtraVertical]

        # Move the mouse
        pydirectinput.move(round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), relative=True)

    # Forced garbage cleanup every second
    count += 1
    if (time.time() - sTime) > 1:
        print(count)
        count = 0
        sTime = time.time()
        gc.collect(generation=0)

    # Uncomment to see visually what the Aimbot sees
    # v = Visualizer(npImg[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
    # out = v.draw_instance_predictions(predictions["instances"].to("cpu"))
    # cv2.imshow('sample image', out.get_image()[:, :, ::-1])
    # if (cv2.waitKey(1) & 0xFF) == ord('q'):
    #     exit()