mirror of
https://github.com/RootKit-Org/AI-Aimbot.git
synced 2025-06-21 02:41:01 +08:00
Cleaned up Readme and Main.py for v1 release
This commit is contained in:
parent
fd035cff00
commit
9903da9e8c
21
README.md
21
README.md
@ -1,8 +1,19 @@
|
||||
# Base Undetectable Aimbot
|
||||
Watch the video! - Coming Soon
|
||||
[](http://makeapullrequest.com)
|
||||
# Ultimate Aimbot
|
||||
**Adhere to our GNU licence or else we WILL come after you legally.**<br />
|
||||
- free to use, sell, profit from, literally anything you want to do with it
|
||||
- **credit MUST be given to RootKit for the underlying base code**
|
||||
|
||||
Watch the tutorial video! - Coming Soon<br />
|
||||
Watch the shorts video! - https://youtu.be/EEgspHlU_H0
|
||||
|
||||
Join the Discord - https://discord.gg/rootkit
|
||||
|
||||
## V2 - Coming soon
|
||||
We have already finished the V2 bot BUT will be releasing it on the 6th. Patreon subscribers will get it right now (https://www.patreon.com/rootkit)!
|
||||
|
||||
V2 bot runs about 5-8x faster. Additionally V2 will now need detectron so it will run on more computers.
|
||||
|
||||
## Current Stats
|
||||
This bot's speed is VERY dependent on your hardware. We will update the model it uses for detection later with a faster one.
|
||||
|
||||
@ -30,7 +41,7 @@ ANYTHING dealing with Machine Learning can be funky with your computer. So if yo
|
||||
|
||||
4. To install `PyTorch` go to this website, https://pytorch.org/get-started/locally/, and Select the stable build, your OS, Pip, Python and CUDA 11.3. Then select the text that is generated and run that command.
|
||||
|
||||
5. To install `detectron2` go to this website, https://detectron2.readthedocs.io/en/latest/tutorials/install.html and follow the instructions. They don't officially support Windows but it will work on Windows.
|
||||
5. To install `detectron2` go to this website, https://detectron2.readthedocs.io/en/latest/tutorials/install.html and follow the instructions. **They don't officially support Windows but it will work on Windows**. You will either need to install it on the WSL or you will need the underlying compiler.
|
||||
|
||||
6. Copy and paste the commands below into your terminal. This will install the Open Source packages needed to run the program.
|
||||
```
|
||||
@ -41,7 +52,6 @@ pip install opencv-python
|
||||
pip install mss
|
||||
pip install numpy
|
||||
```
|
||||
***IF YOU GET THE FOLLOWING ERROR `pip is not recognized as an internal or external command, operable program, or batch file` Ask someone in the discord to help you out. We will be releasing a video about this soon since all the videos available for it are garbage.***
|
||||
|
||||
### Run
|
||||
If you have python and the packages you are good to go. Load up any game on your MAIN monitor and load into a game.
|
||||
@ -56,5 +66,8 @@ If you have python and the packages you are good to go. Load up any game on your
|
||||
|
||||
5. Type `python main.py`, press enter and that is it!
|
||||
|
||||
## Community Based
|
||||
We are a community based nonprofit. We are always open to pull requests on any of our repos. You will always be given credit for all of your work. Depending on what you contribute, we will give you any revenue earned on your contributions 💰💰💰!
|
||||
|
||||
**We are always looking for new Volunteers to join our Champions!
|
||||
If you have any ideas for videos or programs, let us know!**
|
||||
|
130
main.py
130
main.py
@ -1,4 +1,3 @@
|
||||
import detectron2
|
||||
import pyautogui
|
||||
import gc
|
||||
import pydirectinput
|
||||
@ -15,84 +14,89 @@ from detectron2.config import get_cfg
|
||||
from detectron2.utils.visualizer import Visualizer
|
||||
from detectron2.data import MetadataCatalog, DatasetCatalog
|
||||
|
||||
# Window title to go after and the height of the screenshots
|
||||
videoGameWindowTitle = "Counter-Strike"
|
||||
screenShotHeight = 250
|
||||
def main(videoGameWindowTitle="Valorant", screenShotHeight=250,
         aaDetectionBox=300, aaMovementAmp=2, aaAimExtraVertical=40):
    """Run the auto-aim loop against the first window matching the title.

    Repeatedly grabs a horizontal strip from the center of the game window,
    runs a COCO-pretrained Mask R-CNN person detector on it, and moves the
    mouse (relative) toward the first detected person whose box center lies
    inside the auto-aim region. Runs forever; interrupt to stop.

    Parameters (all keyword-compatible with the previous hard-coded values):
        videoGameWindowTitle: substring of the game window's title to attach to.
        screenShotHeight: height in pixels of the captured strip.
        aaDetectionBox: half-size in pixels of the auto-aim box around center.
        aaMovementAmp: multiplier applied to the computed mouse movement.
        aaAimExtraVertical: extra upward offset; 0 aims center mass,
            40 aims around the head in CSGO.

    Raises:
        RuntimeError: if no window with a matching title is found.
    """
    # Loading up the object detection model (COCO Mask R-CNN from the zoo).
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
    predictor = DefaultPredictor(cfg)

    # Selecting the correct game window. Fail loudly (instead of an opaque
    # IndexError) when the game is not running.
    videoGameWindows = pyautogui.getWindowsWithTitle(videoGameWindowTitle)
    if not videoGameWindows:
        raise RuntimeError(
            "No window found with a title containing %r" % videoGameWindowTitle)
    videoGameWindow = videoGameWindows[0]

    # Focus that window so the simulated mouse input reaches the game.
    videoGameWindow.activate()

    # Setting up the screenshots: a strip of screenShotHeight pixels
    # vertically centered inside the game window, full window width.
    sctArea = {"mon": 1,
               "top": videoGameWindow.top + round((videoGameWindow.height - screenShotHeight) / 2),
               "left": videoGameWindow.left,
               "width": videoGameWindow.width,
               "height": screenShotHeight}
    sct = mss.mss()

    # Center of the capture area — aim offsets are computed relative to this.
    cWidth = sctArea["width"] / 2
    cHeight = sctArea["height"] / 2

    # Used for forcing garbage collection roughly once per second.
    sTime = time.time()

    # Main loop
    while True:
        # Getting screenshot, making it an np.array and dropping the alpha
        # dimension (BGRA -> BGR).
        npImg = np.delete(np.array(sct.grab(sctArea)), 3, axis=2)

        # Detecting all the objects in the frame.
        predictions = predictor(npImg)

        # Removing anything that isn't a human (COCO class 0 == person) and
        # getting the center of those object boxes.
        predCenters = predictions['instances'][predictions['instances'].pred_classes == 0].pred_boxes.get_centers()

        # Boolean mask: True where a center lies inside the auto-aim box.
        cResults = ((predCenters[::, 0] > cWidth - aaDetectionBox) & (predCenters[::, 0] < cWidth + aaDetectionBox)) & \
            ((predCenters[::, 1] > cHeight - aaDetectionBox) & (predCenters[::, 1] < cHeight + aaDetectionBox))

        # Moves the tensor from the GPU to the CPU so numpy can consume it.
        predCenters = predCenters.to("cpu")

        # Keep only the detections inside the auto-aim box.
        targets = np.array(predCenters[cResults])

        # If there are targets in the center box
        if len(targets) > 0:
            # Offset from screen center to the first target, biased upward
            # by aaAimExtraVertical to aim above center mass.
            mouseMove = targets[0] - [cWidth, cHeight + aaAimExtraVertical]

            # Move the mouse relatively, scaled by the sensitivity multiplier.
            pydirectinput.move(round(mouseMove[0] * aaMovementAmp), round(mouseMove[1] * aaMovementAmp), relative=True)

        # Forced garbage cleanup every second (keeps latency steady by
        # avoiding large unpredictable collections mid-frame).
        if (time.time() - sTime) > 1:
            sTime = time.time()
            gc.collect(generation=0)

        # Uncomment to see visually what the Aimbot sees
        # v = Visualizer(npImg[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
        # out = v.draw_instance_predictions(predictions["instances"].to("cpu"))
        # cv2.imshow('sample image', out.get_image()[:, :, ::-1])
        # if (cv2.waitKey(1) & 0xFF) == ord('q'):
        #     exit()


if __name__ == "__main__":
    main()
|
Loading…
x
Reference in New Issue
Block a user