Mirror of https://github.com/RootKit-Org/AI-Aimbot.git, synced 2025-06-21 02:41:01 +08:00
My Custom scripts added (#153)
* Add files via upload
* Add files via upload
* Update readme.md
This commit is contained in:
parent 7457e51875
commit 3033919f3b
180 customScripts/Villageslayer/main.py Normal file
@@ -0,0 +1,180 @@
import torch
import numpy as np
import cv2
import time
import win32api
import win32con
import pandas as pd
import gc
from utils.general import (cv2, non_max_suppression, xyxy2xywh)

# Could be done with
# from config import *
# But we are writing it out for clarity for new devs
from config import aaMovementAmp, useMask, maskWidth, maskHeight, aaQuitKey, screenShotHeight, confidence, headshot_mode, cpsDisplay, visuals, centerOfScreen
import gameSelection


def main():
    # External function for running the game selection menu (gameSelection.py)
    camera, cWidth, cHeight = gameSelection.gameSelection()

    # Used for forcing garbage collection
    count = 0
    sTime = time.time()

    # Loading the YOLOv5 small model; for better results use yolov5m or yolov5l
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s',
                           pretrained=True, force_reload=True)
    stride, names, pt = model.stride, model.names, model.pt

    if torch.cuda.is_available():
        model.half()

    # Used for colors drawn on bounding boxes
    COLORS = np.random.uniform(0, 255, size=(1500, 3))

    # Main loop; quit when the quit key is pressed
    last_mid_coord = None
    with torch.no_grad():
        while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:

            # Getting frame
            npImg = np.array(camera.get_latest_frame())

            from config import maskSide  # "temporary" workaround for bad syntax
            if useMask:
                maskSide = maskSide.lower()
                if maskSide == "right":
                    npImg[-maskHeight:, -maskWidth:, :] = 0
                elif maskSide == "left":
                    npImg[-maskHeight:, :maskWidth, :] = 0
                else:
                    raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')

            # Normalizing data
            im = torch.from_numpy(npImg)
            if im.shape[2] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :3]

            im = torch.movedim(im, 2, 0)  # HWC -> CHW
            if torch.cuda.is_available():
                im = im.half()
            else:
                im = im.float()  # the in-place divide below needs a floating dtype
            im /= 255
            if len(im.shape) == 3:
                im = im[None]  # add a batch dimension

            # Detecting all the objects
            results = model(im, size=screenShotHeight)

            # Suppressing results that don't meet thresholds
            pred = non_max_suppression(
                results, confidence, confidence, 0, False, max_det=1000)

            # Converting output to usable coords
            targets = []
            for i, det in enumerate(pred):
                s = ""
                # im.shape[0] is the batch size (1), so dividing by gn keeps xywh in pixels
                gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
                if len(det):
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += f"{n} {names[int(c)]}, "  # add to string

                    for *xyxy, conf, cls in reversed(det):
                        targets.append((xyxy2xywh(torch.tensor(xyxy).view(
                            1, 4)) / gn).view(-1).tolist() + [float(conf)])  # xywh plus confidence

            targets = pd.DataFrame(
                targets, columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])

            center_screen = [cWidth, cHeight]

            # If there are people in the center bounding box
            if len(targets) > 0:
                if centerOfScreen:
                    # Compute the distance from the center
                    targets["dist_from_center"] = np.sqrt(
                        (targets.current_mid_x - center_screen[0])**2 + (targets.current_mid_y - center_screen[1])**2)

                    # Sort the data frame by distance from center
                    targets = targets.sort_values("dist_from_center")

                # Get the last person's mid coordinate if it exists
                if last_mid_coord:
                    targets['last_mid_x'] = last_mid_coord[0]
                    targets['last_mid_y'] = last_mid_coord[1]
                    # Distance between the current and last person mid coordinates
                    targets['dist'] = np.linalg.norm(
                        targets[['current_mid_x', 'current_mid_y']].values
                        - targets[['last_mid_x', 'last_mid_y']].values, axis=1)
                    # Note: the sorted result is discarded, so this line has no effect as written
                    targets.sort_values(by="dist", ascending=False)

                # Take the first person in the dataframe (recall we sort by Euclidean distance)
                xMid = targets.iloc[0].current_mid_x
                yMid = targets.iloc[0].current_mid_y

                box_height = targets.iloc[0].height
                if headshot_mode:
                    headshot_offset = box_height * 0.38
                else:
                    headshot_offset = box_height * 0.2

                mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]

                # Moving the mouse (0x14 = VK_CAPITAL: active while Caps Lock is toggled on)
                if win32api.GetKeyState(0x14):
                    win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int(
                        mouseMove[0] * aaMovementAmp), int(mouseMove[1] * aaMovementAmp), 0, 0)
                last_mid_coord = [xMid, yMid]

            else:
                last_mid_coord = None

            # See what the bot sees
            if visuals:
                # Loops over every item identified and draws a bounding box
                for i in range(0, len(targets)):
                    halfW = round(targets["width"][i] / 2)
                    halfH = round(targets["height"][i] / 2)
                    midX = targets['current_mid_x'][i]
                    midY = targets['current_mid_y'][i]
                    (startX, startY, endX, endY) = int(midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)

                    idx = 0

                    # Draw the bounding box and label on the frame
                    label = "{}: {:.2f}%".format(
                        "Human", targets["confidence"][i] * 100)
                    cv2.rectangle(npImg, (startX, startY), (endX, endY),
                                  COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(npImg, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

            # Forced garbage cleanup every second
            count += 1
            if (time.time() - sTime) > 1:
                if cpsDisplay:
                    print("CPS: {}".format(count))
                count = 0
                sTime = time.time()

                # Uncomment if you keep running into memory issues
                # gc.collect(generation=0)

            # See visually what the Aimbot sees
            if visuals:
                cv2.imshow('Live Feed', npImg)
                if (cv2.waitKey(1) & 0xFF) == ord('q'):
                    exit()
    camera.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exception(e)  # the single-argument form needs Python 3.10+
        print("ERROR: " + str(e))
        print("Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")

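For orientation, a minimal config.py sketch covering the names this script and its siblings import; every value below is an illustrative assumption, not a default shipped by the repository:

# Illustrative config.py sketch -- all values are assumptions, not repo defaults
aaMovementAmp = 0.8       # multiplier applied to each mouse movement
useMask = False           # blank out one screen corner before detection
maskSide = "left"         # "left" or "right"
maskWidth = 80            # mask size in pixels
maskHeight = 200
aaQuitKey = "Q"           # key that ends the main loop
screenShotHeight = 320    # capture height fed to the model
confidence = 0.4          # detection/NMS threshold
headshot_mode = True      # aim 38% up the box instead of 20%
cpsDisplay = True         # print captures-per-second once a second
visuals = False           # show the debug window with bounding boxes
centerOfScreen = True     # prefer targets nearest the screen center
onnxChoice = 3            # main_onnx.py only: 1 = CPU, 2 = DirectML, 3 = CUDA
screenShotWidth = 320     # main_tensorrt.py imports this as well
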
203 customScripts/Villageslayer/main_onnx.py Normal file
@@ -0,0 +1,203 @@
import onnxruntime as ort
import numpy as np
import gc
import cv2
import time
import win32api
import win32con
import pandas as pd
from utils.general import (cv2, non_max_suppression, xyxy2xywh)
from mouse_driver.MouseMove import mouse_move as ghub_move
import torch

# Could be done with
# from config import *
# But we are writing it out for clarity for new devs
from config import aaMovementAmp, useMask, maskHeight, maskWidth, aaQuitKey, confidence, headshot_mode, cpsDisplay, visuals, onnxChoice, centerOfScreen
import gameSelection


def main():
    # External function for running the game selection menu (gameSelection.py)
    camera, cWidth, cHeight = gameSelection.gameSelection()

    # Used for forcing garbage collection
    count = 0
    sTime = time.time()

    # Choosing the correct ONNX provider based on config.py
    onnxProvider = ""
    if onnxChoice == 1:
        onnxProvider = "CPUExecutionProvider"
    elif onnxChoice == 2:
        onnxProvider = "DmlExecutionProvider"
    elif onnxChoice == 3:
        import cupy as cp
        onnxProvider = "CUDAExecutionProvider"

    so = ort.SessionOptions()
    so.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
    ort_sess = ort.InferenceSession('RRRR.onnx', sess_options=so,
                                    providers=[onnxProvider])

    # Used for colors drawn on bounding boxes
    COLORS = np.random.uniform(0, 255, size=(1500, 3))

    # Main loop; quit when the quit key is pressed
    last_mid_coord = None
    while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:

        # Getting frame
        npImg = np.array(camera.get_latest_frame())

        from config import maskSide  # "temporary" workaround for bad syntax
        if useMask:
            maskSide = maskSide.lower()
            if maskSide == "right":
                npImg[-maskHeight:, -maskWidth:, :] = 0
            elif maskSide == "left":
                npImg[-maskHeight:, :maskWidth, :] = 0
            else:
                raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')

        # If Nvidia, do this
        if onnxChoice == 3:
            # Normalizing data
            im = torch.from_numpy(npImg).to('cuda')
            if im.shape[2] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :3]

            im = torch.movedim(im, 2, 0)  # HWC -> CHW
            im = im.half()
            im /= 255
            if len(im.shape) == 3:
                im = im[None]  # add a batch dimension
        # If AMD or CPU, do this
        else:
            # Normalizing data
            im = np.array([npImg])
            if im.shape[3] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :, :3]
            im = im / 255
            im = im.astype(np.half)
            im = np.moveaxis(im, 3, 1)  # NHWC -> NCHW

        # If Nvidia, do this
        if onnxChoice == 3:
            # im is a torch CUDA tensor here, so copy it back to host memory for onnxruntime
            outputs = ort_sess.run(None, {'images': im.cpu().numpy()})
        # If AMD or CPU, do this
        else:
            outputs = ort_sess.run(None, {'images': np.array(im)})

        im = torch.from_numpy(outputs[0]).to('cpu')

        pred = non_max_suppression(
            im, confidence, confidence, 0, False, max_det=10)

        targets = []
        for i, det in enumerate(pred):
            s = ""
            # im.shape[0] is 1 here, so dividing by gn keeps xywh in pixels
            gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
            if len(det):
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {int(c)}, "  # add to string

                for *xyxy, conf, cls in reversed(det):
                    targets.append((xyxy2xywh(torch.tensor(xyxy).view(
                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # xywh plus confidence

        targets = pd.DataFrame(
            targets, columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])

        center_screen = [cWidth, cHeight]

        # If there are people in the center bounding box
        if len(targets) > 0:
            if centerOfScreen:
                # Compute the distance from the center
                targets["dist_from_center"] = np.sqrt(
                    (targets.current_mid_x - center_screen[0])**2 + (targets.current_mid_y - center_screen[1])**2)

                # Sort the data frame by distance from center
                targets = targets.sort_values("dist_from_center")

            # Get the last person's mid coordinate if it exists
            if last_mid_coord:
                targets['last_mid_x'] = last_mid_coord[0]
                targets['last_mid_y'] = last_mid_coord[1]
                # Distance between the current and last person mid coordinates
                targets['dist'] = np.linalg.norm(
                    targets[['current_mid_x', 'current_mid_y']].values
                    - targets[['last_mid_x', 'last_mid_y']].values, axis=1)
                # Note: the sorted result is discarded, so this line has no effect as written
                targets.sort_values(by="dist", ascending=False)

            # Take the first person in the dataframe (recall we sort by Euclidean distance)
            xMid = targets.iloc[0].current_mid_x
            yMid = targets.iloc[0].current_mid_y

            box_height = targets.iloc[0].height
            if headshot_mode:
                headshot_offset = box_height * 0.38
            else:
                headshot_offset = box_height * 0.2

            mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]
            # Moving the mouse
            # Imagine recalculating everything only to find you had a drop-in replacement
            if win32api.GetKeyState(0x02) < 0:  # 0x02 = VK_RBUTTON: aim while RMB is held
                ghub_move(mouseMove[0], mouseMove[1])
            last_mid_coord = [xMid, yMid]

        else:
            last_mid_coord = None

        # See what the bot sees
        if visuals:
            # Loops over every item identified and draws a bounding box
            for i in range(0, len(targets)):
                halfW = round(targets["width"][i] / 2)
                halfH = round(targets["height"][i] / 2)
                midX = targets['current_mid_x'][i]
                midY = targets['current_mid_y'][i]
                (startX, startY, endX, endY) = int(midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)

                idx = 0
                # Draw the bounding box and label on the frame
                label = "{}: {:.2f}%".format(
                    "Character", targets["confidence"][i] * 100)
                cv2.rectangle(npImg, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(npImg, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # Forced garbage cleanup every second
        count += 1
        if (time.time() - sTime) > 1:
            if cpsDisplay:
                print("CPS: {}".format(count))
            count = 0
            sTime = time.time()

            # Uncomment if you keep running into memory issues
            # gc.collect(generation=0)

        # See visually what the Aimbot sees
        if visuals:
            cv2.imshow('Live Feed', npImg)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                exit()
    camera.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exception(e)  # the single-argument form needs Python 3.10+
        print("ERROR: " + str(e))
        print("Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")

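When substituting a different .onnx export for 'RRRR.onnx', a quick check of the session's expected input saves guesswork; a small sketch using onnxruntime's introspection API:

import onnxruntime as ort

sess = ort.InferenceSession('RRRR.onnx', providers=['CPUExecutionProvider'])
inp = sess.get_inputs()[0]
# A YOLOv5 export expects NCHW input, e.g. [1, 3, 320, 320], and the tensor
# name printed here must match the key passed to ort_sess.run() above ('images').
print(inp.name, inp.shape, inp.type)
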
172 customScripts/Villageslayer/main_tensorrt.py Normal file
@@ -0,0 +1,172 @@
import torch
import numpy as np
import cv2
import time
import win32api
import win32con
import pandas as pd
from utils.general import (cv2, non_max_suppression, xyxy2xywh)
from models.common import DetectMultiBackend
from mouse_driver.MouseMove import mouse_move as ghub_move
import cupy as cp

# Could be done with
# from config import *
# But we are writing it out for clarity for new devs
from config import aaMovementAmp, useMask, maskHeight, maskWidth, aaQuitKey, confidence, headshot_mode, cpsDisplay, visuals, centerOfScreen, screenShotWidth
import gameSelection


def main():
    # External function for running the game selection menu (gameSelection.py)
    camera, cWidth, cHeight = gameSelection.gameSelection()

    # Used for forcing garbage collection
    count = 0
    sTime = time.time()

    # Loading the YOLOv5 TensorRT engine
    model = DetectMultiBackend('RRRR320half.engine', device=torch.device(
        'cuda'), dnn=False, data='', fp16=True)
    stride, names, pt = model.stride, model.names, model.pt

    # Used for colors drawn on bounding boxes
    COLORS = np.random.uniform(0, 255, size=(1500, 3))

    # Main loop; quit when the exit key is pressed
    last_mid_coord = None
    with torch.no_grad():
        while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:

            npImg = cp.array([camera.get_latest_frame()])
            if npImg.shape[3] == 4:
                # If the image has an alpha channel, remove it
                npImg = npImg[:, :, :, :3]

            from config import maskSide  # "temporary" workaround for bad syntax
            if useMask:
                maskSide = maskSide.lower()
                if maskSide == "right":
                    npImg[:, -maskHeight:, -maskWidth:, :] = 0
                elif maskSide == "left":
                    npImg[:, -maskHeight:, :maskWidth, :] = 0
                else:
                    raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')

            im = npImg / 255
            im = im.astype(cp.half)

            im = cp.moveaxis(im, 3, 1)  # NHWC -> NCHW
            # Copies GPU -> host -> GPU each frame (cp.asnumpy materializes on the CPU)
            im = torch.from_numpy(cp.asnumpy(im)).to('cuda')

            # Detecting all the objects
            results = model(im)

            pred = non_max_suppression(
                results, confidence, confidence, 0, False, max_det=2)

            targets = []
            for i, det in enumerate(pred):
                s = ""
                # im.shape[0] is the batch size (1), so dividing by gn keeps xywh in pixels
                gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
                if len(det):
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += f"{n} {names[int(c)]}, "  # add to string

                    for *xyxy, conf, cls in reversed(det):
                        targets.append((xyxy2xywh(torch.tensor(xyxy).view(
                            1, 4)) / gn).view(-1).tolist() + [float(conf)])  # xywh plus confidence

            targets = pd.DataFrame(
                targets, columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])

            center_screen = [cWidth, cHeight]

            # If there are people in the center bounding box
            if len(targets) > 0:
                if centerOfScreen:
                    # Compute the distance from the center
                    targets["dist_from_center"] = np.sqrt(
                        (targets.current_mid_x - center_screen[0])**2 + (targets.current_mid_y - center_screen[1])**2)

                    # Sort the data frame by distance from center
                    targets = targets.sort_values("dist_from_center")

                # Get the last person's mid coordinate if it exists
                if last_mid_coord:
                    targets['last_mid_x'] = last_mid_coord[0]
                    targets['last_mid_y'] = last_mid_coord[1]
                    # Distance between the current and last person mid coordinates
                    targets['dist'] = np.linalg.norm(
                        targets[['current_mid_x', 'current_mid_y']].values
                        - targets[['last_mid_x', 'last_mid_y']].values, axis=1)
                    # Note: the sorted result is discarded, so this line has no effect as written
                    targets.sort_values(by="dist", ascending=False)

                # Take the first person in the dataframe (recall we sort by Euclidean distance)
                xMid = targets.iloc[0].current_mid_x
                yMid = targets.iloc[0].current_mid_y

                box_height = targets.iloc[0].height
                if headshot_mode:
                    headshot_offset = box_height * 0.38
                else:
                    headshot_offset = box_height * 0.2

                mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]

                # Moving the mouse (0x91 = VK_SCROLL: Scroll Lock toggles aiming on/off)
                if win32api.GetKeyState(0x91):
                    # 0x02 = VK_RBUTTON, 0x01 = VK_LBUTTON: move only while a mouse button is held
                    if win32api.GetKeyState(0x02) < 0 or win32api.GetKeyState(0x01) < 0:
                        ghub_move(mouseMove[0], mouseMove[1])
                        time.sleep(0.01)
                last_mid_coord = [xMid, yMid]

            else:
                last_mid_coord = None

            # See what the bot sees
            if visuals:
                npImg = cp.asnumpy(npImg[0])
                # Loops over every item identified and draws a bounding box
                for i in range(0, len(targets)):
                    halfW = round(targets["width"][i] / 2)
                    halfH = round(targets["height"][i] / 2)
                    midX = targets['current_mid_x'][i]
                    midY = targets['current_mid_y'][i]
                    (startX, startY, endX, endY) = int(midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)

                    idx = 0
                    # Draw the bounding box and label on the frame
                    label = "{}: {:.2f}%".format(
                        "Character", targets["confidence"][i] * 100)
                    cv2.rectangle(npImg, (startX, startY), (endX, endY),
                                  COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(npImg, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

            # Forced garbage cleanup every second
            count += 1
            if (time.time() - sTime) > 1:
                if cpsDisplay:
                    print("CPS: {}".format(count))
                count = 0
                sTime = time.time()

                # Uncomment if you keep running into memory issues
                # gc.collect(generation=0)

            # See visually what the Aimbot sees
            if visuals:
                cv2.imshow('Live Feed', npImg)
                if (cv2.waitKey(1) & 0xFF) == ord('q'):
                    exit()
    camera.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exception(e)  # the single-argument form needs Python 3.10+
        print("ERROR: " + str(e))
        print("Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")

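The name 'RRRR320half.engine' suggests a 320-pixel FP16 TensorRT engine. Assuming it was produced from a custom 'RRRR.pt' checkpoint (the filename is an assumption) with Ultralytics YOLOv5's export script, the build step would look roughly like:

# Hypothetical build command; the .pt filename is an assumption
python export.py --weights RRRR.pt --include engine --imgsz 320 --half --device 0
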
5 customScripts/Villageslayer/readme.md Normal file
@@ -0,0 +1,5 @@
# Explain your model

Switched the aim key to RMB (right mouse button).

Added Scroll Lock as a toggle key.

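A minimal sketch of how these two bindings combine (as in main_tensorrt.py above), assuming the standard Win32 virtual-key codes 0x91 = VK_SCROLL, 0x02 = VK_RBUTTON, 0x01 = VK_LBUTTON:

import win32api

VK_LBUTTON, VK_RBUTTON, VK_SCROLL = 0x01, 0x02, 0x91

def aim_enabled() -> bool:
    # For lock keys, the low-order bit of GetKeyState reports the toggle
    # state, which is what makes Scroll Lock usable as an on/off switch.
    scroll_lock_on = bool(win32api.GetKeyState(VK_SCROLL) & 1)
    # The high-order bit (sign) reports whether a key is currently held down.
    aim_key_held = (win32api.GetKeyState(VK_RBUTTON) < 0
                    or win32api.GetKeyState(VK_LBUTTON) < 0)
    return scroll_lock_on and aim_key_held
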
180 customScripts/main.py Normal file
@@ -0,0 +1,180 @@
import torch
import numpy as np
import cv2
import time
import win32api
import win32con
import pandas as pd
import gc
from utils.general import (cv2, non_max_suppression, xyxy2xywh)

# Could be done with
# from config import *
# But we are writing it out for clarity for new devs
from config import aaMovementAmp, useMask, maskWidth, maskHeight, aaQuitKey, screenShotHeight, confidence, headshot_mode, cpsDisplay, visuals, centerOfScreen
import gameSelection


def main():
    # External function for running the game selection menu (gameSelection.py)
    camera, cWidth, cHeight = gameSelection.gameSelection()

    # Used for forcing garbage collection
    count = 0
    sTime = time.time()

    # Loading the YOLOv5 small model; for better results use yolov5m or yolov5l
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s',
                           pretrained=True, force_reload=True)
    stride, names, pt = model.stride, model.names, model.pt

    if torch.cuda.is_available():
        model.half()

    # Used for colors drawn on bounding boxes
    COLORS = np.random.uniform(0, 255, size=(1500, 3))

    # Main loop; quit when the quit key is pressed
    last_mid_coord = None
    with torch.no_grad():
        while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:

            # Getting frame
            npImg = np.array(camera.get_latest_frame())

            from config import maskSide  # "temporary" workaround for bad syntax
            if useMask:
                maskSide = maskSide.lower()
                if maskSide == "right":
                    npImg[-maskHeight:, -maskWidth:, :] = 0
                elif maskSide == "left":
                    npImg[-maskHeight:, :maskWidth, :] = 0
                else:
                    raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')

            # Normalizing data
            im = torch.from_numpy(npImg)
            if im.shape[2] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :3]

            im = torch.movedim(im, 2, 0)  # HWC -> CHW
            if torch.cuda.is_available():
                im = im.half()
            else:
                im = im.float()  # the in-place divide below needs a floating dtype
            im /= 255
            if len(im.shape) == 3:
                im = im[None]  # add a batch dimension

            # Detecting all the objects
            results = model(im, size=screenShotHeight)

            # Suppressing results that don't meet thresholds
            pred = non_max_suppression(
                results, confidence, confidence, 0, False, max_det=1000)

            # Converting output to usable coords
            targets = []
            for i, det in enumerate(pred):
                s = ""
                # im.shape[0] is the batch size (1), so dividing by gn keeps xywh in pixels
                gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
                if len(det):
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += f"{n} {names[int(c)]}, "  # add to string

                    for *xyxy, conf, cls in reversed(det):
                        targets.append((xyxy2xywh(torch.tensor(xyxy).view(
                            1, 4)) / gn).view(-1).tolist() + [float(conf)])  # xywh plus confidence

            targets = pd.DataFrame(
                targets, columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])

            center_screen = [cWidth, cHeight]

            # If there are people in the center bounding box
            if len(targets) > 0:
                if centerOfScreen:
                    # Compute the distance from the center
                    targets["dist_from_center"] = np.sqrt(
                        (targets.current_mid_x - center_screen[0])**2 + (targets.current_mid_y - center_screen[1])**2)

                    # Sort the data frame by distance from center
                    targets = targets.sort_values("dist_from_center")

                # Get the last person's mid coordinate if it exists
                if last_mid_coord:
                    targets['last_mid_x'] = last_mid_coord[0]
                    targets['last_mid_y'] = last_mid_coord[1]
                    # Distance between the current and last person mid coordinates
                    targets['dist'] = np.linalg.norm(
                        targets[['current_mid_x', 'current_mid_y']].values
                        - targets[['last_mid_x', 'last_mid_y']].values, axis=1)
                    # Note: the sorted result is discarded, so this line has no effect as written
                    targets.sort_values(by="dist", ascending=False)

                # Take the first person in the dataframe (recall we sort by Euclidean distance)
                xMid = targets.iloc[0].current_mid_x
                yMid = targets.iloc[0].current_mid_y

                box_height = targets.iloc[0].height
                if headshot_mode:
                    headshot_offset = box_height * 0.38
                else:
                    headshot_offset = box_height * 0.2

                mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]

                # Moving the mouse (0x14 = VK_CAPITAL: active while Caps Lock is toggled on)
                if win32api.GetKeyState(0x14):
                    win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int(
                        mouseMove[0] * aaMovementAmp), int(mouseMove[1] * aaMovementAmp), 0, 0)
                last_mid_coord = [xMid, yMid]

            else:
                last_mid_coord = None

            # See what the bot sees
            if visuals:
                # Loops over every item identified and draws a bounding box
                for i in range(0, len(targets)):
                    halfW = round(targets["width"][i] / 2)
                    halfH = round(targets["height"][i] / 2)
                    midX = targets['current_mid_x'][i]
                    midY = targets['current_mid_y'][i]
                    (startX, startY, endX, endY) = int(midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)

                    idx = 0

                    # Draw the bounding box and label on the frame
                    label = "{}: {:.2f}%".format(
                        "Human", targets["confidence"][i] * 100)
                    cv2.rectangle(npImg, (startX, startY), (endX, endY),
                                  COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(npImg, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

            # Forced garbage cleanup every second
            count += 1
            if (time.time() - sTime) > 1:
                if cpsDisplay:
                    print("CPS: {}".format(count))
                count = 0
                sTime = time.time()

                # Uncomment if you keep running into memory issues
                # gc.collect(generation=0)

            # See visually what the Aimbot sees
            if visuals:
                cv2.imshow('Live Feed', npImg)
                if (cv2.waitKey(1) & 0xFF) == ord('q'):
                    exit()
    camera.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exception(e)  # the single-argument form needs Python 3.10+
        print("ERROR: " + str(e))
        print("Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")

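The normalization block above turns a captured HWC frame into the NCHW float tensor YOLOv5 expects; the same chain in isolation, on a dummy 4-channel frame:

import numpy as np
import torch

frame = np.zeros((320, 320, 4), dtype=np.uint8)  # dummy 4-channel capture (H, W, C)

im = torch.from_numpy(frame)
if im.shape[2] == 4:
    im = im[:, :, :3]            # drop the alpha channel
im = torch.movedim(im, 2, 0)     # HWC -> CHW
im = im.float() / 255            # uint8 [0, 255] -> float [0, 1]
im = im[None]                    # CHW -> NCHW, batch of 1
print(im.shape)                  # torch.Size([1, 3, 320, 320])
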
203 customScripts/main_onnx.py Normal file
@@ -0,0 +1,203 @@
import onnxruntime as ort
import numpy as np
import gc
import cv2
import time
import win32api
import win32con
import pandas as pd
from utils.general import (cv2, non_max_suppression, xyxy2xywh)
from mouse_driver.MouseMove import mouse_move as ghub_move
import torch

# Could be done with
# from config import *
# But we are writing it out for clarity for new devs
from config import aaMovementAmp, useMask, maskHeight, maskWidth, aaQuitKey, confidence, headshot_mode, cpsDisplay, visuals, onnxChoice, centerOfScreen
import gameSelection


def main():
    # External function for running the game selection menu (gameSelection.py)
    camera, cWidth, cHeight = gameSelection.gameSelection()

    # Used for forcing garbage collection
    count = 0
    sTime = time.time()

    # Choosing the correct ONNX provider based on config.py
    onnxProvider = ""
    if onnxChoice == 1:
        onnxProvider = "CPUExecutionProvider"
    elif onnxChoice == 2:
        onnxProvider = "DmlExecutionProvider"
    elif onnxChoice == 3:
        import cupy as cp
        onnxProvider = "CUDAExecutionProvider"

    so = ort.SessionOptions()
    so.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
    ort_sess = ort.InferenceSession('RRRR.onnx', sess_options=so,
                                    providers=[onnxProvider])

    # Used for colors drawn on bounding boxes
    COLORS = np.random.uniform(0, 255, size=(1500, 3))

    # Main loop; quit when the quit key is pressed
    last_mid_coord = None
    while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:

        # Getting frame
        npImg = np.array(camera.get_latest_frame())

        from config import maskSide  # "temporary" workaround for bad syntax
        if useMask:
            maskSide = maskSide.lower()
            if maskSide == "right":
                npImg[-maskHeight:, -maskWidth:, :] = 0
            elif maskSide == "left":
                npImg[-maskHeight:, :maskWidth, :] = 0
            else:
                raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')

        # If Nvidia, do this
        if onnxChoice == 3:
            # Normalizing data
            im = torch.from_numpy(npImg).to('cuda')
            if im.shape[2] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :3]

            im = torch.movedim(im, 2, 0)  # HWC -> CHW
            im = im.half()
            im /= 255
            if len(im.shape) == 3:
                im = im[None]  # add a batch dimension
        # If AMD or CPU, do this
        else:
            # Normalizing data
            im = np.array([npImg])
            if im.shape[3] == 4:
                # If the image has an alpha channel, remove it
                im = im[:, :, :, :3]
            im = im / 255
            im = im.astype(np.half)
            im = np.moveaxis(im, 3, 1)  # NHWC -> NCHW

        # If Nvidia, do this
        if onnxChoice == 3:
            # im is a torch CUDA tensor here, so copy it back to host memory for onnxruntime
            outputs = ort_sess.run(None, {'images': im.cpu().numpy()})
        # If AMD or CPU, do this
        else:
            outputs = ort_sess.run(None, {'images': np.array(im)})

        im = torch.from_numpy(outputs[0]).to('cpu')

        pred = non_max_suppression(
            im, confidence, confidence, 0, False, max_det=10)

        targets = []
        for i, det in enumerate(pred):
            s = ""
            # im.shape[0] is 1 here, so dividing by gn keeps xywh in pixels
            gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
            if len(det):
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {int(c)}, "  # add to string

                for *xyxy, conf, cls in reversed(det):
                    targets.append((xyxy2xywh(torch.tensor(xyxy).view(
                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # xywh plus confidence

        targets = pd.DataFrame(
            targets, columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])

        center_screen = [cWidth, cHeight]

        # If there are people in the center bounding box
        if len(targets) > 0:
            if centerOfScreen:
                # Compute the distance from the center
                targets["dist_from_center"] = np.sqrt(
                    (targets.current_mid_x - center_screen[0])**2 + (targets.current_mid_y - center_screen[1])**2)

                # Sort the data frame by distance from center
                targets = targets.sort_values("dist_from_center")

            # Get the last person's mid coordinate if it exists
            if last_mid_coord:
                targets['last_mid_x'] = last_mid_coord[0]
                targets['last_mid_y'] = last_mid_coord[1]
                # Distance between the current and last person mid coordinates
                targets['dist'] = np.linalg.norm(
                    targets[['current_mid_x', 'current_mid_y']].values
                    - targets[['last_mid_x', 'last_mid_y']].values, axis=1)
                # Note: the sorted result is discarded, so this line has no effect as written
                targets.sort_values(by="dist", ascending=False)

            # Take the first person in the dataframe (recall we sort by Euclidean distance)
            xMid = targets.iloc[0].current_mid_x
            yMid = targets.iloc[0].current_mid_y

            box_height = targets.iloc[0].height
            if headshot_mode:
                headshot_offset = box_height * 0.38
            else:
                headshot_offset = box_height * 0.2

            mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]
            # Moving the mouse
            # Imagine recalculating everything only to find you had a drop-in replacement
            if win32api.GetKeyState(0x02) < 0:  # 0x02 = VK_RBUTTON: aim while RMB is held
                ghub_move(mouseMove[0], mouseMove[1])
            last_mid_coord = [xMid, yMid]

        else:
            last_mid_coord = None

        # See what the bot sees
        if visuals:
            # Loops over every item identified and draws a bounding box
            for i in range(0, len(targets)):
                halfW = round(targets["width"][i] / 2)
                halfH = round(targets["height"][i] / 2)
                midX = targets['current_mid_x'][i]
                midY = targets['current_mid_y'][i]
                (startX, startY, endX, endY) = int(midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)

                idx = 0
                # Draw the bounding box and label on the frame
                label = "{}: {:.2f}%".format(
                    "Character", targets["confidence"][i] * 100)
                cv2.rectangle(npImg, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(npImg, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # Forced garbage cleanup every second
        count += 1
        if (time.time() - sTime) > 1:
            if cpsDisplay:
                print("CPS: {}".format(count))
            count = 0
            sTime = time.time()

            # Uncomment if you keep running into memory issues
            # gc.collect(generation=0)

        # See visually what the Aimbot sees
        if visuals:
            cv2.imshow('Live Feed', npImg)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                exit()
    camera.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exception(e)  # the single-argument form needs Python 3.10+
        print("ERROR: " + str(e))
        print("Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")

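Target selection above reduces to a nearest-to-the-crosshair sort over the detections DataFrame; the same idea in isolation, with two made-up detections and a 640x480 capture center:

import numpy as np
import pandas as pd

targets = pd.DataFrame(
    [[200, 150, 40, 80, 0.9],     # far from center
     [330, 240, 50, 100, 0.8]],   # near center
    columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])
cWidth, cHeight = 320, 240

targets['dist_from_center'] = np.sqrt(
    (targets.current_mid_x - cWidth) ** 2 + (targets.current_mid_y - cHeight) ** 2)
targets = targets.sort_values('dist_from_center')
print(targets.iloc[0].current_mid_x, targets.iloc[0].current_mid_y)  # 330.0 240.0
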
172 customScripts/main_tensorrt.py Normal file
@@ -0,0 +1,172 @@
import torch
import numpy as np
import cv2
import time
import win32api
import win32con
import pandas as pd
from utils.general import (cv2, non_max_suppression, xyxy2xywh)
from models.common import DetectMultiBackend
from mouse_driver.MouseMove import mouse_move as ghub_move
import cupy as cp

# Could be done with
# from config import *
# But we are writing it out for clarity for new devs
from config import aaMovementAmp, useMask, maskHeight, maskWidth, aaQuitKey, confidence, headshot_mode, cpsDisplay, visuals, centerOfScreen, screenShotWidth
import gameSelection


def main():
    # External function for running the game selection menu (gameSelection.py)
    camera, cWidth, cHeight = gameSelection.gameSelection()

    # Used for forcing garbage collection
    count = 0
    sTime = time.time()

    # Loading the YOLOv5 TensorRT engine
    model = DetectMultiBackend('RRRR320half.engine', device=torch.device(
        'cuda'), dnn=False, data='', fp16=True)
    stride, names, pt = model.stride, model.names, model.pt

    # Used for colors drawn on bounding boxes
    COLORS = np.random.uniform(0, 255, size=(1500, 3))

    # Main loop; quit when the exit key is pressed
    last_mid_coord = None
    with torch.no_grad():
        while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:

            npImg = cp.array([camera.get_latest_frame()])
            if npImg.shape[3] == 4:
                # If the image has an alpha channel, remove it
                npImg = npImg[:, :, :, :3]

            from config import maskSide  # "temporary" workaround for bad syntax
            if useMask:
                maskSide = maskSide.lower()
                if maskSide == "right":
                    npImg[:, -maskHeight:, -maskWidth:, :] = 0
                elif maskSide == "left":
                    npImg[:, -maskHeight:, :maskWidth, :] = 0
                else:
                    raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')

            im = npImg / 255
            im = im.astype(cp.half)

            im = cp.moveaxis(im, 3, 1)  # NHWC -> NCHW
            # Copies GPU -> host -> GPU each frame (cp.asnumpy materializes on the CPU)
            im = torch.from_numpy(cp.asnumpy(im)).to('cuda')

            # Detecting all the objects
            results = model(im)

            pred = non_max_suppression(
                results, confidence, confidence, 0, False, max_det=2)

            targets = []
            for i, det in enumerate(pred):
                s = ""
                # im.shape[0] is the batch size (1), so dividing by gn keeps xywh in pixels
                gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
                if len(det):
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += f"{n} {names[int(c)]}, "  # add to string

                    for *xyxy, conf, cls in reversed(det):
                        targets.append((xyxy2xywh(torch.tensor(xyxy).view(
                            1, 4)) / gn).view(-1).tolist() + [float(conf)])  # xywh plus confidence

            targets = pd.DataFrame(
                targets, columns=['current_mid_x', 'current_mid_y', 'width', 'height', 'confidence'])

            center_screen = [cWidth, cHeight]

            # If there are people in the center bounding box
            if len(targets) > 0:
                if centerOfScreen:
                    # Compute the distance from the center
                    targets["dist_from_center"] = np.sqrt(
                        (targets.current_mid_x - center_screen[0])**2 + (targets.current_mid_y - center_screen[1])**2)

                    # Sort the data frame by distance from center
                    targets = targets.sort_values("dist_from_center")

                # Get the last person's mid coordinate if it exists
                if last_mid_coord:
                    targets['last_mid_x'] = last_mid_coord[0]
                    targets['last_mid_y'] = last_mid_coord[1]
                    # Distance between the current and last person mid coordinates
                    targets['dist'] = np.linalg.norm(
                        targets[['current_mid_x', 'current_mid_y']].values
                        - targets[['last_mid_x', 'last_mid_y']].values, axis=1)
                    # Note: the sorted result is discarded, so this line has no effect as written
                    targets.sort_values(by="dist", ascending=False)

                # Take the first person in the dataframe (recall we sort by Euclidean distance)
                xMid = targets.iloc[0].current_mid_x
                yMid = targets.iloc[0].current_mid_y

                box_height = targets.iloc[0].height
                if headshot_mode:
                    headshot_offset = box_height * 0.38
                else:
                    headshot_offset = box_height * 0.2

                mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight]

                # Moving the mouse (0x91 = VK_SCROLL: Scroll Lock toggles aiming on/off)
                if win32api.GetKeyState(0x91):
                    # 0x02 = VK_RBUTTON, 0x01 = VK_LBUTTON: move only while a mouse button is held
                    if win32api.GetKeyState(0x02) < 0 or win32api.GetKeyState(0x01) < 0:
                        ghub_move(mouseMove[0], mouseMove[1])
                        time.sleep(0.01)
                last_mid_coord = [xMid, yMid]

            else:
                last_mid_coord = None

            # See what the bot sees
            if visuals:
                npImg = cp.asnumpy(npImg[0])
                # Loops over every item identified and draws a bounding box
                for i in range(0, len(targets)):
                    halfW = round(targets["width"][i] / 2)
                    halfH = round(targets["height"][i] / 2)
                    midX = targets['current_mid_x'][i]
                    midY = targets['current_mid_y'][i]
                    (startX, startY, endX, endY) = int(midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)

                    idx = 0
                    # Draw the bounding box and label on the frame
                    label = "{}: {:.2f}%".format(
                        "Character", targets["confidence"][i] * 100)
                    cv2.rectangle(npImg, (startX, startY), (endX, endY),
                                  COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(npImg, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

            # Forced garbage cleanup every second
            count += 1
            if (time.time() - sTime) > 1:
                if cpsDisplay:
                    print("CPS: {}".format(count))
                count = 0
                sTime = time.time()

                # Uncomment if you keep running into memory issues
                # gc.collect(generation=0)

            # See visually what the Aimbot sees
            if visuals:
                cv2.imshow('Live Feed', npImg)
                if (cv2.waitKey(1) & 0xFF) == ord('q'):
                    exit()
    camera.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exception(e)  # the single-argument form needs Python 3.10+
        print("ERROR: " + str(e))
        print("Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")

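A side note on torch.from_numpy(cp.asnumpy(im)).to('cuda') in the loop above: it copies the CuPy array to host RAM and back to the GPU every frame. Both libraries support DLPack, so a zero-copy handoff is possible; a sketch (the resulting tensor shares one GPU buffer with the CuPy array, so neither side should mutate it while the other still uses it):

import cupy as cp
import torch

im = cp.zeros((1, 3, 320, 320), dtype=cp.float16)
t = torch.from_dlpack(im)  # same GPU memory, no host round-trip
print(t.device, t.dtype, t.shape)  # cuda:0 torch.float16 torch.Size([1, 3, 320, 320])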