Mirror of https://github.com/RootKit-Org/AI-Aimbot.git, synced 2025-06-21 02:41:01 +08:00
Bug fix: confidence works and displays correctly

Parent: bc9af95607
Commit: 4a7853645d

Changed: main.py (14 lines), plus matching changes in the commit's other files below

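Why this matters: before the fix, the drawn label reused the scalar `confidence` threshold (one code path even re-hardcoded `confidence = .5` right before drawing), so every box showed the same percentage regardless of what the model actually scored. The commit instead appends `float(conf)` to every `targets` row, adds a `confidence` column to the DataFrame, and reads `targets["confidence"][i]` in the label; it also settles every entry script on the same 0.4 threshold. A minimal sketch of the new data flow, using a made-up detection row in place of real model output:

    import pandas as pd
    import torch

    # Pretend NMS output: one row per detection = [x1, y1, x2, y2, conf, cls]
    det = torch.tensor([[100., 50., 180., 210., 0.87, 0.]])

    targets = []
    for *xyxy, conf, cls in reversed(det):
        # placeholder math standing in for (xyxy2xywh(...) / gn).view(-1).tolist()
        x1, y1, x2, y2 = (float(v) for v in xyxy)
        xywh = [(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1]
        targets.append(xywh + [float(conf)])  # each row carries its own score

    targets = pd.DataFrame(
        targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])

    i = 0
    label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
    print(label)  # Human: 87.00%
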
@@ -28,7 +28,7 @@ def main():
     aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.5
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"

@@ -113,7 +113,7 @@ def main():
         npImg = np.array(camera.get_latest_frame())
 
         # Normalizing Data
-        im = torch.from_numpy(npImg).to('cuda')
+        im = torch.from_numpy(npImg)
         im = torch.movedim(im, 2, 0)
         im = im.half()
         im /= 255

@@ -125,7 +125,7 @@ def main():
 
         # Suppressing results that dont meet thresholds
         pred = non_max_suppression(
-            results, 0.25, 0.25, 0, False, max_det=1000)
+            results, confidence, confidence, 0, False, max_det=1000)
 
         # Converting output to usable cords
         targets = []

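A note on this call: in yolov5's `non_max_suppression(prediction, conf_thres, iou_thres, classes, agnostic, ...)` the second and third positional arguments are the confidence threshold and the IoU threshold, so passing `confidence` twice also makes 0.4 the overlap cutoff for merging boxes. Annotated form of the same call, with argument roles taken from the vendored yolov5 helper:

    pred = non_max_suppression(
        results,       # raw model predictions
        confidence,    # conf_thres: drop detections scoring below 0.4
        confidence,    # iou_thres: NMS overlap cutoff, now also 0.4
        0,             # classes: keep only class 0 ("person")
        False,         # agnostic NMS disabled
        max_det=1000)  # cap detections per image
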
@@ -139,10 +139,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:

@@ -187,12 +187,10 @@ def main():
                 (startX, startY, endX, endY) = int(
                     midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH)
 
-                confidence = .5
-
                 idx = 0
 
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15

@@ -27,10 +27,10 @@ def main():
     aaRightShift = 0
 
     # Autoaim mouse movement amplifier
-    aaMovementAmp = 0.6
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.35
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"

@@ -105,9 +105,16 @@ def main():
     last_mid_coord = None
     while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
 
-        npImg = cp.array([camera.get_latest_frame()]) / 255
-        npImg = npImg.astype(cp.half)
-        npImg = cp.moveaxis(npImg, 3, 1)
+        # Getting Frame
+        npImg = np.array(camera.get_latest_frame())
+
+        # Normalizing Data
+        im = torch.from_numpy(npImg)
+        im = torch.movedim(im, 2, 0)
+        im = im.half()
+        im /= 255
+        if len(im.shape) == 3:
+            im = im[None]
 
         outputs = ort_sess.run(None, {'images': npImg})

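The replacement preprocessing is the standard torch pipeline: tensor from the numpy frame, channels moved first, cast to fp16, pixels scaled to [0, 1], and a batch dimension added when missing. (Note that in this hunk the ONNX session is still fed the raw `npImg`; a later hunk in this commit feeds `cp.asnumpy(im)` instead.) A self-contained sketch, with a dummy 320x320 RGB frame standing in for `camera.get_latest_frame()`:

    import numpy as np
    import torch

    npImg = np.zeros((320, 320, 3), dtype=np.uint8)  # stand-in for the captured frame

    im = torch.from_numpy(npImg)   # HWC uint8 tensor
    im = torch.movedim(im, 2, 0)   # HWC -> CHW
    im = im.half()                 # fp16 to match the half-precision model
    im /= 255                      # scale pixels to [0, 1]
    if len(im.shape) == 3:
        im = im[None]              # add batch dim: CHW -> 1xCHW
    print(im.shape, im.dtype)      # torch.Size([1, 3, 320, 320]) torch.float16
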
@@ -127,10 +134,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:

@@ -177,7 +184,7 @@ def main():
 
                 idx = 0
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15

@@ -26,10 +26,10 @@ def main():
     aaRightShift = 0
 
     # Autoaim mouse movement amplifier
-    aaMovementAmp = 0.6
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.35
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"

@@ -106,9 +106,16 @@ def main():
     last_mid_coord = None
     while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
 
-        npImg = np.array([camera.get_latest_frame()]) / 255
-        npImg = npImg.astype(np.half)
-        npImg = np.moveaxis(npImg, 3, 1)
+        # Getting Frame
+        npImg = np.array(camera.get_latest_frame())
+
+        # Normalizing Data
+        im = torch.from_numpy(npImg)
+        im = torch.movedim(im, 2, 0)
+        im = im.half()
+        im /= 255
+        if len(im.shape) == 3:
+            im = im[None]
 
         outputs = ort_sess.run(None, {'images': npImg})

@@ -128,10 +135,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:

@@ -178,7 +185,7 @@ def main():
 
                 idx = 0
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15

@@ -27,10 +27,10 @@ def main():
     aaRightShift = 0
 
     # Autoaim mouse movement amplifier
-    aaMovementAmp = 0.6
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.35
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"

@@ -42,7 +42,7 @@ def main():
     cpsDisplay = True
 
     # Set to True if you want to get the visuals
-    visuals = False
+    visuals = True
 
     # Selecting the correct game window
     try:

@@ -105,11 +105,18 @@ def main():
     last_mid_coord = None
     while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
 
-        npImg = cp.array([camera.get_latest_frame()]) / 255
-        npImg = npImg.astype(cp.half)
-        npImg = cp.moveaxis(npImg, 3, 1)
-
-        outputs = ort_sess.run(None, {'images': cp.asnumpy(npImg)})
+        # Getting Frame
+        npImg = np.array(camera.get_latest_frame())
+
+        # Normalizing Data
+        im = torch.from_numpy(npImg).to('cuda')
+        im = torch.movedim(im, 2, 0)
+        im = im.half()
+        im /= 255
+        if len(im.shape) == 3:
+            im = im[None]
+
+        outputs = ort_sess.run(None, {'images': cp.asnumpy(im)})
 
         im = torch.from_numpy(outputs[0]).to('cpu')

@@ -119,7 +126,7 @@ def main():
         targets = []
         for i, det in enumerate(pred):
             s = ""
-            gn = torch.tensor(npImg.shape)[[0, 0, 0, 0]]
+            gn = torch.tensor(im.shape)[[0, 0, 0, 0]]
             if len(det):
                 for c in det[:, -1].unique():
                     n = (det[:, -1] == c).sum()  # detections per class

@@ -127,10 +134,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:

@@ -177,7 +184,7 @@ def main():
 
                 idx = 0
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15

@@ -94,7 +94,7 @@ def main():
     sTime = time.time()
 
     # Loading Yolo5 Small AI Model
-    model = DetectMultiBackend('yolov5s.engine', device=torch.device(
+    model = DetectMultiBackend('yolov5s320Half.engine', device=torch.device(
         'cuda'), dnn=False, data='', fp16=True)
     stride, names, pt = model.stride, model.names, model.pt

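The new weights name suggests a 320-pixel, half-precision TensorRT export rather than the stock engine. Assuming the upstream yolov5 `export.py` (not shown in this commit), such an engine could be built with something like:

    python export.py --weights yolov5s.pt --include engine --imgsz 320 --half --device 0
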
@@ -106,17 +106,18 @@ def main():
     with torch.no_grad():
         while win32api.GetAsyncKeyState(ord(aaQuitKey)) == 0:
 
-            npImg = cp.array([camera.get_latest_frame()]) / 255
-            npImg = npImg.astype(cp.half)
+            npImg = cp.array([camera.get_latest_frame()])
+            im = npImg / 255
+            im = im.astype(cp.half)
 
-            im = cp.moveaxis(npImg, 3, 1)
+            im = cp.moveaxis(im, 3, 1)
             im = torch.from_numpy(cp.asnumpy(im)).to('cuda')
 
-            # Converting to numpy for visuals
-            im0 = im[0].permute(1, 2, 0) * 255
-            im0 = im0.cpu().numpy().astype(np.uint8)
-            # Image has to be in BGR for visualization
-            im0 = cv2.cvtColor(im0, cv2.COLOR_RGB2BGR)
+            # # Converting to numpy for visuals
+            # im0 = im[0].permute(1, 2, 0) * 255
+            # im0 = im0.cpu().numpy().astype(np.uint8)
+            # # Image has to be in BGR for visualization
+            # im0 = cv2.cvtColor(im0, cv2.COLOR_RGB2BGR)
 
             # Detecting all the objects
             results = model(im)

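The point of this reshuffle: the raw capture now survives in `npImg` while the normalized, fp16, channels-first copy lives in `im`, so the unscaled frame can later be pulled back to the host for drawing instead of the deleted `im0` round-trip. A short sketch of the split, assuming CuPy with a CUDA device available:

    import cupy as cp

    npImg = cp.zeros((1, 320, 320, 3), dtype=cp.uint8)  # stand-in for the captured batch

    im = npImg / 255              # normalized copy; npImg keeps the raw pixels
    im = im.astype(cp.half)
    im = cp.moveaxis(im, 3, 1)    # NHWC -> NCHW for the engine

    frame = cp.asnumpy(npImg[0])  # raw frame back on the host for cv2 drawing
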
@@ -135,10 +136,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:

@@ -174,6 +175,7 @@ def main():
 
             # See what the bot sees
             if visuals:
+                npImg = cp.asnumpy(npImg[0])
                 # Loops over every item identified and draws a bounding box
                 for i in range(0, len(targets)):
                     halfW = round(targets["width"][i] / 2)

@@ -185,11 +187,11 @@ def main():
 
                     idx = 0
                     # draw the bounding box and label on the frame
-                    label = "{}: {:.2f}%".format("Human", confidence * 100)
-                    cv2.rectangle(im0, (startX, startY), (endX, endY),
+                    label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
+                    cv2.rectangle(npImg, (startX, startY), (endX, endY),
                                   COLORS[idx], 2)
                     y = startY - 15 if startY - 15 > 15 else startY + 15
-                    cv2.putText(im0, label, (startX, y),
+                    cv2.putText(npImg, label, (startX, y),
                                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
 
             # Forced garbage cleanup every second

@@ -205,7 +207,7 @@ def main():
 
             # See visually what the Aimbot sees
             if visuals:
-                cv2.imshow('Live Feed', im0)
+                cv2.imshow('Live Feed', npImg)
                 if (cv2.waitKey(1) & 0xFF) == ord('q'):
                     exit()
     camera.stop()

@@ -25,10 +25,10 @@ def main():
     aaRightShift = 0
 
     # Autoaim mouse movement amplifier
-    aaMovementAmp = 1.0
+    aaMovementAmp = .8
 
     # Person Class Confidence
-    confidence = 0.25
+    confidence = 0.4
 
     # What key to press to quit and shutdown the autoaim
     aaQuitKey = "Q"

@@ -136,10 +136,10 @@ def main():
 
                 for *xyxy, conf, cls in reversed(det):
                     targets.append((xyxy2xywh(torch.tensor(xyxy).view(
-                        1, 4)) / gn).view(-1).tolist())  # normalized xywh
+                        1, 4)) / gn).view(-1).tolist() + [float(conf)])  # normalized xywh
 
         targets = pd.DataFrame(
-            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height"])
+            targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"])
 
         # If there are people in the center bounding box
         if len(targets) > 0:

@@ -186,7 +186,7 @@ def main():
 
                 idx = 0
                 # draw the bounding box and label on the frame
-                label = "{}: {:.2f}%".format("Human", confidence * 100)
+                label = "{}: {:.2f}%".format("Human", targets["confidence"][i] * 100)
                 cv2.rectangle(npImg, (startX, startY), (endX, endY),
                               COLORS[idx], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15

@@ -531,13 +531,14 @@ class LoadImagesAndLabels(Dataset):
 
         # Update labels
         include_class = []  # filter labels to include only these classes (optional)
+        self.segments = list(self.segments)
         include_class_array = np.array(include_class).reshape(1, -1)
         for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
             if include_class:
                 j = (label[:, 0:1] == include_class_array).any(1)
                 self.labels[i] = label[j]
                 if segment:
-                    self.segments[i] = segment[j]
+                    self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]
             if single_cls:  # single-class training, merge all classes into 0
                 self.labels[i][:, 0] = 0

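Context for this one-liner: `segment[j]` only works while `segment` is a numpy array; when the cached segments arrive as plain Python lists, boolean-mask indexing raises a `TypeError`, hence the comprehension here and the `list(self.segments)` coercion above it (the cache otherwise yields an immutable tuple, so item assignment would fail too). A minimal reproduction:

    import numpy as np

    segment = [np.zeros((5, 2)), np.ones((3, 2)), np.ones((4, 2))]  # one polygon per label
    j = np.array([True, False, True])  # labels kept by the class filter

    # segment[j]  # TypeError: a Python list can't be indexed with a boolean mask
    kept = [segment[idx] for idx, elem in enumerate(j) if elem]
    print(len(kept))  # 2
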
@@ -2,9 +2,8 @@
 # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference
 
-# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
-# FROM docker.io/pytorch/pytorch:latest
-FROM pytorch/pytorch:latest
+# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch
+FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime
 
 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

@@ -118,8 +118,8 @@ def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'):
         except Exception:
             tag = release
 
-        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
         if name in assets:
+            file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
             safe_download(file,
                           url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                           min_bytes=1E5,

@@ -1119,13 +1119,13 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False):
 imshow_ = cv2.imshow  # copy to avoid recursion errors
 
 
-def imread(path, flags=cv2.IMREAD_COLOR):
-    return cv2.imdecode(np.fromfile(path, np.uint8), flags)
+def imread(filename, flags=cv2.IMREAD_COLOR):
+    return cv2.imdecode(np.fromfile(filename, np.uint8), flags)
 
 
-def imwrite(path, im):
+def imwrite(filename, img):
     try:
-        cv2.imencode(Path(path).suffix, im)[1].tofile(path)
+        cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
         return True
     except Exception:
         return False

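These helpers keep yolov5's Unicode-path workaround (encode and decode through a byte buffer so cv2 never opens a non-ASCII path itself) while renaming the parameters to match OpenCV's own `imread(filename, flags)` and `imwrite(filename, img)` signatures. A self-contained round trip, with a hypothetical non-ASCII filename:

    from pathlib import Path

    import cv2
    import numpy as np

    def imread(filename, flags=cv2.IMREAD_COLOR):
        # decode from a byte buffer so non-ASCII paths work everywhere
        return cv2.imdecode(np.fromfile(filename, np.uint8), flags)

    def imwrite(filename, img):
        try:
            # encode in memory, then write bytes to the target path
            cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
            return True
        except Exception:
            return False

    img = np.full((8, 8, 3), 127, dtype=np.uint8)
    assert imwrite('tésting.png', img)              # path with a non-ASCII character
    assert imread('tésting.png').shape == (8, 8, 3)
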
@@ -10,7 +10,7 @@ def crop_mask(masks, boxes):
     Vectorized by Chong (thanks Chong).
 
     Args:
-        - masks should be a size [h, w, n] tensor of masks
+        - masks should be a size [n, h, w] tensor of masks
         - boxes should be a size [n, 4] tensor of bbox coords in relative point form
     """

@@ -16,7 +16,6 @@ class ComputeLoss:
         self.overlap = overlap
         device = next(model.parameters()).device  # get model device
         h = model.hyp  # hyperparameters
-        self.device = device
 
         # Define criteria
         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))

@@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg'):
         x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
         annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
         if paths:
-            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
+            annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
         if len(targets) > 0:
             idx = targets[:, 0] == i
             ti = targets[idx]  # image targets