mirror of https://github.com/RootKit-Org/AI-Aimbot.git
synced 2025-06-21 02:41:01 +08:00

Updating yolo libs

parent d3bdf0e1fa · commit 8a56d64275
@@ -354,6 +354,7 @@ class DetectMultiBackend(nn.Module):
             import onnxruntime
             providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
             session = onnxruntime.InferenceSession(w, providers=providers)
+            output_names = [x.name for x in session.get_outputs()]
             meta = session.get_modelmeta().custom_metadata_map  # metadata
             if 'stride' in meta:
                 stride, names = int(meta['stride']), eval(meta['names'])
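The new line caches every output tensor name when the ONNX Runtime backend is loaded, so `forward` can later request all outputs in one `session.run` call. A minimal standalone sketch of the same pattern ('model.onnx' is an illustrative path, not part of the commit):

    # Sketch: collect output names and embedded metadata from an ONNX export.
    import onnxruntime

    session = onnxruntime.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
    output_names = [x.name for x in session.get_outputs()]  # cached once at load time
    meta = session.get_modelmeta().custom_metadata_map      # e.g. {'stride': '32', ...} if exported with metadata
    print(output_names, meta.get('stride'))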
@@ -372,9 +373,7 @@ class DetectMultiBackend(nn.Module):
                 batch_size = batch_dim.get_length()
             executable_network = ie.compile_model(network, device_name="CPU")  # device_name="MYRIAD" for Intel NCS2
             output_layer = next(iter(executable_network.outputs))
-            meta = Path(w).with_suffix('.yaml')
-            if meta.exists():
-                stride, names = self._load_metadata(meta)  # load metadata
+            stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
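The three-line exists-check collapses into a single call because `_load_metadata` (updated later in this diff) now returns `(None, None)` for a missing file. The path derivation itself is plain pathlib; a sketch with an illustrative OpenVINO export path:

    # Sketch: derive the sidecar metadata path from an exported model path.
    from pathlib import Path

    w = 'yolov5s_openvino_model/yolov5s.xml'  # illustrative export path
    print(Path(w).with_suffix('.yaml'))       # yolov5s_openvino_model/yolov5s.yaml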
@@ -458,7 +457,7 @@ class DetectMultiBackend(nn.Module):
 
         self.__dict__.update(locals())  # assign all variables to self
 
-    def forward(self, im, augment=False, visualize=False, val=False):
+    def forward(self, im, augment=False, visualize=False):
         # YOLOv5 MultiBackend inference
         b, ch, h, w = im.shape  # batch, channel, height, width
         if self.fp16 and im.dtype != torch.float16:
@@ -466,17 +465,15 @@ class DetectMultiBackend(nn.Module):
 
         if self.pt:  # PyTorch
             y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
-            if isinstance(y, tuple):
-                y = y[0]
         elif self.jit:  # TorchScript
-            y = self.model(im)[0]
+            y = self.model(im)
         elif self.dnn:  # ONNX OpenCV DNN
             im = im.cpu().numpy()  # torch to numpy
             self.net.setInput(im)
             y = self.net.forward()
         elif self.onnx:  # ONNX Runtime
             im = im.cpu().numpy()  # torch to numpy
-            y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
+            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
         elif self.xml:  # OpenVINO
             im = im.cpu().numpy()  # FP32
             y = self.executable_network([im])[self.output_layer]
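Note the dropped trailing `[0]` on the ONNX Runtime branch: `session.run` with a list of names returns a list containing one array per requested output, and unwrapping is deferred to the common tail of `forward` (next hunk). A self-contained sketch of the call shape, again assuming an illustrative 'model.onnx':

    # Sketch: run() returns a list, one numpy array per requested output.
    import numpy as np
    import onnxruntime

    session = onnxruntime.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
    output_names = [x.name for x in session.get_outputs()]
    im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy BCHW input
    y = session.run(output_names, {session.get_inputs()[0].name: im})
    print(type(y), len(y))  # <class 'list'>, 1 for a single-output detection model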
@@ -523,9 +520,13 @@ class DetectMultiBackend(nn.Module):
                     y = (y.astype(np.float32) - zero_point) * scale  # re-scale
             y[..., :4] *= [w, h, w, h]  # xywh normalized to pixels
 
-        if isinstance(y, np.ndarray):
-            y = torch.tensor(y, device=self.device)
-        return (y, []) if val else y
+        if isinstance(y, (list, tuple)):
+            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
+        else:
+            return self.from_numpy(y)
+
+    def from_numpy(self, x):
+        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
 
     def warmup(self, imgsz=(1, 3, 640, 640)):
         # Warmup model by running inference once
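The new `from_numpy` helper replaces the old `torch.tensor(y, device=...)` copy: it wraps numpy arrays with zero-copy `torch.from_numpy` and passes torch tensors through untouched. A standalone sketch of the same behavior (device hard-coded for illustration):

    # Sketch: numpy arrays become tensors, tensors pass through unchanged.
    import numpy as np
    import torch

    def from_numpy(x, device='cpu'):
        return torch.from_numpy(x).to(device) if isinstance(x, np.ndarray) else x

    print(type(from_numpy(np.ones((1, 4)))))  # <class 'torch.Tensor'>
    t = torch.ones(1, 4)
    print(from_numpy(t) is t)                 # True: no copy for tensors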
@@ -548,10 +549,12 @@ class DetectMultiBackend(nn.Module):
         return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs
 
     @staticmethod
-    def _load_metadata(f='path/to/meta.yaml'):
+    def _load_metadata(f=Path('path/to/meta.yaml')):
         # Load metadata from meta.yaml if it exists
         if f.exists():
             d = yaml_load(f)
             return d['stride'], d['names']  # assign stride, names
+        return None, None
 
 
 class AutoShape(nn.Module):
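`_load_metadata` now degrades gracefully: the explicit `return None, None` means `stride, names = self._load_metadata(...)` still unpacks cleanly when no sidecar YAML exists, where the old version fell off the end and returned a single `None`. A standalone sketch, substituting `yaml.safe_load` for YOLOv5's `yaml_load` helper:

    # Sketch: load stride/names from an export's sidecar YAML, if present.
    from pathlib import Path
    import yaml

    def load_metadata(f=Path('model.yaml')):
        if f.exists():
            d = yaml.safe_load(f.read_text())
            return d['stride'], d['names']
        return None, None  # 2-tuple, so callers can always unpack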
Binary files changed (9 files, contents not shown).
@@ -92,10 +92,14 @@ def run(
     LOGGER.info('\n')
     parse_opt()
     notebook_init()  # print system info
-    c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
+    c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
     py = pd.DataFrame(y, columns=c)
     LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
     LOGGER.info(str(py if map else py.iloc[:, :2]))
+    if hard_fail and isinstance(hard_fail, str):
+        metrics = py['mAP50-95'].array  # values to compare to floor
+        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
+        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
     return py
 
 
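The `--hard-fail` value arrives as a string (e.g. '0.29'), is eval'd into a numeric floor, and every finite mAP50-95 entry must beat it or the assert fires. A minimal sketch of the check with dummy benchmark results:

    # Sketch: the hard-fail floor check against dummy results.
    import pandas as pd

    py = pd.DataFrame({'Format': ['PyTorch', 'ONNX'], 'mAP50-95': [0.33, float('nan')]})
    hard_fail = '0.29'               # as passed via --hard-fail 0.29
    floor = eval(hard_fail)          # 0.29
    metrics = py['mAP50-95'].array   # NaN entries (failed exports) are skipped
    assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'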
@@ -141,7 +145,7 @@ def parse_opt():
     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
     parser.add_argument('--test', action='store_true', help='test exports only')
     parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
-    parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure')
+    parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
     opt = parser.parse_args()
     opt.data = check_yaml(opt.data)  # check YAML
     print_args(vars(opt))
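The argparse change is the standard one-flag, three-behaviors pattern: an absent flag yields the default, a bare flag yields the const, and a supplied value comes through as a string. Demonstrated in isolation:

    # Sketch: nargs='?' with const/default gives three behaviors from one flag.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--hard-fail', nargs='?', const=True, default=False)
    print(parser.parse_args([]).hard_fail)                       # False (absent)
    print(parser.parse_args(['--hard-fail']).hard_fail)          # True (bare flag)
    print(parser.parse_args(['--hard-fail', '0.29']).hard_fail)  # '0.29' (string)

This is also why the benchmark code above checks `isinstance(hard_fail, str)` before eval'ing it.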
@@ -187,7 +187,7 @@ class _RepeatSampler:
 
 class LoadImages:
     # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
-    def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None):
+    def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
         files = []
         for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
             p = str(Path(p).resolve())
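Callers opt in by passing the new keyword; a hedged usage sketch, assuming YOLOv5's utils/dataloaders.py is importable and using an illustrative file name:

    # Sketch: decode only every 3rd frame of a video source.
    from utils.dataloaders import LoadImages

    dataset = LoadImages('clip.mp4', img_size=640, stride=32, vid_stride=3)
    for path, im, im0, cap, s in dataset:
        pass  # im arrives at one third of the native frame rate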
@@ -212,6 +212,7 @@ class LoadImages:
         self.mode = 'image'
         self.auto = auto
         self.transforms = transforms  # optional
+        self.vid_stride = vid_stride  # video frame-rate stride
         if any(videos):
             self._new_video(videos[0])  # new video
         else:
@@ -232,6 +233,7 @@ class LoadImages:
             # Read video
             self.mode = 'video'
             ret_val, im0 = self.cap.read()
+            self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1))  # read at vid_stride
             while not ret_val:
                 self.count += 1
                 self.cap.release()
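Frame skipping works by seeking rather than decoding: after each read, the capture position jumps ahead by `vid_stride`, so intermediate frames are never decoded. The mechanism in isolation (file name illustrative):

    # Sketch: jump the decoder ahead so only every Nth frame is decoded.
    import cv2

    vid_stride, frame = 3, 0
    cap = cv2.VideoCapture('clip.mp4')
    ret_val, im0 = cap.read()                                   # returns frame 0
    cap.set(cv2.CAP_PROP_POS_FRAMES, vid_stride * (frame + 1))  # next read() returns frame 3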
@@ -265,7 +267,7 @@ class LoadImages:
         # Create a new video capture object
         self.frame = 0
         self.cap = cv2.VideoCapture(path)
-        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
         self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META))  # rotation degrees
         # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0)  # disable https://github.com/ultralytics/yolov5/issues/8493
 
@@ -285,11 +287,12 @@ class LoadImages:
 
 class LoadStreams:
     # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
-    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None):
+    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
         torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
         self.mode = 'stream'
         self.img_size = img_size
         self.stride = stride
+        self.vid_stride = vid_stride  # video frame-rate stride
         sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources]
         n = len(sources)
         self.sources = [clean_str(x) for x in sources]  # clean source names for later
@@ -329,11 +332,11 @@ class LoadStreams:
 
     def update(self, i, cap, stream):
         # Read stream `i` frames in daemon thread
-        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame
+        n, f = 0, self.frames[i]  # frame number, frame array
         while cap.isOpened() and n < f:
             n += 1
             cap.grab()  # .read() = .grab() followed by .retrieve()
-            if n % read == 0:
+            if n % self.vid_stride == 0:
                 success, im = cap.retrieve()
                 if success:
                     self.imgs[i] = im
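For live streams the strategy differs from files: every frame is still grab()bed, which is cheap and keeps the stream buffer drained, but only every vid_stride-th frame pays for a retrieve() decode. The pattern in isolation:

    # Sketch: grab every frame to stay current, decode only every Nth.
    import cv2

    vid_stride, n, latest = 4, 0, None
    cap = cv2.VideoCapture(0)        # webcam; any stream URL also works
    while cap.isOpened() and n < 100:
        n += 1
        cap.grab()                   # advance without decoding
        if n % vid_stride == 0:
            success, im = cap.retrieve()  # decode only this frame
            if success:
                latest = im
    cap.release()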
@@ -813,6 +813,9 @@ def non_max_suppression(prediction,
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
     """
 
+    if isinstance(prediction, (list, tuple)):  # YOLOv5 model in validation mode, output = (inference_out, loss_out)
+        prediction = prediction[0]  # select only inference output
+
     bs = prediction.shape[0]  # batch size
     nc = prediction.shape[2] - 5  # number of classes
     xc = prediction[..., 4] > conf_thres  # candidates
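This guard pairs with the DetectMultiBackend changes above: in validation mode the model returns `(inference_out, loss_out)`, and NMS now unwraps that itself instead of every caller doing it. A sketch with dummy shapes (25200 anchors and 85 = 5 + 80 classes are typical for YOLOv5 at 640):

    # Sketch: NMS input normalization for validation-style tuple outputs.
    import torch

    prediction = (torch.zeros(1, 25200, 85), [])  # (inference_out, loss_out)
    if isinstance(prediction, (list, tuple)):
        prediction = prediction[0]                # keep only the inference output
    bs = prediction.shape[0]                      # batch size
    nc = prediction.shape[2] - 5                  # number of classes (80 here)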
@@ -233,7 +233,9 @@ class Loggers():
             self.wandb.finish_run()
 
         if self.clearml and not self.opt.evolve:
-            self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), name='Best Model')
+            self.clearml.task.update_output_model(model_path=str(best if best.exists() else last),
+                                                  name='Best Model',
+                                                  auto_delete_file=False)
 
     def on_params_update(self, params: dict):
         # Update hyperparams or configs of the experiment
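Passing `auto_delete_file=False` keeps the local checkpoint on disk after ClearML registers it; the parameter's default otherwise removes the uploaded file. A hedged sketch of the call, assuming an initialized ClearML task and an illustrative weights path:

    # Sketch: register the best checkpoint without deleting the local file.
    from clearml import Task

    task = Task.init(project_name='yolov5', task_name='train')  # illustrative
    task.update_output_model(model_path='runs/train/exp/weights/best.pt',
                             name='Best Model',
                             auto_delete_file=False)            # keep local best.pt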