Mirror of https://github.com/RootKit-Org/AI-Aimbot.git (synced 2025-06-21 02:41:01 +08:00)

Commit c5618aa4ab ("fixed tensor context is none issue"), parent b605e59718
@@ -38,7 +38,7 @@ Intended for educational use 🎓, our aim is to highlight the vulnerability of
    - 🛑 Is it a `pip is not recognized...` error? [WATCH THIS!](https://youtu.be/zWYvRS7DtOg)
 3. Fire up `PowerShell` or `Command Prompt` on Windows 🔍.
 4. To install `PyTorch`, select the appropriate command based on your GPU.
-   - Nvidia `pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118`
+   - Nvidia `pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118`
    - AMD or CPU `pip install torch torchvision torchaudio`
 5. 📦 Run the command below to install the required Open Source packages:
 ```
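A quick way to confirm the newly pinned build landed correctly (a minimal sketch; assumes an Nvidia GPU and the cu118 wheels installed above):

```python
import torch

print(torch.__version__)           # expect 2.0.1 for the pinned install above
print(torch.cuda.is_available())   # True means the cu118 build can see your GPU
```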
export.py (239 changed lines)
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
@@ -77,6 +77,25 @@ from utils.torch_utils import select_device, smart_inference_mode
 MACOS = platform.system() == 'Darwin'  # macOS environment


+class iOSModel(torch.nn.Module):
+
+    def __init__(self, model, im):
+        super().__init__()
+        b, c, h, w = im.shape  # batch, channel, height, width
+        self.model = model
+        self.nc = model.nc  # number of classes
+        if w == h:
+            self.normalize = 1. / w
+        else:
+            self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h])  # broadcast (slower, smaller)
+            # np = model(im)[0].shape[1]  # number of points
+            # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4)  # explicit (faster, larger)
+
+    def forward(self, x):
+        xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
+        return cls * conf, xywh * self.normalize  # confidence (3780, 80), coordinates (3780, 4)
+
+
 def export_formats():
     # YOLOv5 export formats
     x = [
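For context on the new `iOSModel` wrapper: it rescales pixel-space boxes into the 0–1 range that a CoreML NMS layer expects. A minimal sketch of just the normalization logic (toy values, not part of the diff):

```python
import torch

w, h = 640, 640  # square input -> a scalar divisor; non-square inputs use a 4-vector
normalize = 1. / w if w == h else torch.tensor([1. / w, 1. / h, 1. / w, 1. / h])
xywh = torch.tensor([[320., 320., 64., 64.]])  # one box in pixels
print(xywh * normalize)  # tensor([[0.5000, 0.5000, 0.1000, 0.1000]])
```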
@@ -91,7 +110,7 @@ def export_formats():
         ['TensorFlow Lite', 'tflite', '.tflite', True, False],
         ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
         ['TensorFlow.js', 'tfjs', '_web_model', False, False],
-        ['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
+        ['PaddlePaddle', 'paddle', '_paddle_model', True, True], ]
     return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
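Many hunks in this commit are pure formatter churn: a space added before the closing bracket of trailing-comma literals. The two spellings are identical at runtime, as a throwaway check shows:

```python
# '(x,)' vs '(x, )' and '[a,]' vs '[a, ]' parse to the same objects
assert ('pt',) == ('pt', )
assert ['a',] == ['a', ]
```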
@@ -136,7 +155,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
     import onnx

     LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-    f = file.with_suffix('.onnx')
+    f = str(file.with_suffix('.onnx'))

     output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
     if dynamic:
@@ -186,23 +205,68 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX


 @try_export
-def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
+def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
-    check_requirements('openvino-dev')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-    import openvino.inference_engine as ie
+    check_requirements('openvino-dev>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+    import openvino.runtime as ov  # noqa
+    from openvino.tools import mo  # noqa

-    LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-    f = str(file).replace('.pt', f'_openvino_model{os.sep}')
-    args = [
-        'mo',
-        '--input_model',
-        str(file.with_suffix('.onnx')),
-        '--output_dir',
-        f,
-        '--data_type',
-        ('FP16' if half else 'FP32'),]
-    subprocess.run(args, check=True, env=os.environ)  # export
+    LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...')
+    f = str(file).replace(file.suffix, f'_openvino_model{os.sep}')
+    f_onnx = file.with_suffix('.onnx')
+    f_ov = str(Path(f) / file.with_suffix('.xml').name)
+    if int8:
+        check_requirements('nncf>=2.4.0')  # requires at least version 2.4.0 to use the post-training quantization
+        import nncf
+        import numpy as np
+        from openvino.runtime import Core
+
+        from utils.dataloaders import create_dataloader
+        core = Core()
+        onnx_model = core.read_model(f_onnx)  # export
+
+        def prepare_input_tensor(image: np.ndarray):
+            input_tensor = image.astype(np.float32)  # uint8 to fp16/32
+            input_tensor /= 255.0  # 0 - 255 to 0.0 - 1.0
+
+            if input_tensor.ndim == 3:
+                input_tensor = np.expand_dims(input_tensor, 0)
+            return input_tensor
+
+        def gen_dataloader(yaml_path, task='train', imgsz=640, workers=4):
+            data_yaml = check_yaml(yaml_path)
+            data = check_dataset(data_yaml)
+            dataloader = create_dataloader(data[task],
+                                           imgsz=imgsz,
+                                           batch_size=1,
+                                           stride=32,
+                                           pad=0.5,
+                                           single_cls=False,
+                                           rect=False,
+                                           workers=workers)[0]
+            return dataloader
+
+        # noqa: F811
+
+        def transform_fn(data_item):
+            """
+            Quantization transform function. Extracts and preprocesses input data from dataloader item for quantization.
+            Parameters:
+                data_item: Tuple with data item produced by DataLoader during iteration
+            Returns:
+                input_tensor: Input data for quantization
+            """
+            img = data_item[0].numpy()
+            input_tensor = prepare_input_tensor(img)
+            return input_tensor
+
+        ds = gen_dataloader(data)
+        quantization_dataset = nncf.Dataset(ds, transform_fn)
+        ov_model = nncf.quantize(onnx_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)
+    else:
+        ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half)  # export

+    ov.serialize(ov_model, f_ov)  # save
     yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
     return f, None
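A hypothetical invocation of the new INT8 OpenVINO path through `run()` (argument names are taken from this diff's `run()`/`parse_opt()`; the weights and dataset paths are placeholder examples):

```python
from export import run

run(weights='yolov5s.pt',      # checkpoint to export
    include=('openvino',),     # request the OpenVINO format
    int8=True,                 # take the nncf post-training quantization branch
    data='data/coco128.yaml')  # calibration images consumed by gen_dataloader(data)
```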
@@ -223,7 +287,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):


 @try_export
-def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
+def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')):
     # YOLOv5 CoreML export
     check_requirements('coremltools')
     import coremltools as ct
@@ -231,6 +295,8 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
     LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
     f = file.with_suffix('.mlmodel')

+    if nms:
+        model = iOSModel(model, im)
     ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
     ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
     bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
@@ -435,7 +501,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
         '10',
         '--out_dir',
         str(file.parent),
-        f_tfl,], check=True)
+        f_tfl, ], check=True)
     return f, None
@@ -456,7 +522,7 @@ def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
         '--quantize_uint8' if int8 else '',
         '--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
         str(f_pb),
-        str(f),]
+        str(f), ]
     subprocess.run([arg for arg in args if arg], check=True)

     json = Path(f_json).read_text()
@@ -506,6 +572,129 @@ def add_tflite_metadata(file, metadata, num_outputs):
         tmp_file.unlink()


+def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')):
+    # YOLOv5 CoreML pipeline
+    import coremltools as ct
+    from PIL import Image
+
+    print(f'{prefix} starting pipeline with coremltools {ct.__version__}...')
+    batch_size, ch, h, w = list(im.shape)  # BCHW
+    t = time.time()
+
+    # YOLOv5 Output shapes
+    spec = model.get_spec()
+    out0, out1 = iter(spec.description.output)
+    if platform.system() == 'Darwin':
+        img = Image.new('RGB', (w, h))  # img(192 width, 320 height)
+        # img = torch.zeros((*opt.img_size, 3)).numpy()  # img size(320,192,3) iDetection
+        out = model.predict({'image': img})
+        out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
+    else:  # linux and windows can not run model.predict(), get sizes from pytorch output y
+        s = tuple(y[0].shape)
+        out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4)  # (3780, 80), (3780, 4)
+
+    # Checks
+    nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
+    na, nc = out0_shape
+    # na, nc = out0.type.multiArrayType.shape  # number anchors, classes
+    assert len(names) == nc, f'{len(names)} names found for nc={nc}'  # check
+
+    # Define output shapes (missing)
+    out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)
+    out1.type.multiArrayType.shape[:] = out1_shape  # (3780, 4)
+    # spec.neuralNetwork.preprocessing[0].featureName = '0'
+
+    # Flexible input shapes
+    # from coremltools.models.neural_network import flexible_shape_utils
+    # s = []  # shapes
+    # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
+    # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384))  # (height, width)
+    # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
+    # r = flexible_shape_utils.NeuralNetworkImageSizeRange()  # shape ranges
+    # r.add_height_range((192, 640))
+    # r.add_width_range((192, 640))
+    # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
+
+    # Print
+    print(spec.description)
+
+    # Model from spec
+    model = ct.models.MLModel(spec)
+
+    # 3. Create NMS protobuf
+    nms_spec = ct.proto.Model_pb2.Model()
+    nms_spec.specificationVersion = 5
+    for i in range(2):
+        decoder_output = model._spec.description.output[i].SerializeToString()
+        nms_spec.description.input.add()
+        nms_spec.description.input[i].ParseFromString(decoder_output)
+        nms_spec.description.output.add()
+        nms_spec.description.output[i].ParseFromString(decoder_output)
+
+    nms_spec.description.output[0].name = 'confidence'
+    nms_spec.description.output[1].name = 'coordinates'
+
+    output_sizes = [nc, 4]
+    for i in range(2):
+        ma_type = nms_spec.description.output[i].type.multiArrayType
+        ma_type.shapeRange.sizeRanges.add()
+        ma_type.shapeRange.sizeRanges[0].lowerBound = 0
+        ma_type.shapeRange.sizeRanges[0].upperBound = -1
+        ma_type.shapeRange.sizeRanges.add()
+        ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
+        ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
+        del ma_type.shape[:]
+
+    nms = nms_spec.nonMaximumSuppression
+    nms.confidenceInputFeatureName = out0.name  # 1x507x80
+    nms.coordinatesInputFeatureName = out1.name  # 1x507x4
+    nms.confidenceOutputFeatureName = 'confidence'
+    nms.coordinatesOutputFeatureName = 'coordinates'
+    nms.iouThresholdInputFeatureName = 'iouThreshold'
+    nms.confidenceThresholdInputFeatureName = 'confidenceThreshold'
+    nms.iouThreshold = 0.45
+    nms.confidenceThreshold = 0.25
+    nms.pickTop.perClass = True
+    nms.stringClassLabels.vector.extend(names.values())
+    nms_model = ct.models.MLModel(nms_spec)
+
+    # 4. Pipeline models together
+    pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)),
+                                                           ('iouThreshold', ct.models.datatypes.Double()),
+                                                           ('confidenceThreshold', ct.models.datatypes.Double())],
+                                           output_features=['confidence', 'coordinates'])
+    pipeline.add_model(model)
+    pipeline.add_model(nms_model)
+
+    # Correct datatypes
+    pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
+    pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
+    pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
+
+    # Update metadata
+    pipeline.spec.specificationVersion = 5
+    pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5'
+    pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5'
+    pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com'
+    pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE'
+    pipeline.spec.description.metadata.userDefined.update({
+        'classes': ','.join(names.values()),
+        'iou_threshold': str(nms.iouThreshold),
+        'confidence_threshold': str(nms.confidenceThreshold)})
+
+    # Save the model
+    f = file.with_suffix('.mlmodel')  # filename
+    model = ct.models.MLModel(pipeline.spec)
+    model.input_description['image'] = 'Input image'
+    model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})'
+    model.input_description['confidenceThreshold'] = \
+        f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})'
+    model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")'
+    model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)'
+    model.save(f)  # pipelined
+    print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)')
+
+
 @smart_inference_mode()
 def run(
         data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
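A sketch of consuming the saved pipeline (macOS only; the diff itself notes that `model.predict()` cannot run on Linux/Windows). The filename and image size are examples, and the two threshold inputs are the optional overrides declared above:

```python
import coremltools as ct
from PIL import Image

model = ct.models.MLModel('yolov5s.mlmodel')   # hypothetical export location
img = Image.new('RGB', (640, 640))             # stand-in for a real photo
out = model.predict({'image': img, 'iouThreshold': 0.45, 'confidenceThreshold': 0.25})
print(out['confidence'].shape, out['coordinates'].shape)  # per-box class scores, xywh boxes
```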
@@ -582,9 +771,11 @@ def run(
     if onnx or xml:  # OpenVINO requires ONNX
         f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
     if xml:  # OpenVINO
-        f[3], _ = export_openvino(file, metadata, half)
+        f[3], _ = export_openvino(file, metadata, half, int8, data)
     if coreml:  # CoreML
-        f[4], _ = export_coreml(model, im, file, int8, half)
+        f[4], ct_model = export_coreml(model, im, file, int8, half, nms)
+        if nms:
+            pipeline_coreml(ct_model, im, file, model.names, y)
     if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
         assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
         assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
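And the CoreML-with-NMS route wired up here could be exercised like this (a hedged sketch; `nms` and `include` per this diff, the weights path is an example):

```python
from export import run

run(weights='yolov5s.pt', include=('coreml',), nms=True)  # export, then build the NMS pipeline
```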
@@ -640,7 +831,7 @@ def parse_opt(known=False):
     parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
     parser.add_argument('--keras', action='store_true', help='TF: use Keras')
     parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
-    parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
+    parser.add_argument('--int8', action='store_true', help='CoreML/TF/OpenVINO INT8 quantization')
     parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
     parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
     parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
@@ -669,4 +860,4 @@ def main(opt):

 if __name__ == '__main__':
     opt = parse_opt()
-    main(opt)
+    main(opt)
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Common modules
 """
@@ -24,12 +24,24 @@ import torch.nn as nn
 from PIL import Image
 from torch.cuda import amp

+# Import 'ultralytics' package or install if missing
+try:
+    import ultralytics
+
+    assert hasattr(ultralytics, '__version__')  # verify package is not directory
+except (ImportError, AssertionError):
+    import os
+
+    os.system('pip install -U ultralytics')
+    import ultralytics
+
+from ultralytics.utils.plotting import Annotator, colors, save_one_box
+
 from utils import TryExcept
 from utils.dataloaders import exif_transpose, letterbox
 from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
                            increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
                            xyxy2xywh, yaml_load)
-from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import copy_attr, smart_inference_mode
@@ -333,7 +345,7 @@ class DetectMultiBackend(nn.Module):
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
         pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
-        fp16 &= pt or jit or onnx or engine  # FP16
+        fp16 &= pt or jit or onnx or engine or triton  # FP16
         nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
         stride = 32  # default stride
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
@@ -353,8 +365,9 @@ class DetectMultiBackend(nn.Module):
             model.half() if fp16 else model.float()
             if extra_files['config.txt']:  # load metadata dict
                 d = json.loads(extra_files['config.txt'],
-                               object_hook=lambda d: {int(k) if k.isdigit() else k: v
-                                                      for k, v in d.items()})
+                               object_hook=lambda d: {
+                                   int(k) if k.isdigit() else k: v
+                                   for k, v in d.items()})
                 stride, names = int(d['stride']), d['names']
         elif dnn:  # ONNX OpenCV DNN
             LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
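What the reflowed `object_hook` does: JSON object keys are always strings, so integer-looking class keys such as "0" are converted back to ints while loading the TorchScript metadata. A standalone demonstration:

```python
import json

d = json.loads('{"stride": 32, "names": {"0": "person", "1": "bicycle"}}',
               object_hook=lambda d: {
                   int(k) if k.isdigit() else k: v
                   for k, v in d.items()})
print(d['names'][0])  # -> 'person' (int key, not the string '0')
```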
@@ -372,18 +385,18 @@ class DetectMultiBackend(nn.Module):
             stride, names = int(meta['stride']), eval(meta['names'])
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
-            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+            check_requirements('openvino>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
             from openvino.runtime import Core, Layout, get_batch
-            ie = Core()
+            core = Core()
             if not Path(w).is_file():  # if not *.xml
                 w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
-            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
-            if network.get_parameters()[0].get_layout().empty:
-                network.get_parameters()[0].set_layout(Layout('NCHW'))
-            batch_dim = get_batch(network)
+            ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))
+            if ov_model.get_parameters()[0].get_layout().empty:
+                ov_model.get_parameters()[0].set_layout(Layout('NCHW'))
+            batch_dim = get_batch(ov_model)
             if batch_dim.is_static:
                 batch_size = batch_dim.get_length()
-            executable_network = ie.compile_model(network, device_name='CPU')  # device_name="MYRIAD" for Intel NCS2
+            ov_compiled_model = core.compile_model(ov_model, device_name='AUTO')  # AUTO selects best available device
             stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
@@ -523,7 +536,7 @@ class DetectMultiBackend(nn.Module):
             y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
         elif self.xml:  # OpenVINO
             im = im.cpu().numpy()  # FP32
-            y = list(self.executable_network([im]).values())
+            y = list(self.ov_compiled_model(im).values())
         elif self.engine:  # TensorRT
             if self.dynamic and im.shape != self.bindings['images'].shape:
                 i = self.model.get_binding_index('images')
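Taken together, the two OpenVINO hunks above move from the removed `inference_engine` API to the 2023 Runtime API. A minimal end-to-end sketch of that flow (paths are placeholders; assumes the `openvino` package is installed):

```python
import numpy as np
from pathlib import Path
from openvino.runtime import Core

core = Core()
w = Path('yolov5s_openvino_model/yolov5s.xml')  # example export location
ov_model = core.read_model(model=w, weights=w.with_suffix('.bin'))
compiled = core.compile_model(ov_model, device_name='AUTO')  # AUTO picks the best device

im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy FP32 input
y = list(compiled(im).values())  # same call convention as the new forward() branch
```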
@@ -540,7 +553,7 @@ class DetectMultiBackend(nn.Module):
         elif self.coreml:  # CoreML
             im = im.cpu().numpy()
             im = Image.fromarray((im[0] * 255).astype('uint8'))
-            # im = im.resize((192, 320), Image.ANTIALIAS)
+            # im = im.resize((192, 320), Image.BILINEAR)
             y = self.model.predict({'image': im})  # coordinates are xywh normalized
             if 'confidence' in y:
                 box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Experimental modules
 """
@@ -87,11 +87,11 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):

         model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode

-    # Module compatibility updates
+    # Module updates
     for m in model.modules():
         t = type(m)
         if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
-            m.inplace = inplace  # torch 1.7.0 compatibility
+            m.inplace = inplace
             if t is Detect and not isinstance(m.anchor_grid, list):
                 delattr(m, 'anchor_grid')
                 setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

 # Default anchors for COCO data
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

 # Parameters
 nc: 80  # number of classes

(This identical one-line license-header hunk repeats verbatim in 23 of the model and hub YAML configs touched by this commit.)
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 TensorFlow, Keras and TFLite versions of YOLOv5
 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
@@ -310,7 +310,7 @@ class TFDetect(keras.layers.Layer):
                 y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
             z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))

-        return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)
+        return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), )

     @staticmethod
     def _make_grid(nx=20, ny=20):
@@ -486,7 +486,7 @@ class TFModel:
                 iou_thres,
                 conf_thres,
                 clip_boxes=False)
-            return (nms,)
+            return (nms, )
         return x  # output [1,6300,85] = [xywh, conf, class0, class1, ...]
         # x = x[0]  # [x(1,6300,85), ...] to x(6300,85)
         # xywh = x[..., :4]  # x(6300,4) boxes
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 YOLO-specific modules
@@ -21,8 +21,8 @@ if str(ROOT) not in sys.path:
 if platform.system() != 'Windows':
     ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

-from models.common import *
-from models.experimental import *
+from models.common import *  # noqa
+from models.experimental import *  # noqa
 from utils.autoanchor import check_anchor_order
 from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
 from utils.plots import feature_visualization
@@ -76,7 +76,7 @@ class Detect(nn.Module):
                 y = torch.cat((xy, wh, conf), 4)
             z.append(y.view(bs, self.na * nx * ny, self.no))

-        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
+        return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x)

     def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
         d = self.anchors[i].device
@@ -126,7 +126,7 @@ class BaseModel(nn.Module):

     def _profile_one_layer(self, m, x, dt):
         c = m == self.model[-1]  # is final layer, copy input as inplace fix
-        o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
+        o = thop.profile(m, inputs=(x.copy() if c else x, ), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
         t = time_sync()
         for _ in range(10):
             m(x.copy() if c else x)
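The `_profile_one_layer` change is again only tuple formatting; for reference, the FLOPs-estimate pattern it wraps looks like this in isolation (requires the optional `thop` package; the layer and input are toy examples):

```python
import torch
import torch.nn as nn
import thop

m = nn.Conv2d(3, 16, 3, padding=1)
x = torch.zeros(1, 3, 64, 64)
o = thop.profile(m, inputs=(x, ), verbose=False)[0] / 1E9 * 2  # MACs -> GFLOPs
print(f'{o:.4f} GFLOPs')
```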
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

 # Parameters
 nc: 80  # number of classes

(The same hunk repeats verbatim in five more YAML model configs here.)
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 utils/initialization
 """
@@ -54,13 +54,17 @@ def notebook_init(verbose=True):
     import os
     import shutil

-    from utils.general import check_font, check_requirements, is_colab
+    from ultralytics.utils.checks import check_requirements
+
+    from utils.general import check_font, is_colab
     from utils.torch_utils import select_device  # imports

     check_font()

     import psutil

+    if check_requirements('wandb', install=False):
+        os.system('pip uninstall -y wandb')  # eliminate unexpected account creation prompt with infinite hang
     if is_colab():
         shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Activation functions
 """
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Image augmentation functions
 """
@@ -330,7 +330,7 @@ def classify_albumentations(
         if vflip > 0:
             T += [A.VerticalFlip(p=vflip)]
         if jitter > 0:
-            color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
+            color_jitter = (float(jitter), ) * 3  # repeat value for brightness, contrast, saturation, 0 hue
             T += [A.ColorJitter(*color_jitter, 0)]
     else:  # Use fixed crop for eval set (reproducibility)
         T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 AutoAnchor utils
 """
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Auto-batch utils
 """
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Callback utils
 """
@@ -32,7 +32,7 @@ class Callbacks:
             'on_model_save': [],
             'on_train_end': [],
             'on_params_update': [],
-            'teardown': [],}
+            'teardown': [], }
         self.stop_training = False  # set True to interrupt training

     def register_action(self, hook, name='', callback=None):
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Dataloaders and dataset utils
 """
@@ -36,7 +36,7 @@ from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, c
 from utils.torch_utils import torch_distributed_zero_first

 # Parameters
-HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
+HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
 IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
 VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
@@ -355,7 +355,7 @@ class LoadStreams:
             # Start thread to read frames from video stream
             st = f'{i + 1}/{n}: {s}... '
             if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video
-                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
+                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
                 check_requirements(('pafy', 'youtube_dl==2020.12.2'))
                 import pafy
                 s = pafy.new(s).getbest(preftype='mp4').url  # YouTube URL
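The YouTube branch only changes a comment URL; for context, resolving a stream with the pinned packages looks like this (a sketch; assumes `pafy` and `youtube_dl==2020.12.2` install cleanly):

```python
import pafy

best = pafy.new('https://youtu.be/LNwODJXcvt4').getbest(preftype='mp4')
print(best.url)  # direct mp4 stream URL handed to cv2.VideoCapture
```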
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference

@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt update
 RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
+RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg
 # RUN alias python=python3

 # Security updates
@@ -24,14 +24,13 @@ RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
+COPY . /usr/src/app

 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
-    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3'
+    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0'
     # tensorflow tensorflowjs \

 # Set environment variables
@@ -1,9 +1,9 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi

 # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
-FROM arm64v8/ubuntu:rolling
+FROM arm64v8/ubuntu:22.10

 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt update
 RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
+RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev
 # RUN alias python=python3

 # Install pip packages
@@ -27,8 +27,7 @@ RUN mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
+COPY . /usr/src/app
 ENV DEBIAN_FRONTEND teletype
@@ -1,25 +1,27 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments

 # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
-FROM ubuntu:rolling
+FROM ubuntu:mantic-20231011

 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

 # Install linux packages
 ENV DEBIAN_FRONTEND noninteractive
-RUN apt update
-RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
+# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
+RUN apt update \
+    && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
 # RUN alias python=python3

+# Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error
+RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
+
 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
-    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \
+    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \
     # tensorflow tensorflowjs \
     --extra-index-url https://download.pytorch.org/whl/cpu

@@ -28,9 +30,7 @@ RUN mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
-ENV DEBIAN_FRONTEND teletype
+COPY . /usr/src/app


 # Usage Examples -------------------------------------------------------------------------------------------------------
@@ -1,10 +1,9 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Download utils
 """

 import logging
 import os
 import subprocess
 import urllib
 from pathlib import Path
@@ -53,7 +52,7 @@ def curl_download(url, filename, *, silent: bool = False) -> bool:
         '--retry',
         '9',
         '-C',
-        '-',])
+        '-', ])
     return proc.returncode == 0
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Perform test request
 """
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Run a Flask REST API exposing one or more YOLOv5s models
 """
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 General utils
 """
@@ -36,6 +36,17 @@ import torch
 import torchvision
 import yaml

+# Import 'ultralytics' package or install if missing
+try:
+    import ultralytics
+
+    assert hasattr(ultralytics, '__version__')  # verify package is not directory
+except (ImportError, AssertionError):
+    os.system('pip install -U ultralytics')
+    import ultralytics
+
+from ultralytics.utils.checks import check_requirements
+
 from utils import TryExcept, emojis
 from utils.downloads import curl_download, gsutil_getsize
 from utils.metrics import box_iou, fitness
@@ -58,6 +69,7 @@ pd.options.display.max_columns = 10
 cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
 os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads
 os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS)  # OpenMP (PyTorch and SciPy)
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress verbose TF compiler warnings in Colab


 def is_ascii(s=''):
@@ -137,12 +149,12 @@ def set_logging(name=LOGGING_NAME, verbose=True):
             name: {
                 'class': 'logging.StreamHandler',
                 'formatter': name,
-                'level': level,}},
+                'level': level, }},
         'loggers': {
             name: {
                 'level': level,
                 'handlers': [name],
-                'propagate': False,}}})
+                'propagate': False, }}})


 set_logging(LOGGING_NAME)  # run before defining LOGGER
@@ -369,7 +381,7 @@ def check_git_info(path='.'):
         return {'remote': None, 'branch': None, 'commit': None}


-def check_python(minimum='3.7.0'):
+def check_python(minimum='3.8.0'):
     # Check current python version vs. required python version
     check_version(platform.python_version(), minimum, name='Python ', hard=True)

@@ -386,41 +398,6 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals
     return result


-@TryExcept()
-def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):
-    # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)
-    prefix = colorstr('red', 'bold', 'requirements:')
-    check_python()  # check python version
-    if isinstance(requirements, Path):  # requirements.txt file
-        file = requirements.resolve()
-        assert file.exists(), f'{prefix} {file} not found, check failed.'
-        with file.open() as f:
-            requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
-    elif isinstance(requirements, str):
-        requirements = [requirements]
-
-    s = ''
-    n = 0
-    for r in requirements:
-        try:
-            pkg.require(r)
-        except (pkg.VersionConflict, pkg.DistributionNotFound):  # exception if requirements not met
-            s += f'"{r}" '
-            n += 1
-
-    if s and install and AUTOINSTALL:  # check environment variable
-        LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
-        try:
-            # assert check_online(), "AutoUpdate skipped (offline)"
-            LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())
-            source = file if 'file' in locals() else requirements
-            s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
-                f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
-            LOGGER.info(s)
-        except Exception as e:
-            LOGGER.warning(f'{prefix} ❌ {e}')
-
-
 def check_img_size(imgsz, s=32, floor=0):
     # Verify image size is a multiple of stride s in each dimension
     if isinstance(imgsz, int):  # integer i.e. img_size=640
@@ -449,7 +426,7 @@ def check_imshow(warn=False):
     return False


-def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
+def check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):
     # Check file(s) for acceptable suffix
     if file and suffix:
         if isinstance(suffix, str):
@@ -1135,6 +1112,7 @@ def imshow(path, im):
     imshow_(path.encode('unicode_escape').decode(), im)


-cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine
+if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
+    cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine

 # Variables ------------------------------------------------------------------------------------------------------------
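The net effect in this file: the local `check_requirements` is deleted and the `ultralytics` package's implementation is imported in its place. Call sites keep the same shape (a sketch; assumes `ultralytics` is installed):

```python
from ultralytics.utils.checks import check_requirements

check_requirements('openvino>=2023.0')           # a single pip-style spec
check_requirements(['onnx', 'onnx-simplifier'])  # or several at once
```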
@@ -1,5 +1,5 @@
 # add these requirements in your app on top of the existing ones
-pip==21.1
-Flask==1.0.2
+pip==23.3
+Flask==2.3.2
 gunicorn==19.10.0
-werkzeug>=2.2.3  # not directly required, pinned by Snyk to avoid a vulnerability
+werkzeug>=3.0.1  # not directly required, pinned by Snyk to avoid a vulnerability
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Logging utils
 """
@@ -9,7 +9,6 @@ from pathlib import Path

 import pkg_resources as pkg
 import torch
-from torch.utils.tensorboard import SummaryWriter

 from utils.general import LOGGER, colorstr, cv2
 from utils.loggers.clearml.clearml_utils import ClearmlLogger
@@ -20,6 +19,11 @@ from utils.torch_utils import de_parallel
 LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet')  # *.csv, TensorBoard, Weights & Biases, ClearML
 RANK = int(os.getenv('RANK', -1))

+try:
+    from torch.utils.tensorboard import SummaryWriter
+except ImportError:
+    SummaryWriter = lambda *args: None  # None = SummaryWriter(str)
+
 try:
     import wandb

@@ -42,15 +46,15 @@ except (ImportError, AssertionError):
     clearml = None

 try:
-    if RANK not in [0, -1]:
-        comet_ml = None
-    else:
+    if RANK in {0, -1}:
         import comet_ml

         assert hasattr(comet_ml, '__version__')  # verify package import not local dir
         from utils.loggers.comet import CometLogger

-except (ModuleNotFoundError, ImportError, AssertionError):
+    else:
+        comet_ml = None
+except (ImportError, AssertionError):
     comet_ml = None


@@ -84,10 +88,6 @@ class Loggers():
         self.csv = True  # always log to csv

         # Messages
-        if not clearml:
-            prefix = colorstr('ClearML: ')
-            s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML"
-            self.logger.info(s)
         if not comet_ml:
             prefix = colorstr('Comet: ')
             s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet"
@@ -114,7 +114,7 @@ class Loggers():
                 self.clearml = None
                 prefix = colorstr('ClearML: ')
                 LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.'
-                               f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme')
+                               f' See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme')

             else:
                 self.clearml = None
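The new TensorBoard import guard is worth a second look: when `tensorboard` is missing, `SummaryWriter` is rebound to a no-op factory, so later `SummaryWriter(str(save_dir))` calls yield `None` instead of raising. Stand-alone:

```python
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    SummaryWriter = lambda *args: None  # None = SummaryWriter(str)

tb = SummaryWriter('runs/exp')  # a real writer, or None without tensorboard
```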
@@ -5,8 +5,7 @@ from pathlib import Path

 import numpy as np
 import yaml

-from utils.plots import Annotator, colors
+from ultralytics.utils.plotting import Annotator, colors

 try:
     import clearml
@@ -59,7 +59,7 @@ Check out an example of a [completed run here](https://www.comet.com/examples/co

 Or better yet, try it out yourself in this Colab Notebook

-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-training/yolov5/notebooks/Comet_and_YOLOv5.ipynb)

 # Log automatically

@@ -164,7 +164,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \

 If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag.

-The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
+The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.

 ```shell
 python train.py \
@ -18,7 +18,7 @@ try:
|
||||
# Project Configuration
|
||||
config = comet_ml.config.get_config()
|
||||
COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5')
|
||||
except (ModuleNotFoundError, ImportError):
|
||||
except ImportError:
|
||||
comet_ml = None
|
||||
COMET_PROJECT_NAME = None
|
||||
|
||||
@ -42,7 +42,7 @@ COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')
|
||||
COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true'
|
||||
|
||||
# Evaluation Settings
|
||||
COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true'
|
||||
COMET_LOG_CONFUSION_MATRIX = (os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true')
|
||||
COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true'
|
||||
COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100))
|
||||
|
||||
@ -51,10 +51,10 @@ CONF_THRES = float(os.getenv('CONF_THRES', 0.001))
|
||||
IOU_THRES = float(os.getenv('IOU_THRES', 0.6))
|
||||
|
||||
# Batch Logging Settings
|
||||
COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true'
|
||||
COMET_LOG_BATCH_METRICS = (os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true')
|
||||
COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1)
|
||||
COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1)
|
||||
COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true'
|
||||
COMET_LOG_PER_CLASS_METRICS = (os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true')
|
||||
|
||||
RANK = int(os.getenv('RANK', -1))
|
||||
|
||||
@ -82,7 +82,7 @@ class CometLogger:
|
||||
self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
|
||||
|
||||
# Dataset Artifact Settings
|
||||
self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET
|
||||
self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET
|
||||
self.resume = self.opt.resume
|
||||
|
||||
# Default parameters to pass to Experiment objects
|
||||
@ -90,9 +90,10 @@ class CometLogger:
|
||||
'log_code': False,
|
||||
'log_env_gpu': True,
|
||||
'log_env_cpu': True,
|
||||
'project_name': COMET_PROJECT_NAME,}
|
||||
'project_name': COMET_PROJECT_NAME, }
|
||||
self.default_experiment_kwargs.update(experiment_kwargs)
|
||||
self.experiment = self._get_experiment(self.comet_mode, run_id)
|
||||
self.experiment.set_name(self.opt.name)
|
||||
|
||||
self.data_dict = self.check_dataset(self.opt.data)
|
||||
self.class_names = self.data_dict['names']
|
||||
@ -136,7 +137,7 @@ class CometLogger:
|
||||
|
||||
self.comet_log_predictions = COMET_LOG_PREDICTIONS
|
||||
if self.opt.bbox_interval == -1:
|
||||
self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
|
||||
self.comet_log_prediction_interval = (1 if self.opt.epochs < 10 else self.opt.epochs // 10)
|
||||
else:
|
||||
self.comet_log_prediction_interval = self.opt.bbox_interval
|
||||
|
||||
@ -152,7 +153,7 @@ class CometLogger:
|
||||
'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS,
|
||||
'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS,
|
||||
'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX,
|
||||
'comet_model_name': COMET_MODEL_NAME,})
|
||||
'comet_model_name': COMET_MODEL_NAME, })
|
||||
|
||||
# Check if running the Experiment with the Comet Optimizer
|
||||
if hasattr(self.opt, 'comet_optimizer_id'):
|
||||
@ -169,7 +170,7 @@ class CometLogger:
|
||||
**self.default_experiment_kwargs,
|
||||
)
|
||||
|
||||
return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,)
|
||||
return comet_ml.OfflineExperiment(**self.default_experiment_kwargs, )
|
||||
|
||||
else:
|
||||
try:
|
||||
@ -213,7 +214,7 @@ class CometLogger:
|
||||
'fitness_score': fitness_score[-1],
|
||||
'epochs_trained': epoch + 1,
|
||||
'save_period': opt.save_period,
|
||||
'total_epochs': opt.epochs,}
|
||||
'total_epochs': opt.epochs, }
|
||||
|
||||
model_files = glob.glob(f'{path}/*.pt')
|
||||
for model_path in model_files:
|
||||
@ -231,7 +232,8 @@ class CometLogger:
|
||||
with open(data_file) as f:
|
||||
data_config = yaml.safe_load(f)
|
||||
|
||||
if data_config['path'].startswith(COMET_PREFIX):
|
||||
path = data_config.get('path')
|
||||
if path and path.startswith(COMET_PREFIX):
|
||||
path = data_config['path'].replace(COMET_PREFIX, '')
|
||||
data_dict = self.download_dataset_artifact(path)
|
||||

@ -269,7 +271,7 @@ class CometLogger:
'x': xyxy[0],
'y': xyxy[1],
'x2': xyxy[2],
'y2': xyxy[3]},})
'y2': xyxy[3]}, })
for *xyxy, conf, cls in filtered_detections.tolist():
metadata.append({
'label': f'{self.class_names[int(cls)]}',
@ -278,7 +280,7 @@ class CometLogger:
'x': xyxy[0],
'y': xyxy[1],
'x2': xyxy[2],
'y2': xyxy[3]},})
'y2': xyxy[3]}, })

self.metadata_dict[image_name] = metadata
self.logged_images_count += 1
@ -312,8 +314,16 @@ class CometLogger:
image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])

try:
artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split})
artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split})
artifact.add(
image_file,
logical_path=image_logical_path,
metadata={'split': split},
)
artifact.add(
label_file,
logical_path=label_logical_path,
metadata={'split': split},
)
except ValueError as e:
logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
logger.error(f'COMET ERROR: {e}')
@ -355,15 +365,14 @@ class CometLogger:
data_dict['path'] = artifact_save_dir

metadata_names = metadata.get('names')
if type(metadata_names) == dict:
if isinstance(metadata_names, dict):
data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()}
elif type(metadata_names) == list:
elif isinstance(metadata_names, list):
data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
else:
raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"

data_dict = self.update_data_paths(data_dict)
return data_dict
return self.update_data_paths(data_dict)

def update_data_paths(self, data_dict):
path = data_dict.get('path', '')
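
Two things worth noting in the `@ -355` hunk: the switch from `type(x) == dict` to `isinstance(x, dict)` is the idiomatic type check (it also accepts subclasses), and the `else` branch that both versions keep uses `raise` on a plain string, which Python 3 rejects with `TypeError: exceptions must derive from BaseException`, so the intended message never surfaces. A sketch of what that branch presumably means to do, using an explicit exception type (an assumption, not what the diff ships):

```
metadata_names = 42  # neither list nor dict, for illustration
if isinstance(metadata_names, dict):
    names = {int(k): v for k, v in metadata_names.items()}
elif isinstance(metadata_names, list):
    names = dict(enumerate(metadata_names))  # same result as the zip(range(...)) version
else:
    # Raising a real exception type instead of a bare string:
    raise ValueError("Invalid 'names' field in dataset yaml file. Please use a list or dictionary")
```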

@ -475,8 +484,9 @@ class CometLogger:
'f1': f1[i],
'true_positives': tp[i],
'false_positives': fp[i],
'support': nt[c]},
prefix=class_name)
'support': nt[c], },
prefix=class_name,
)

if self.comet_log_confusion_matrix:
epoch = self.experiment.curr_epoch

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# WARNING ⚠️ wandb is deprecated and will be removed in a future release.
# See supported integrations at https://github.com/ultralytics/yolov5#integrations

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Loss functions
"""

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Model validation metrics
"""

130
utils/plots.py
@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Plotting utils
"""
@ -8,7 +8,6 @@ import math
import os
from copy import copy
from pathlib import Path
from urllib.error import URLError

import cv2
import matplotlib
@ -17,13 +16,13 @@ import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator

from utils import TryExcept, threaded
from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
is_ascii, xywh2xyxy, xyxy2xywh)
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
from utils.segment.general import scale_image

# Settings
RANK = int(os.getenv('RANK', -1))

@ -52,120 +51,6 @@ class Colors:
colors = Colors() # create instance for 'from utils.plots import colors'


def check_pil_font(font=FONT, size=10):
# Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
font = Path(font)
font = font if font.exists() else (CONFIG_DIR / font.name)
try:
return ImageFont.truetype(str(font) if font.exists() else font.name, size)
except Exception: # download if missing
try:
check_font(font)
return ImageFont.truetype(str(font), size)
except TypeError:
check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374
except URLError: # not online
return ImageFont.load_default()


class Annotator:
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic
self.pil = pil or non_ascii
if self.pil: # use PIL
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)
self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font,
size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
else: # use cv2
self.im = im
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width

def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
# Add one xyxy box to image with label
if self.pil or not is_ascii(label):
self.draw.rectangle(box, width=self.lw, outline=color) # box
if label:
w, h = self.font.getsize(label) # text width, height (WARNING: deprecated in 9.2.0)
# _, _, w, h = self.font.getbbox(label) # text width, height (New)
outside = box[1] - h >= 0 # label fits outside box
self.draw.rectangle(
(box[0], box[1] - h if outside else box[1], box[0] + w + 1,
box[1] + 1 if outside else box[1] + h + 1),
fill=color,
)
# self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
else: # cv2
p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
if label:
tf = max(self.lw - 1, 1) # font thickness
w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
outside = p1[1] - h >= 3
p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
cv2.putText(self.im,
label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
0,
self.lw / 3,
txt_color,
thickness=tf,
lineType=cv2.LINE_AA)

def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
"""Plot masks at once.
Args:
masks (tensor): predicted masks on cuda, shape: [n, h, w]
colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1]
alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
"""
if self.pil:
# convert to numpy first
self.im = np.asarray(self.im).copy()
if len(masks) == 0:
self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
colors = colors[:, None, None] # shape(n,1,1,3)
masks = masks.unsqueeze(3) # shape(n,h,w,1)
masks_color = masks * (colors * alpha) # shape(n,h,w,3)

inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3)

im_gpu = im_gpu.flip(dims=[0]) # flip channel
im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
im_gpu = im_gpu * inv_alph_masks[-1] + mcs
im_mask = (im_gpu * 255).byte().cpu().numpy()
self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape)
if self.pil:
# convert im back to PIL and update draw
self.fromarray(self.im)

def rectangle(self, xy, fill=None, outline=None, width=1):
# Add rectangle to image (PIL-only)
self.draw.rectangle(xy, fill, outline, width)

def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
# Add text to image (PIL-only)
if anchor == 'bottom': # start y from font bottom
w, h = self.font.getsize(text) # text width, height
xy[1] += 1 - h
self.draw.text(xy, text, fill=txt_color, font=self.font)

def fromarray(self, im):
# Update self.im from a numpy array
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)

def result(self):
# Return annotated image as array
return np.asarray(self.im)


def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
"""
x: Features to be visualized
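
The roughly 120 lines removed above (`check_pil_font` plus the local `Annotator` class) are superseded by the `Annotator` now imported from `ultralytics.utils.plotting`, which exposes the same constructor and `box_label`/`rectangle`/`text`/`result` surface. A minimal usage sketch, assuming the `ultralytics` package is installed and its API matches the class deleted here:

```
import numpy as np
from ultralytics.utils.plotting import Annotator  # replacement for the local class

im = np.zeros((640, 640, 3), dtype=np.uint8)  # dummy image, illustration only
annotator = Annotator(im, line_width=2)
annotator.box_label([50, 60, 200, 220], label='person 0.91', color=(0, 128, 255))
annotated = annotator.result()  # annotated image back as a numpy array
print(annotated.shape)
```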

@ -265,7 +150,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
if paths:
annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
if len(targets) > 0:
ti = targets[targets[:, 0] == i] # image targets
boxes = xywh2xyxy(ti[:, 2:6]).T
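
The tuple-to-list change for `annotator.text` above (and its twin in `plot_images_and_masks` further down) plausibly matters because `text` mutates its argument when `anchor='bottom'` (`xy[1] += 1 - h` in the class removed above); a tuple would raise `TypeError` there, while a list supports in-place assignment:

```
xy = (5, 5)
try:
    xy[1] += 1 - 10  # tuples are immutable
except TypeError as e:
    print('tuple fails:', e)

xy = [5, 5]
xy[1] += 1 - 10  # in-place item assignment works on a list
print('list works:', xy)
```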

@ -500,7 +385,8 @@ def plot_results(file='path/to/results.csv', dir=''):
for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
y = data.values[:, j].astype('float')
# y[y == 0] = np.nan # don't show zero values
ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) # actual results
ax[i].plot(x, gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2) # smoothing line
ax[i].set_title(s[j], fontsize=12)
# if j in [8, 9, 10]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
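
The new line in `plot_results` overlays a Gaussian-smoothed copy of each metric (`sigma=3`) on the raw curve. One caveat: the import it relies on, `scipy.ndimage.filters`, is a deprecated module path; current SciPy exposes the same function as `scipy.ndimage.gaussian_filter1d`. A self-contained sketch with a synthetic series standing in for a results.csv column:

```
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import gaussian_filter1d  # non-deprecated import path

y = np.cumsum(np.random.randn(100))  # synthetic stand-in for a results.csv column
plt.plot(y, marker='.', label='results', linewidth=2, markersize=8)
plt.plot(gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2)
plt.legend()
plt.savefig('results_smooth.png')
```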

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Image augmentation functions
"""

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Dataloaders
"""

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Model validation metrics
"""
@ -196,7 +196,7 @@ KEYS = [
'val/cls_loss',
'x/lr0',
'x/lr1',
'x/lr2',]
'x/lr2', ]

BEST_KEYS = [
'best/epoch',
@ -207,4 +207,4 @@ BEST_KEYS = [
'best/precision(M)',
'best/recall(M)',
'best/mAP_0.5(M)',
'best/mAP_0.5:0.95(M)',]
'best/mAP_0.5:0.95(M)', ]

@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg'
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
if paths:
annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
if len(targets) > 0:
idx = targets[:, 0] == i
ti = targets[idx] # image targets
@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
PyTorch utils
"""
@ -170,7 +170,7 @@ def profile(input, ops, n=10, device=None):
m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward
try:
flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs
flops = thop.profile(m, inputs=(x, ), verbose=False)[0] / 1E9 * 2 # GFLOPs
except Exception:
flops = 0

@ -284,7 +284,7 @@ def model_info(model, verbose=False, imgsz=640):
p = next(model.parameters())
stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride
im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format
flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
flops = thop.profile(deepcopy(model), inputs=(im, ), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float
fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs
except Exception:
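
For context on the GFLOPs lines touched above: `thop.profile` returns multiply-accumulate counts, so the `/ 1E9 * 2` converts MACs to GFLOPs for the tiny stride-sized probe input, and the `imgsz[0] / stride * imgsz[1] / stride` factor rescales to the full image, since convolution cost grows linearly with spatial area. A worked example with illustrative numbers (not measured values):

```
stride = 32
flops_at_stride = 0.41  # assumed GFLOPs for a 1x3x32x32 probe input
imgsz = [640, 640]
gflops = flops_at_stride * imgsz[0] / stride * imgsz[1] / stride
print(f'{gflops:.1f} GFLOPs')  # 0.41 * 20 * 20 = 164.0
```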

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" Utils to interact with the Triton Inference Server
"""
Binary files (4) not shown.