fixed "tensor context is None" issue

Elijah Harmon 2023-11-29 23:08:02 -08:00
parent b605e59718
commit c5618aa4ab
68 changed files with 409 additions and 331 deletions


@@ -38,7 +38,7 @@ Intended for educational use 🎓, our aim is to highlight the vulnerability of
    - 🛑 Is it a `pip is not recognized...` error? [WATCH THIS!](https://youtu.be/zWYvRS7DtOg)
 3. Fire up `PowerShell` or `Command Prompt` on Windows 🔍.
 4. To install `PyTorch`, select the appropriate command based on your GPU.
-   - Nvidia `pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118`
+   - Nvidia `pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118`
    - AMD or CPU `pip install torch torchvision torchaudio`
 5. 📦 Run the command below to install the required Open Source packages:
 ```

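Since the Nvidia command above now pins exact wheel versions, a quick sanity check that the expected build landed (a minimal sketch, not part of this diff) is:

```python
# Minimal post-install check: confirm the pinned CUDA build is active.
import torch

print(torch.__version__)          # expect something like '2.0.1+cu118'
print(torch.cuda.is_available())  # True when the CUDA wheel matches a working driver
```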
export.py

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit

@@ -77,6 +77,25 @@ from utils.torch_utils import select_device, smart_inference_mode
 MACOS = platform.system() == 'Darwin'  # macOS environment


+class iOSModel(torch.nn.Module):
+
+    def __init__(self, model, im):
+        super().__init__()
+        b, c, h, w = im.shape  # batch, channel, height, width
+        self.model = model
+        self.nc = model.nc  # number of classes
+        if w == h:
+            self.normalize = 1. / w
+        else:
+            self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h])  # broadcast (slower, smaller)
+            # np = model(im)[0].shape[1]  # number of points
+            # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4)  # explicit (faster, larger)
+
+    def forward(self, x):
+        xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
+        return cls * conf, xywh * self.normalize  # confidence (3780, 80), coordinates (3780, 4)
+
+
 def export_formats():
     # YOLOv5 export formats
     x = [
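The `iOSModel` wrapper added above exists so the CoreML NMS pipeline can consume per-class confidences and normalized boxes. A minimal sketch of how it gets used (mirroring the `export_coreml` hunk below; `model` is assumed to be a loaded YOLOv5 detection model):

```python
# Sketch: wrap, trace, and convert, as export_coreml does when nms=True.
import torch
import coremltools as ct

im = torch.zeros(1, 3, 640, 640)  # dummy BCHW input (assumed size)
wrapped = iOSModel(model, im)     # model: loaded YOLOv5 model (assumption)
ts = torch.jit.trace(wrapped, im, strict=False)  # TorchScript graph for coremltools
ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
```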
@@ -136,7 +155,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
     import onnx

     LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-    f = file.with_suffix('.onnx')
+    f = str(file.with_suffix('.onnx'))

     output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
     if dynamic:

@@ -186,23 +205,68 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
 @try_export
-def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
+def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
-    check_requirements('openvino-dev')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-    import openvino.inference_engine as ie
+    check_requirements('openvino-dev>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+    import openvino.runtime as ov  # noqa
+    from openvino.tools import mo  # noqa

-    LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-    f = str(file).replace('.pt', f'_openvino_model{os.sep}')
-
-    args = [
-        'mo',
-        '--input_model',
-        str(file.with_suffix('.onnx')),
-        '--output_dir',
-        f,
-        '--data_type',
-        ('FP16' if half else 'FP32'),]
-    subprocess.run(args, check=True, env=os.environ)  # export
+    LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...')
+    f = str(file).replace(file.suffix, f'_openvino_model{os.sep}')
+    f_onnx = file.with_suffix('.onnx')
+    f_ov = str(Path(f) / file.with_suffix('.xml').name)
+    if int8:
+        check_requirements('nncf>=2.4.0')  # requires at least version 2.4.0 to use the post-training quantization
+        import nncf
+        import numpy as np
+        from openvino.runtime import Core
+
+        from utils.dataloaders import create_dataloader
+
+        core = Core()
+        onnx_model = core.read_model(f_onnx)  # export
+
+        def prepare_input_tensor(image: np.ndarray):
+            input_tensor = image.astype(np.float32)  # uint8 to fp16/32
+            input_tensor /= 255.0  # 0 - 255 to 0.0 - 1.0
+
+            if input_tensor.ndim == 3:
+                input_tensor = np.expand_dims(input_tensor, 0)
+            return input_tensor
+
+        def gen_dataloader(yaml_path, task='train', imgsz=640, workers=4):  # noqa: F811
+            data_yaml = check_yaml(yaml_path)
+            data = check_dataset(data_yaml)
+            dataloader = create_dataloader(data[task],
+                                           imgsz=imgsz,
+                                           batch_size=1,
+                                           stride=32,
+                                           pad=0.5,
+                                           single_cls=False,
+                                           rect=False,
+                                           workers=workers)[0]
+            return dataloader
+
+        def transform_fn(data_item):
+            """
+            Quantization transform function. Extracts and preprocess input data from dataloader item for quantization.
+            Parameters:
+                data_item: Tuple with data item produced by DataLoader during iteration
+            Returns:
+                input_tensor: Input data for quantization
+            """
+            img = data_item[0].numpy()
+            input_tensor = prepare_input_tensor(img)
+            return input_tensor
+
+        ds = gen_dataloader(data)
+        quantization_dataset = nncf.Dataset(ds, transform_fn)
+        ov_model = nncf.quantize(onnx_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)
+    else:
+        ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half)  # export
+
+    ov.serialize(ov_model, f_ov)  # save
     yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
     return f, None
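For reference, a minimal sketch of consuming the exported IR with the new `openvino.runtime` API (the paths are assumptions; the call pattern matches the `DetectMultiBackend` changes further down):

```python
# Sketch: load and run the exported OpenVINO model.
import numpy as np
from openvino.runtime import Core

core = Core()
ov_model = core.read_model('yolov5s_openvino_model/yolov5s.xml')  # assumed output path
compiled = core.compile_model(ov_model, device_name='AUTO')       # AUTO picks the best device
im = np.zeros((1, 3, 640, 640), dtype=np.float32)                 # dummy normalized BCHW input
y = list(compiled(im).values())                                   # list of output arrays
```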
@@ -223,7 +287,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
 @try_export
-def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
+def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')):
     # YOLOv5 CoreML export
     check_requirements('coremltools')
     import coremltools as ct

@@ -231,6 +295,8 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
     LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
     f = file.with_suffix('.mlmodel')

+    if nms:
+        model = iOSModel(model, im)
     ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
     ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
     bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
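The `(bits, mode)` pair chosen above drives coremltools' weight quantizer in lines this hunk does not show; as an assumption based on coremltools' public API (not text from this diff), that step looks roughly like:

```python
# Hedged sketch: apply the selected weight quantization (only when bits < 32).
import coremltools as ct

if bits < 32:
    ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
```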
@@ -506,6 +572,129 @@ def add_tflite_metadata(file, metadata, num_outputs):
         tmp_file.unlink()


+def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')):
+    # YOLOv5 CoreML pipeline
+    import coremltools as ct
+    from PIL import Image
+
+    print(f'{prefix} starting pipeline with coremltools {ct.__version__}...')
+    batch_size, ch, h, w = list(im.shape)  # BCHW
+    t = time.time()
+
+    # YOLOv5 Output shapes
+    spec = model.get_spec()
+    out0, out1 = iter(spec.description.output)
+    if platform.system() == 'Darwin':
+        img = Image.new('RGB', (w, h))  # img(192 width, 320 height)
+        # img = torch.zeros((*opt.img_size, 3)).numpy()  # img size(320,192,3) iDetection
+        out = model.predict({'image': img})
+        out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
+    else:  # linux and windows can not run model.predict(), get sizes from pytorch output y
+        s = tuple(y[0].shape)
+        out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4)  # (3780, 80), (3780, 4)
+
+    # Checks
+    nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
+    na, nc = out0_shape
+    # na, nc = out0.type.multiArrayType.shape  # number anchors, classes
+    assert len(names) == nc, f'{len(names)} names found for nc={nc}'  # check
+
+    # Define output shapes (missing)
+    out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)
+    out1.type.multiArrayType.shape[:] = out1_shape  # (3780, 4)
+    # spec.neuralNetwork.preprocessing[0].featureName = '0'
+
+    # Flexible input shapes
+    # from coremltools.models.neural_network import flexible_shape_utils
+    # s = []  # shapes
+    # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
+    # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384))  # (height, width)
+    # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
+    # r = flexible_shape_utils.NeuralNetworkImageSizeRange()  # shape ranges
+    # r.add_height_range((192, 640))
+    # r.add_width_range((192, 640))
+    # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
+
+    # Print
+    print(spec.description)
+
+    # Model from spec
+    model = ct.models.MLModel(spec)
+
+    # 3. Create NMS protobuf
+    nms_spec = ct.proto.Model_pb2.Model()
+    nms_spec.specificationVersion = 5
+    for i in range(2):
+        decoder_output = model._spec.description.output[i].SerializeToString()
+        nms_spec.description.input.add()
+        nms_spec.description.input[i].ParseFromString(decoder_output)
+        nms_spec.description.output.add()
+        nms_spec.description.output[i].ParseFromString(decoder_output)
+
+    nms_spec.description.output[0].name = 'confidence'
+    nms_spec.description.output[1].name = 'coordinates'
+
+    output_sizes = [nc, 4]
+    for i in range(2):
+        ma_type = nms_spec.description.output[i].type.multiArrayType
+        ma_type.shapeRange.sizeRanges.add()
+        ma_type.shapeRange.sizeRanges[0].lowerBound = 0
+        ma_type.shapeRange.sizeRanges[0].upperBound = -1
+        ma_type.shapeRange.sizeRanges.add()
+        ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
+        ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
+        del ma_type.shape[:]
+
+    nms = nms_spec.nonMaximumSuppression
+    nms.confidenceInputFeatureName = out0.name  # 1x507x80
+    nms.coordinatesInputFeatureName = out1.name  # 1x507x4
+    nms.confidenceOutputFeatureName = 'confidence'
+    nms.coordinatesOutputFeatureName = 'coordinates'
+    nms.iouThresholdInputFeatureName = 'iouThreshold'
+    nms.confidenceThresholdInputFeatureName = 'confidenceThreshold'
+    nms.iouThreshold = 0.45
+    nms.confidenceThreshold = 0.25
+    nms.pickTop.perClass = True
+    nms.stringClassLabels.vector.extend(names.values())
+    nms_model = ct.models.MLModel(nms_spec)
+
+    # 4. Pipeline models together
+    pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)),
+                                                           ('iouThreshold', ct.models.datatypes.Double()),
+                                                           ('confidenceThreshold', ct.models.datatypes.Double())],
+                                           output_features=['confidence', 'coordinates'])
+    pipeline.add_model(model)
+    pipeline.add_model(nms_model)
+
+    # Correct datatypes
+    pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
+    pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
+    pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
+
+    # Update metadata
+    pipeline.spec.specificationVersion = 5
+    pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5'
+    pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5'
+    pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com'
+    pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE'
+    pipeline.spec.description.metadata.userDefined.update({
+        'classes': ','.join(names.values()),
+        'iou_threshold': str(nms.iouThreshold),
+        'confidence_threshold': str(nms.confidenceThreshold)})
+
+    # Save the model
+    f = file.with_suffix('.mlmodel')  # filename
+    model = ct.models.MLModel(pipeline.spec)
+    model.input_description['image'] = 'Input image'
+    model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})'
+    model.input_description['confidenceThreshold'] = \
+        f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})'
+    model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")'
+    model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)'
+    model.save(f)  # pipelined
+    print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)')
+
+
 @smart_inference_mode()
 def run(
         data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
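Once saved, the pipelined model exposes the NMS thresholds as optional inputs alongside the two renamed outputs. A minimal usage sketch (macOS only, since `model.predict()` is unavailable elsewhere per the comment above; the file path and image size are assumptions):

```python
# Sketch: query the pipelined CoreML model produced by pipeline_coreml.
import coremltools as ct
from PIL import Image

mlmodel = ct.models.MLModel('yolov5s.mlmodel')  # assumed save path
img = Image.new('RGB', (640, 640))              # placeholder input image
out = mlmodel.predict({'image': img, 'iouThreshold': 0.45, 'confidenceThreshold': 0.25})
conf, coords = out['confidence'], out['coordinates']  # per-class scores, normalized xywh boxes
```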
@@ -582,9 +771,11 @@ def run(
     if onnx or xml:  # OpenVINO requires ONNX
         f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
     if xml:  # OpenVINO
-        f[3], _ = export_openvino(file, metadata, half)
+        f[3], _ = export_openvino(file, metadata, half, int8, data)
     if coreml:  # CoreML
-        f[4], _ = export_coreml(model, im, file, int8, half)
+        f[4], ct_model = export_coreml(model, im, file, int8, half, nms)
+        if nms:
+            pipeline_coreml(ct_model, im, file, model.names, y)
     if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
         assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
         assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'

@@ -640,7 +831,7 @@ def parse_opt(known=False):
     parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
     parser.add_argument('--keras', action='store_true', help='TF: use Keras')
     parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
-    parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
+    parser.add_argument('--int8', action='store_true', help='CoreML/TF/OpenVINO INT8 quantization')
     parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
     parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
     parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Common modules
 """

@@ -24,12 +24,24 @@ import torch.nn as nn
 from PIL import Image
 from torch.cuda import amp

+# Import 'ultralytics' package or install if missing
+try:
+    import ultralytics
+
+    assert hasattr(ultralytics, '__version__')  # verify package is not directory
+except (ImportError, AssertionError):
+    import os
+
+    os.system('pip install -U ultralytics')
+    import ultralytics
+
+from ultralytics.utils.plotting import Annotator, colors, save_one_box
+
 from utils import TryExcept
 from utils.dataloaders import exif_transpose, letterbox
 from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
                            increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
                            xyxy2xywh, yaml_load)
-from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import copy_attr, smart_inference_mode

@@ -333,7 +345,7 @@ class DetectMultiBackend(nn.Module):
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
         pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
-        fp16 &= pt or jit or onnx or engine  # FP16
+        fp16 &= pt or jit or onnx or engine or triton  # FP16
         nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
         stride = 32  # default stride
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA

@@ -353,7 +365,8 @@ class DetectMultiBackend(nn.Module):
             model.half() if fp16 else model.float()
             if extra_files['config.txt']:  # load metadata dict
                 d = json.loads(extra_files['config.txt'],
-                               object_hook=lambda d: {int(k) if k.isdigit() else k: v
+                               object_hook=lambda d: {
+                                   int(k) if k.isdigit() else k: v
                                    for k, v in d.items()})
                 stride, names = int(d['stride']), d['names']
         elif dnn:  # ONNX OpenCV DNN
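The reformatted `object_hook` above restores integer class keys that JSON serialization turns into strings; a self-contained illustration:

```python
# Why the object_hook matters: JSON round-trips {0: 'person'} as {'0': 'person'}.
import json

meta = '{"stride": 32, "names": {"0": "person", "1": "bicycle"}}'
d = json.loads(meta, object_hook=lambda d: {int(k) if k.isdigit() else k: v for k, v in d.items()})
assert d['names'][0] == 'person'  # integer keys restored
```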
@@ -372,18 +385,18 @@ class DetectMultiBackend(nn.Module):
             stride, names = int(meta['stride']), eval(meta['names'])
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
-            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+            check_requirements('openvino>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
             from openvino.runtime import Core, Layout, get_batch
-            ie = Core()
+            core = Core()
             if not Path(w).is_file():  # if not *.xml
                 w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
-            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
-            if network.get_parameters()[0].get_layout().empty:
-                network.get_parameters()[0].set_layout(Layout('NCHW'))
-            batch_dim = get_batch(network)
+            ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))
+            if ov_model.get_parameters()[0].get_layout().empty:
+                ov_model.get_parameters()[0].set_layout(Layout('NCHW'))
+            batch_dim = get_batch(ov_model)
             if batch_dim.is_static:
                 batch_size = batch_dim.get_length()
-            executable_network = ie.compile_model(network, device_name='CPU')  # device_name="MYRIAD" for Intel NCS2
+            ov_compiled_model = core.compile_model(ov_model, device_name='AUTO')  # AUTO selects best available device
             stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')

@@ -523,7 +536,7 @@ class DetectMultiBackend(nn.Module):
             y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
         elif self.xml:  # OpenVINO
             im = im.cpu().numpy()  # FP32
-            y = list(self.executable_network([im]).values())
+            y = list(self.ov_compiled_model(im).values())
         elif self.engine:  # TensorRT
             if self.dynamic and im.shape != self.bindings['images'].shape:
                 i = self.model.get_binding_index('images')

@@ -540,7 +553,7 @@ class DetectMultiBackend(nn.Module):
         elif self.coreml:  # CoreML
             im = im.cpu().numpy()
             im = Image.fromarray((im[0] * 255).astype('uint8'))
-            # im = im.resize((192, 320), Image.ANTIALIAS)
+            # im = im.resize((192, 320), Image.BILINEAR)
             y = self.model.predict({'image': im})  # coordinates are xywh normalized
             if 'confidence' in y:
                 box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Experimental modules
 """

@@ -87,11 +87,11 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
         model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode

-    # Module compatibility updates
+    # Module updates
     for m in model.modules():
         t = type(m)
         if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
-            m.inplace = inplace  # torch 1.7.0 compatibility
+            m.inplace = inplace
         if t is Detect and not isinstance(m.anchor_grid, list):
             delattr(m, 'anchor_grid')
             setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Default anchors for COCO data

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 TensorFlow, Keras and TFLite versions of YOLOv5
 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 YOLO-specific modules

@@ -21,8 +21,8 @@ if str(ROOT) not in sys.path:
 if platform.system() != 'Windows':
     ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

-from models.common import *
-from models.experimental import *
+from models.common import *  # noqa
+from models.experimental import *  # noqa
 from utils.autoanchor import check_anchor_order
 from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
 from utils.plots import feature_visualization

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Parameters
 nc: 80  # number of classes
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 utils/initialization
 """

@@ -54,13 +54,17 @@ def notebook_init(verbose=True):
     import os
     import shutil

-    from utils.general import check_font, check_requirements, is_colab
+    from ultralytics.utils.checks import check_requirements
+
+    from utils.general import check_font, is_colab
     from utils.torch_utils import select_device  # imports

     check_font()

     import psutil

+    if check_requirements('wandb', install=False):
+        os.system('pip uninstall -y wandb')  # eliminate unexpected account creation prompt with infinite hang
     if is_colab():
         shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Activation functions
 """

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Image augmentation functions
 """

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 AutoAnchor utils
 """

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Auto-batch utils
 """

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Callback utils
 """
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Dataloaders and dataset utils
 """

@@ -36,7 +36,7 @@ from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, c
 from utils.torch_utils import torch_distributed_zero_first

 # Parameters
-HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
+HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
 IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
 VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html

@@ -355,7 +355,7 @@ class LoadStreams:
             # Start thread to read frames from video stream
             st = f'{i + 1}/{n}: {s}... '
             if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video
-                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
+                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
                 check_requirements(('pafy', 'youtube_dl==2020.12.2'))
                 import pafy
                 s = pafy.new(s).getbest(preftype='mp4').url  # YouTube URL

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference

@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt update
 RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
+RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg
 # RUN alias python=python3

 # Security updates

@@ -24,14 +24,13 @@ RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
+COPY . /usr/src/app

 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
-    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3'
+    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0'
     # tensorflow tensorflowjs \

 # Set environment variables

@@ -1,9 +1,9 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi

 # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
-FROM arm64v8/ubuntu:rolling
+FROM arm64v8/ubuntu:22.10

 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt update
 RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
+RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev
 # RUN alias python=python3

 # Install pip packages

@@ -27,8 +27,7 @@ RUN mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
+COPY . /usr/src/app

 ENV DEBIAN_FRONTEND teletype

@@ -1,25 +1,27 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments

 # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
-FROM ubuntu:rolling
+FROM ubuntu:mantic-20231011

 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

 # Install linux packages
-ENV DEBIAN_FRONTEND noninteractive
-RUN apt update
-RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
+# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
+RUN apt update \
+    && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
 # RUN alias python=python3

+# Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error
+RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
+
 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
-    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \
+    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \
     # tensorflow tensorflowjs \
     --extra-index-url https://download.pytorch.org/whl/cpu

@@ -28,9 +30,7 @@ RUN mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
-
-ENV DEBIAN_FRONTEND teletype
+COPY . /usr/src/app

 # Usage Examples -------------------------------------------------------------------------------------------------------

@@ -1,10 +1,9 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Download utils
 """

 import logging
-import os
 import subprocess
 import urllib
 from pathlib import Path

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Perform test request
 """

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Run a Flask REST API exposing one or more YOLOv5s models
 """

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 General utils
 """

@@ -36,6 +36,17 @@ import torch
 import torchvision
 import yaml

+# Import 'ultralytics' package or install if missing
+try:
+    import ultralytics
+
+    assert hasattr(ultralytics, '__version__')  # verify package is not directory
+except (ImportError, AssertionError):
+    os.system('pip install -U ultralytics')
+    import ultralytics
+
+from ultralytics.utils.checks import check_requirements
+
 from utils import TryExcept, emojis
 from utils.downloads import curl_download, gsutil_getsize
 from utils.metrics import box_iou, fitness

@@ -58,6 +69,7 @@ pd.options.display.max_columns = 10
 cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
 os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads
 os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS)  # OpenMP (PyTorch and SciPy)
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress verbose TF compiler warnings in Colab


 def is_ascii(s=''):

@@ -369,7 +381,7 @@ def check_git_info(path='.'):
         return {'remote': None, 'branch': None, 'commit': None}


-def check_python(minimum='3.7.0'):
+def check_python(minimum='3.8.0'):
     # Check current python version vs. required python version
     check_version(platform.python_version(), minimum, name='Python ', hard=True)

@@ -386,41 +398,6 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals
     return result


-@TryExcept()
-def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):
-    # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)
-    prefix = colorstr('red', 'bold', 'requirements:')
-    check_python()  # check python version
-    if isinstance(requirements, Path):  # requirements.txt file
-        file = requirements.resolve()
-        assert file.exists(), f'{prefix} {file} not found, check failed.'
-        with file.open() as f:
-            requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
-    elif isinstance(requirements, str):
-        requirements = [requirements]
-
-    s = ''
-    n = 0
-    for r in requirements:
-        try:
-            pkg.require(r)
-        except (pkg.VersionConflict, pkg.DistributionNotFound):  # exception if requirements not met
-            s += f'"{r}" '
-            n += 1
-
-    if s and install and AUTOINSTALL:  # check environment variable
-        LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
-        try:
-            # assert check_online(), "AutoUpdate skipped (offline)"
-            LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())
-            source = file if 'file' in locals() else requirements
-            s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
-                f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
-            LOGGER.info(s)
-        except Exception as e:
-            LOGGER.warning(f'{prefix}{e}')
-
-
 def check_img_size(imgsz, s=32, floor=0):
     # Verify image size is a multiple of stride s in each dimension
     if isinstance(imgsz, int):  # integer i.e. img_size=640

@@ -1135,6 +1112,7 @@ def imshow(path, im):
     imshow_(path.encode('unicode_escape').decode(), im)


-cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine
+if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
+    cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine

 # Variables ------------------------------------------------------------------------------------------------------------
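The new guard above patches cv2 only when the program's entry point lives inside the YOLOv5 repo. An illustrative standalone sketch of the same frame inspection (variable names are mine, not from the diff):

```python
# Sketch: inspect.stack()[0] is this frame, [-1] is the outermost (entry) frame.
import inspect
from pathlib import Path

repo_root = Path(inspect.stack()[0].filename).parent.parent.as_posix()
entry_file = inspect.stack()[-1].filename
print(repo_root in entry_file)  # True only when run from inside the repo
```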

@@ -1,5 +1,5 @@
 # add these requirements in your app on top of the existing ones
-pip==21.1
-Flask==1.0.2
+pip==23.3
+Flask==2.3.2
 gunicorn==19.10.0
-werkzeug>=2.2.3  # not directly required, pinned by Snyk to avoid a vulnerability
+werkzeug>=3.0.1  # not directly required, pinned by Snyk to avoid a vulnerability

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
 Logging utils
 """

@@ -9,7 +9,6 @@ from pathlib import Path

 import pkg_resources as pkg
 import torch
-from torch.utils.tensorboard import SummaryWriter

 from utils.general import LOGGER, colorstr, cv2
 from utils.loggers.clearml.clearml_utils import ClearmlLogger

@@ -20,6 +19,11 @@ from utils.torch_utils import de_parallel
 LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet')  # *.csv, TensorBoard, Weights & Biases, ClearML
 RANK = int(os.getenv('RANK', -1))

+try:
+    from torch.utils.tensorboard import SummaryWriter
+except ImportError:
+    SummaryWriter = lambda *args: None  # None = SummaryWriter(str)
+
 try:
     import wandb

@@ -42,15 +46,15 @@ except (ImportError, AssertionError):
     clearml = None

 try:
-    if RANK not in [0, -1]:
-        comet_ml = None
-    else:
+    if RANK in {0, -1}:
         import comet_ml

         assert hasattr(comet_ml, '__version__')  # verify package import not local dir
         from utils.loggers.comet import CometLogger

-except (ModuleNotFoundError, ImportError, AssertionError):
+    else:
+        comet_ml = None
+except (ImportError, AssertionError):
     comet_ml = None

@@ -84,10 +88,6 @@ class Loggers():
         self.csv = True  # always log to csv

         # Messages
-        if not clearml:
-            prefix = colorstr('ClearML: ')
-            s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML"
-            self.logger.info(s)
         if not comet_ml:
             prefix = colorstr('Comet: ')
             s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet"

@@ -114,7 +114,7 @@ class Loggers():
             self.clearml = None
             prefix = colorstr('ClearML: ')
             LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.'
-                           f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme')
+                           f' See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme')
         else:
             self.clearml = None

@@ -5,8 +5,7 @@ from pathlib import Path
 import numpy as np
 import yaml

-from utils.plots import Annotator, colors
+from ultralytics.utils.plotting import Annotator, colors

 try:
     import clearml

@@ -59,7 +59,7 @@ Check out an example of a [completed run here](https://www.comet.com/examples/co
 Or better yet, try it out yourself in this Colab Notebook

-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-training/yolov5/notebooks/Comet_and_YOLOv5.ipynb)

 # Log automatically

@@ -164,7 +164,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \
 If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag.

-The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
+The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.

 ```shell
 python train.py \

View File

@ -18,7 +18,7 @@ try:
# Project Configuration # Project Configuration
config = comet_ml.config.get_config() config = comet_ml.config.get_config()
COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5')
except (ModuleNotFoundError, ImportError): except ImportError:
comet_ml = None comet_ml = None
COMET_PROJECT_NAME = None COMET_PROJECT_NAME = None
@ -42,7 +42,7 @@ COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')
COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true'
# Evaluation Settings # Evaluation Settings
COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' COMET_LOG_CONFUSION_MATRIX = (os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true')
COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true'
COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100))
@ -51,10 +51,10 @@ CONF_THRES = float(os.getenv('CONF_THRES', 0.001))
IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) IOU_THRES = float(os.getenv('IOU_THRES', 0.6))
# Batch Logging Settings # Batch Logging Settings
COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' COMET_LOG_BATCH_METRICS = (os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true')
COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1)
COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1)
COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' COMET_LOG_PER_CLASS_METRICS = (os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true')
RANK = int(os.getenv('RANK', -1)) RANK = int(os.getenv('RANK', -1))
@ -82,7 +82,7 @@ class CometLogger:
self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
# Dataset Artifact Settings # Dataset Artifact Settings
self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET
self.resume = self.opt.resume self.resume = self.opt.resume
# Default parameters to pass to Experiment objects # Default parameters to pass to Experiment objects
@ -93,6 +93,7 @@ class CometLogger:
'project_name': COMET_PROJECT_NAME, } 'project_name': COMET_PROJECT_NAME, }
self.default_experiment_kwargs.update(experiment_kwargs) self.default_experiment_kwargs.update(experiment_kwargs)
self.experiment = self._get_experiment(self.comet_mode, run_id) self.experiment = self._get_experiment(self.comet_mode, run_id)
self.experiment.set_name(self.opt.name)
self.data_dict = self.check_dataset(self.opt.data) self.data_dict = self.check_dataset(self.opt.data)
self.class_names = self.data_dict['names'] self.class_names = self.data_dict['names']
@ -136,7 +137,7 @@ class CometLogger:
self.comet_log_predictions = COMET_LOG_PREDICTIONS self.comet_log_predictions = COMET_LOG_PREDICTIONS
if self.opt.bbox_interval == -1: if self.opt.bbox_interval == -1:
self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 self.comet_log_prediction_interval = (1 if self.opt.epochs < 10 else self.opt.epochs // 10)
else: else:
self.comet_log_prediction_interval = self.opt.bbox_interval self.comet_log_prediction_interval = self.opt.bbox_interval
@ -231,7 +232,8 @@ class CometLogger:
with open(data_file) as f: with open(data_file) as f:
data_config = yaml.safe_load(f) data_config = yaml.safe_load(f)
if data_config['path'].startswith(COMET_PREFIX): path = data_config.get('path')
if path and path.startswith(COMET_PREFIX):
path = data_config['path'].replace(COMET_PREFIX, '') path = data_config['path'].replace(COMET_PREFIX, '')
data_dict = self.download_dataset_artifact(path) data_dict = self.download_dataset_artifact(path)
@ -312,8 +314,16 @@ class CometLogger:
image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
try: try:
artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) artifact.add(
artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) image_file,
logical_path=image_logical_path,
metadata={'split': split},
)
artifact.add(
label_file,
logical_path=label_logical_path,
metadata={'split': split},
)
except ValueError as e: except ValueError as e:
logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
logger.error(f'COMET ERROR: {e}') logger.error(f'COMET ERROR: {e}')
@ -355,15 +365,14 @@ class CometLogger:
data_dict['path'] = artifact_save_dir data_dict['path'] = artifact_save_dir
metadata_names = metadata.get('names') metadata_names = metadata.get('names')
if type(metadata_names) == dict: if isinstance(metadata_names, dict):
data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()}
elif type(metadata_names) == list: elif isinstance(metadata_names, list):
data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
else: else:
raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary" raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
data_dict = self.update_data_paths(data_dict) return self.update_data_paths(data_dict)
return data_dict
def update_data_paths(self, data_dict): def update_data_paths(self, data_dict):
path = data_dict.get('path', '') path = data_dict.get('path', '')
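Note: `isinstance` is the idiomatic form of these checks (it also accepts subclasses), and `zip(range(len(x)), x)` is just `enumerate(x)`. Both branches normalize the yaml `names` field to a dict keyed by int class index. A standalone sketch of the same normalization (`normalize_names` is a hypothetical helper; the `ValueError` matches the fix above, since raising a bare string is a `TypeError` in Python 3):
```
def normalize_names(metadata_names):
    # Normalize a yaml 'names' field (list or dict) to {int_class_index: name}
    if isinstance(metadata_names, dict):
        return {int(k): v for k, v in metadata_names.items()}
    if isinstance(metadata_names, list):
        return dict(enumerate(metadata_names))
    raise ValueError("Invalid 'names' field in dataset yaml file. Please use a list or dictionary")

assert normalize_names(['person', 'car']) == {0: 'person', 1: 'car'}
assert normalize_names({'0': 'person'}) == {0: 'person'}
```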
@ -475,8 +484,9 @@ class CometLogger:
'f1': f1[i], 'f1': f1[i],
'true_positives': tp[i], 'true_positives': tp[i],
'false_positives': fp[i], 'false_positives': fp[i],
'support': nt[c]}, 'support': nt[c], },
prefix=class_name) prefix=class_name,
)
if self.comet_log_confusion_matrix: if self.comet_log_confusion_matrix:
epoch = self.experiment.curr_epoch epoch = self.experiment.curr_epoch
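Note: this hunk is formatting only. The call pattern itself — a per-class metrics dict namespaced by a class-name prefix — looks like this in isolation (values hypothetical, `experiment` assumed to be the same `comet_ml` Experiment used throughout this logger):
```
# Per-class metrics grouped under a class-name prefix
metrics = {'precision': 0.68, 'recall': 0.74, 'f1': 0.71}  # hypothetical values
experiment.log_metrics(metrics, prefix='person')           # keys namespaced by class name
```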

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
# WARNING ⚠️ wandb is deprecated and will be removed in a future release. # WARNING ⚠️ wandb is deprecated and will be removed in a future release.
# See supported integrations at https://github.com/ultralytics/yolov5#integrations # See supported integrations at https://github.com/ultralytics/yolov5#integrations

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" """
Loss functions Loss functions
""" """

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" """
Model validation metrics Model validation metrics
""" """

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" """
Plotting utils Plotting utils
""" """
@ -8,7 +8,6 @@ import math
import os import os
from copy import copy from copy import copy
from pathlib import Path from pathlib import Path
from urllib.error import URLError
import cv2 import cv2
import matplotlib import matplotlib
@ -17,13 +16,13 @@ import numpy as np
import pandas as pd import pandas as pd
import seaborn as sn import seaborn as sn
import torch import torch
from PIL import Image, ImageDraw, ImageFont from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded from utils import TryExcept, threaded
from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, is_ascii, xywh2xyxy, xyxy2xywh) from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness from utils.metrics import fitness
from utils.segment.general import scale_image
# Settings # Settings
RANK = int(os.getenv('RANK', -1)) RANK = int(os.getenv('RANK', -1))
@ -52,120 +51,6 @@ class Colors:
colors = Colors() # create instance for 'from utils.plots import colors' colors = Colors() # create instance for 'from utils.plots import colors'
def check_pil_font(font=FONT, size=10):
# Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
font = Path(font)
font = font if font.exists() else (CONFIG_DIR / font.name)
try:
return ImageFont.truetype(str(font) if font.exists() else font.name, size)
except Exception: # download if missing
try:
check_font(font)
return ImageFont.truetype(str(font), size)
except TypeError:
check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374
except URLError: # not online
return ImageFont.load_default()
class Annotator:
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic
self.pil = pil or non_ascii
if self.pil: # use PIL
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)
self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font,
size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
else: # use cv2
self.im = im
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
# Add one xyxy box to image with label
if self.pil or not is_ascii(label):
self.draw.rectangle(box, width=self.lw, outline=color) # box
if label:
w, h = self.font.getsize(label) # text width, height (WARNING: getsize is deprecated in Pillow 9.2.0)
# _, _, w, h = self.font.getbbox(label) # text width, height (New)
outside = box[1] - h >= 0 # label fits outside box
self.draw.rectangle(
(box[0], box[1] - h if outside else box[1], box[0] + w + 1,
box[1] + 1 if outside else box[1] + h + 1),
fill=color,
)
# self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
else: # cv2
p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
if label:
tf = max(self.lw - 1, 1) # font thickness
w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
outside = p1[1] - h >= 3
p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
cv2.putText(self.im,
label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
0,
self.lw / 3,
txt_color,
thickness=tf,
lineType=cv2.LINE_AA)
def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
"""Plot masks at once.
Args:
masks (tensor): predicted masks on cuda, shape: [n, h, w]
colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1]
alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
"""
if self.pil:
# convert to numpy first
self.im = np.asarray(self.im).copy()
if len(masks) == 0:
self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
colors = colors[:, None, None] # shape(n,1,1,3)
masks = masks.unsqueeze(3) # shape(n,h,w,1)
masks_color = masks * (colors * alpha) # shape(n,h,w,3)
inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3)
im_gpu = im_gpu.flip(dims=[0]) # flip channel
im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
im_gpu = im_gpu * inv_alph_masks[-1] + mcs
im_mask = (im_gpu * 255).byte().cpu().numpy()
self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape)
if self.pil:
# convert im back to PIL and update draw
self.fromarray(self.im)
def rectangle(self, xy, fill=None, outline=None, width=1):
# Add rectangle to image (PIL-only)
self.draw.rectangle(xy, fill, outline, width)
def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
# Add text to image (PIL-only)
if anchor == 'bottom': # start y from font bottom
w, h = self.font.getsize(text) # text width, height
xy[1] += 1 - h
self.draw.text(xy, text, fill=txt_color, font=self.font)
def fromarray(self, im):
# Update self.im from a numpy array
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)
def result(self):
# Return annotated image as array
return np.asarray(self.im)
def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
""" """
x: Features to be visualized x: Features to be visualized
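Note: the local `Annotator` and `check_pil_font` are dropped in favor of the packaged implementation imported above from `ultralytics.utils.plotting`; that also makes the `URLError`, `ImageFont`, and font-related `utils.general` imports dead code, hence their removal. The surviving call sites only use `box_label`, `rectangle`, `text`, and `result`, which the packaged class provides, so this is presumably a drop-in swap. A hedged usage sketch:
```
import numpy as np
from ultralytics.utils.plotting import Annotator

im = np.zeros((640, 640, 3), dtype=np.uint8)  # hypothetical blank frame
annotator = Annotator(im, line_width=2)
annotator.box_label([100, 100, 200, 200], label='person', color=(128, 128, 128))
result = annotator.result()                   # annotated image back as an ndarray
```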
@ -265,7 +150,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
if paths: if paths:
annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
if len(targets) > 0: if len(targets) > 0:
ti = targets[targets[:, 0] == i] # image targets ti = targets[targets[:, 0] == i] # image targets
boxes = xywh2xyxy(ti[:, 2:6]).T boxes = xywh2xyxy(ti[:, 2:6]).T
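Note the tuple-to-list change in `annotator.text(...)`: the PIL-backed `text()` shown in the removed class mutates `xy[1]` in place when `anchor='bottom'`, which a tuple cannot support; passing a list keeps the call safe regardless of anchor (the same change recurs in the segment plotting hunk below). The failure mode in isolation:
```
xy = (5, 5)
# xy[1] += 1   # TypeError: 'tuple' object does not support item assignment
xy = [5, 5]
xy[1] += 1     # fine: lists are mutable, matching the in-place anchor adjustment
```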
@ -500,7 +385,8 @@ def plot_results(file='path/to/results.csv', dir=''):
for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
y = data.values[:, j].astype('float') y = data.values[:, j].astype('float')
# y[y == 0] = np.nan # don't show zero values # y[y == 0] = np.nan # don't show zero values
ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) # actual results
ax[i].plot(x, gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2) # smoothing line
ax[i].set_title(s[j], fontsize=12) ax[i].set_title(s[j], fontsize=12)
# if j in [8, 9, 10]: # share train and val loss y axes # if j in [8, 9, 10]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
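Note: `plot_results` now overlays a Gaussian-smoothed copy of each results.csv column on the raw curve. One caveat: the `scipy.ndimage.filters` namespace imported above is a long-deprecated alias; the current import path is `scipy.ndimage`. A minimal sketch of the smoothing step:
```
import numpy as np
from scipy.ndimage import gaussian_filter1d  # modern path; .filters is deprecated

y = np.random.rand(100)                   # hypothetical metric column from results.csv
y_smooth = gaussian_filter1d(y, sigma=3)  # larger sigma -> smoother trend line
```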

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" """
Image augmentation functions Image augmentation functions
""" """

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" """
Dataloaders Dataloaders
""" """

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" """
Model validation metrics Model validation metrics
""" """

View File

@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg'
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
if paths: if paths:
annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
if len(targets) > 0: if len(targets) > 0:
idx = targets[:, 0] == i idx = targets[:, 0] == i
ti = targets[idx] # image targets ti = targets[idx] # image targets

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" """
PyTorch utils PyTorch utils
""" """

View File

@ -1,4 +1,4 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
""" Utils to interact with the Triton Inference Server """ Utils to interact with the Triton Inference Server
""" """

Binary files not shown (4 files).