1. Differences from YOLOv4


1.1 Input-side differences
1 Mosaic data augmentation

- 1. Enriches the dataset: four images are picked at random, randomly scaled, and stitched together in a random layout. This greatly enriches the detection data, and the random scaling in particular produces many small objects, which makes the network more robust.
- 2. Reduces GPU requirements: one might argue that random scaling can be done with ordinary data augmentation, but the author considered that many users only have a single GPU. With Mosaic augmentation the data of four images is processed in one pass, so the mini-batch does not need to be large and a single GPU can already reach good results. (A minimal sketch of the Mosaic idea follows this list.)
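The following is a simplified sketch of the stitching step only, not the repository's training code: the helper name `mosaic4`, the gray fill value, and the per-tile resize are illustrative assumptions, and the real pipeline also remaps the box labels and crops the canvas back to the network input size.

import random
import cv2
import numpy as np

def mosaic4(images, size=640):
    """Hypothetical sketch: place 4 HxWx3 uint8 images around a random center on a 2*size canvas."""
    canvas = np.full((2 * size, 2 * size, 3), 114, dtype=np.uint8)  # gray background
    xc = random.randint(size // 2, 3 * size // 2)  # random mosaic center (x)
    yc = random.randint(size // 2, 3 * size // 2)  # random mosaic center (y)
    quadrants = [(0, 0, xc, yc), (xc, 0, 2 * size, yc),
                 (0, yc, xc, 2 * size), (xc, yc, 2 * size, 2 * size)]
    for img, (x1, y1, x2, y2) in zip(images, quadrants):
        # each tile is resized to its quadrant, so every image ends up at a different scale
        canvas[y1:y2, x1:x2] = cv2.resize(img, (x2 - x1, y2 - y1))
    return canvas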
2 Adaptive anchor box computation

3 Adaptive image scaling


The target size is 416×416; dividing it by the original image dimensions (for example, an 800×600 image) gives two scale factors, 0.52 and 0.69, and the smaller factor, 0.52, is chosen. The worked sketch below reproduces this calculation.
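A minimal NumPy sketch of the calculation above. The 800×600 input size, the function name, and the stride of 32 (one unit per downsampling level) are assumptions for illustration; they are not taken verbatim from the YOLOv5 source.

import numpy as np

def letterbox_shape(orig_w=800, orig_h=600, target=416, stride=32):
    # pick the smaller scale factor so the longer side fits the target size
    r = min(target / orig_w, target / orig_h)            # min(0.52, 0.69) = 0.52
    new_w, new_h = round(orig_w * r), round(orig_h * r)  # 416 x 312
    # adaptive scaling: pad the short side only up to the next stride multiple, not to a full square
    pad = np.mod(target - new_h, stride)                 # (416 - 312) % 32 = 8
    top = bottom = int(pad / 2)                          # 4 px of padding on each side
    return (new_w, new_h), (top, bottom)

print(letterbox_shape())  # ((416, 312), (4, 4))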


1.2 Backbone differences
1 Focus structure

import torch
import torch.nn as nn  # Conv is the standard convolution block defined in the CSP snippet below

class Focus(nn.Module):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1):
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, 1)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
                                    x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
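As a quick shape check (assuming the Focus class above and the Conv block from the next snippet are both defined), the slicing halves the spatial resolution and quadruples the channel count before the convolution:

x = torch.randn(1, 3, 640, 640)   # (batch, channels, height, width)
focus = Focus(c1=3, c2=32, k=3)
print(focus(x).shape)             # torch.Size([1, 32, 320, 320]): slicing yields 12 channels, Conv maps them to 32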
2 CSP structure

class Conv(nn.Module):
    # Standard convolution
    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, k // 2, groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        return self.act(self.conv(x))


class Bottleneck(nn.Module):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(c2, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
1.3 Neck differences

1.4 Output-side differences
1 Bounding box loss function


def compute_loss(p, targets, model):  # predictions, targets, model
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # per output
    nt = 0  # targets
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj

        nb = b.shape[0]  # number of targets
        if nb:
            nt += nb  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # GIoU
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss

            # Obj
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Class
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj)  # obj loss

    lbox *= h['giou']
    lobj *= h['obj']
    lcls *= h['cls']
    bs = tobj.shape[0]  # batch size
    if red == 'sum':
        g = 3.0  # loss gain
        lobj *= g / bs
        if nt:
            lcls *= g / nt / model.nc
            lbox *= g / nt

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
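For reference, `bbox_iou(..., GIoU=True)` above returns the generalized IoU used in the box loss. A minimal standalone version for two axis-aligned boxes in (x1, y1, x2, y2) format might look like the sketch below; it is not the repository's vectorized implementation.

def giou_xyxy(a, b, eps=1e-9):
    """GIoU of two boxes: IoU minus the fraction of the smallest enclosing box not covered by the union."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter + eps
    iou = inter / union
    # smallest enclosing box
    cw = max(a[2], b[2]) - min(a[0], b[0])
    ch = max(a[3], b[3]) - min(a[1], b[1])
    c_area = cw * ch + eps
    return iou - (c_area - union) / c_area

print(giou_xyxy((0, 0, 2, 2), (1, 1, 3, 3)))  # about -0.079: IoU 1/7 minus enclosing-box penalty 2/9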
2 NMS (non-maximum suppression)

Occlusion and overlap between targets is the main case where the choice of NMS strategy matters.
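For context, the sketch below is a minimal greedy IoU-based NMS, the standard algorithm that post-processing builds on; it is not the repository's `non_max_suppression`, and the function name and thresholds are illustrative.

import numpy as np

def nms(boxes, scores, iou_thres=0.5):
    """Greedy NMS: keep the highest-scoring box, drop boxes overlapping it above iou_thres, repeat.
    boxes: (N, 4) array of (x1, y1, x2, y2); scores: (N,) array."""
    order = scores.argsort()[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        if order.size == 1:
            break
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_o = (boxes[order[1:], 2] - boxes[order[1:], 0]) * (boxes[order[1:], 3] - boxes[order[1:], 1])
        iou = inter / (area_i + area_o - inter + 1e-9)
        order = order[1:][iou <= iou_thres]  # keep only boxes that do not overlap the chosen one too much
    return keep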
2. YOLOv5 social distancing project
- Run the object detector on the video stream to detect all people and record each person's bounding box and centroid position.
- Compute the pairwise distances between the centroids of all detected people.
- Set a safety distance and, for every pair, check whether the distance between the two people is smaller than N pixels; if it is, the pair is considered too close, otherwise a safe distance is being kept (see the sketch after this list).
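The `distancing` helper called in the script below is not shown in this post. A minimal sketch of the centroid-distance check described by the steps above could look like this; the function names and the pixel threshold are illustrative assumptions.

from itertools import combinations

def centroid(box):
    x1, y1, x2, y2 = box
    return ((x1 + x2) / 2.0, (y1 + y2) / 2.0)

def unsafe_pairs(people_boxes, dist_thres=250):
    """Return index pairs of people whose centroids are closer than dist_thres pixels."""
    pairs = []
    for (i, a), (j, b) in combinations(enumerate(people_boxes), 2):
        (xa, ya), (xb, yb) = centroid(a), centroid(b)
        dist = ((xa - xb) ** 2 + (ya - yb) ** 2) ** 0.5
        if dist < dist_thres:
            pairs.append((i, j))
    return pairs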


import argparse

from utils.datasets import *
from utils.utils import *


def detect(save_img=False):
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')

    # Initialize
    device = torch_utils.select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Download the model weights
    google_utils.attempt_download(weights)
    # Load the weights
    model = torch.load(weights, map_location=device)['model'].float()
    # torch.save(torch.load(weights, map_location=device), weights)  # update model if SourceChangeWarning
    # model.fuse()
    # Set the model to inference mode
    model.to(device).eval()
    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = False
    if classify:
        modelc = torch_utils.load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Set up the Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        torch.backends.cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get the class label names
    names = model.names if hasattr(model, 'names') else model.modules.names
    # Define colors
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    # Initialize an all-zero image
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)
    _ = model(img.half() if half else img) if device.type != 'cpu' else None
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Predict
        t1 = torch_utils.time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)
        t2 = torch_utils.time_synchronized()

        # Apply the second-stage classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        people_coords = []

        # Process the detections
        for i, det in enumerate(pred):
            if webcam:
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale the boxes to the size of im0
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print the results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write the results
                for *xyxy, conf, cls in det:
                    if save_txt:
                        # xyxy2xywh ==> convert the predicted [x1, y1, x2, y2] to [x, y, w, h], where xy1=top-left, xy2=bottom-right
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        with open(save_path[:save_path.rfind('.')] + '.txt', 'a') as file:
                            file.write(('%g ' * 5 + '\n') % (cls, *xywh))  # label format

                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        if label is not None:
                            if (label.split())[0] == 'person':
                                # print(xyxy)
                                people_coords.append(xyxy)
                                # plot_one_box(xyxy, im0, line_thickness=3)
                                plot_dots_on_people(xyxy, im0)

            # Draw the connecting lines between people using people_coords
            # Pairs are split into "Low Risk" and "High Risk"
            distancing(people_coords, im0, dist_thres_lim=(200, 250))

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin':  # MacOS
            os.system('open ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./weights/yolov5s.pt', help='model.pt path')
    parser.add_argument('--source', type=str, default='./inference/videos/', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--output', type=str, default='./inference/output', help='output folder')  # output folder
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
    parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    opt = parser.parse_args()
    opt.img_size = check_img_size(opt.img_size)
    print(opt)

    with torch.no_grad():
        detect()


