
Object detection with YOLO in Python

givemebro 2020. 7. 2. 16:36
import cv2
import numpy as np
import time

# Optional check: make sure OpenCL is available before enabling it.
# if not cv2.ocl.haveOpenCL():
#     print("Error: OpenCL is not available on this system")
cv2.ocl.setUseOpenCL(True)
# Load the pretrained YOLOv3 network (Darknet weights + config)
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
# DNN_BACKEND_OPENCV: run on the OpenCV backend (e.g. Intel integrated graphics via OpenCL)
# net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
# DNN_BACKEND_CUDA: run on an NVIDIA GPU (requires an OpenCV build with CUDA support)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
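# Note (an assumption, not part of the original post): on an OpenCV build compiled
# without CUDA, the DNN module falls back to the CPU at inference time. One way to
# check up front and prefer the OpenCL target instead:
# if cv2.cuda.getCudaEnabledDeviceCount() == 0:
#     net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
#     net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)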
# Load the COCO class names, one per line
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
# Names of the YOLO output layers; flatten() keeps this working whether
# getUnconnectedOutLayers() returns a 1-D or an Nx1 array (this changed between OpenCV versions)
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))  # one random colour per class

camera = cv2.VideoCapture(0)
while True:
    # grab the current frame
    start_time = time.time()
    (grabbed, frame) = camera.read()
    if not grabbed:  # camera read failed or stream ended
        break

    frame = cv2.flip(frame, 1)  # mirror the webcam image horizontally
    height, width, channels = frame.shape
    frame = cv2.UMat(frame)  # hand the frame to OpenCL via a UMat

    # Build a 416x416 input blob (pixels scaled by 1/255, BGR -> RGB) and run a forward pass
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    # Collect the class id, confidence and box for every detection above 0.5 confidence
    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                # Object detected
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # top-left corner of the box
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Non-maximum suppression: keep only the strongest of heavily overlapping boxes
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

    font = cv2.FONT_HERSHEY_PLAIN
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            color = colors[class_ids[i]]  # colour by class, so it cannot index past len(colors)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
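    # Sketch (not in the original post): the per-frame latency printed below could
    # instead be drawn onto the frame before it is shown, e.g.
    # cv2.putText(frame, "%.0f ms" % ((time.time() - start_time) * 1000),
    #             (10, 30), font, 2, (0, 255, 0), 2)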
    cv2.imshow("Image", frame)
    print(time.time() - start_time)  # per-frame processing time in seconds
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# clean up the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
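
The script expects yolov3.weights, yolov3.cfg and coco.names in the working directory. As a minimal sketch (the URLs are the ones commonly published for Darknet's YOLOv3 and are an assumption here, not part of the original post), the three files can be fetched like this:

import urllib.request

# Hypothetical download helper; adjust the URLs if they have moved.
# yolov3.weights is roughly 240 MB, so this can take a while.
files = {
    "yolov3.weights": "https://pjreddie.com/media/files/yolov3.weights",
    "yolov3.cfg": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg",
    "coco.names": "https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names",
}
for name, url in files.items():
    print("downloading", name)
    urllib.request.urlretrieve(url, name)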
