본문 바로가기

개발/Deep Learning

스파르타 코딩클럽 - 2주차 [이미지 처리로 시작하는 딥러닝]

#1 이미지 전처리 및 후처리

# NOTE(review): fragment — img, net, cv2 and np are defined earlier in the post.
h, w, c = img.shape

# Resize to width 500 while keeping the aspect ratio
# (cv2.resize takes dsize as (width, height)).
img = cv2.resize(img, dsize=(500, int(h / w * 500)))

# Crop a fixed region of interest: rows 162:513, columns 185:428.
img = img[162:513, 185:428]

# Per-channel BGR mean; blobFromImage subtracts it from the image
# before the network sees it.
MEAN_VALUE = [103.939, 116.779, 123.680]
blob = cv2.dnn.blobFromImage(img, mean=MEAN_VALUE)

# Run the network on the preprocessed blob.
net.setInput(blob)
output = net.forward()

# Drop the batch dimension and reorder CHW -> HWC so the result
# is an ordinary image array.
output = output.squeeze().transpose((1, 2, 0))
# Add the subtracted mean back to restore real pixel values.
output += MEAN_VALUE

# Clamp to the valid 8-bit range and convert for display/saving.
output = np.clip(output, 0, 255)
output = output.astype('uint8')

 

 

#2 이미지 반으로 나누어 각각 모델 적용하기

import cv2
import numpy as np

# Two Torch style-transfer models: one per half of the image.
net = cv2.dnn.readNetFromTorch('week2/models/instance_norm/mosaic.t7')
net2 = cv2.dnn.readNetFromTorch('week2/models/instance_norm/candy.t7')

img = cv2.imread('week2/imgs/03.jpg')
# cv2.imread returns None (it does not raise) when the file is missing;
# fail loudly instead of crashing later on img.shape.
if img is None:
    raise FileNotFoundError('week2/imgs/03.jpg could not be read')

h, w, c = img.shape

# Resize to width 500 while keeping the aspect ratio
# (cv2.resize takes dsize as (width, height)).
img = cv2.resize(img, dsize=(500, int(h / w * 500)))

# Per-channel BGR mean; blobFromImage subtracts it, so it must be
# added back after inference.
MEAN_VALUE = [103.939, 116.779, 123.680]
blob = cv2.dnn.blobFromImage(img, mean=MEAN_VALUE)


def _postprocess(raw):
    """Convert a raw NCHW network output back to a displayable HWC uint8 image."""
    out = raw.squeeze().transpose((1, 2, 0))  # drop batch dim, CHW -> HWC
    out += MEAN_VALUE                         # undo the mean subtraction
    return np.clip(out, 0, 255).astype('uint8')


net.setInput(blob)
output = _postprocess(net.forward())

net2.setInput(blob)
output2 = _postprocess(net2.forward())

# Keep the left half of the first result and the right half of the second,
# then stitch them side by side (axis=1 is the width axis).
half = img.shape[1] // 2
output = output[:, :half]
output2 = output2[:, half:]

output3 = np.concatenate([output, output2], axis=1)

cv2.imshow('img', img)
cv2.imshow('output', output)
cv2.imshow('output2', output2)
cv2.imshow('output3', output3)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

#3 일정 부분만 crop 해서 추론하기

import cv2
import numpy as np

# Model Load (Torch style-transfer network)
net = cv2.dnn.readNetFromTorch('models/instance_norm/feathers.t7')

# Image Load
img = cv2.imread('imgs/hw.jpg')
# cv2.imread returns None (it does not raise) on a missing file.
if img is None:
    raise FileNotFoundError('imgs/hw.jpg could not be read')

# Loaded Image's Shape
h, w, c = img.shape

# Resize to width 1000, keeping the aspect ratio (dsize is (width, height)).
img = cv2.resize(img, dsize=(1000, int(h / w * 1000)))

# Region of interest to stylize. Keeping the coordinates in one place
# guarantees the crop and the paste-back below can never disagree.
Y1, Y2, X1, X2 = 113, 287, 375, 634
inner_img = img[Y1:Y2, X1:X2]

# Per-channel BGR mean; blobFromImage subtracts it from the crop.
MEAN_VALUE = [103.939, 116.779, 123.680]
blob = cv2.dnn.blobFromImage(inner_img, mean=MEAN_VALUE)

net.setInput(blob)
output = net.forward()

# Drop batch dim, CHW -> HWC, undo the mean subtraction, clamp to 8-bit range.
output = output.squeeze().transpose((1, 2, 0))
output += MEAN_VALUE
output = np.clip(output, 0, 255)
output = output.astype('uint8')

# BUG FIX: the original pasted into img[112:288, 375:635], a region 2 rows
# and 1 column larger than the crop — a shape mismatch whenever the network
# output matches the crop size. Paste back into exactly the cropped region.
img[Y1:Y2, X1:X2] = output

# Show result image
cv2.imshow('result_img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

#4 이미지 가로로 잘라 반반 적용하기

import cv2
import numpy as np

# Per-channel BGR mean the Torch models were trained with;
# blobFromImage subtracts it, so it must be added back after inference.
MEAN_VALUE = [103.939, 116.779, 123.680]


def _postprocess(raw):
    """Convert a raw NCHW network output back to a displayable HWC uint8 image."""
    out = raw.squeeze().transpose((1, 2, 0))  # drop batch dim, CHW -> HWC
    out += MEAN_VALUE                         # undo the mean subtraction
    return np.clip(out, 0, 255).astype('uint8')


# Model Load: three style-transfer networks (net3 is used in a later snippet).
net = cv2.dnn.readNetFromTorch('models/instance_norm/feathers.t7')
net2 = cv2.dnn.readNetFromTorch('models/instance_norm/mosaic.t7')
net3 = cv2.dnn.readNetFromTorch('models/eccv16/la_muse.t7')

# Image Load
img = cv2.imread('imgs/03.jpg')
# cv2.imread returns None (it does not raise) on a missing file.
if img is None:
    raise FileNotFoundError('imgs/03.jpg could not be read')

# Loaded Image's Shape
h, w, c = img.shape

# Resize to width 1000, keeping the aspect ratio (dsize is (width, height)).
img = cv2.resize(img, dsize=(1000, int(h / w * 1000)))

# Check Image's Shape — the row split below assumes a 631-row result.
print(img.shape)

blob = cv2.dnn.blobFromImage(img, mean=MEAN_VALUE)

# Apply the first model.
net.setInput(blob)
output = _postprocess(net.forward())

# Apply the second model.
net2.setInput(blob)
output2 = _postprocess(net2.forward())

# Top band from net, bottom band from net2 (rows; axis=0 is the height axis).
output = output[0:315, :]
output2 = output2[315:631, :]

# Combine output with output2
output3 = np.concatenate([output, output2], axis=0)

cv2.imshow('result_img', output3)
cv2.waitKey(0)

 

 

#4-1 가운데가 아닌 곳에서 나누기

# NOTE(review): fragment — continues the #4 script above and re-slices the
# output/output2 arrays it left behind (which were already row-sliced there),
# so these indices are relative to those slices, not the original image.
output = output[0:100, :]
output2 = output2[100:631]

# Stack the (now unequal-height) bands vertically.
output3 = np.concatenate([output, output2], axis=0)

cv2.imshow('result_img', output3)
cv2.waitKey(0)

 

 

#4-2 3개로 나누기

# NOTE(review): fragment — net3, blob and MEAN_VALUE come from the #4 script,
# and output/output2 were already sliced there, so the row indices below are
# relative to those earlier slices, not the original image.
net3.setInput(blob)
output3 = net3.forward()

# Drop batch dim and reorder CHW -> HWC for the third model's result.
output3 = output3.squeeze().transpose((1, 2, 0))

output3 += MEAN_VALUE  # restore the mean subtracted by blobFromImage
output3 = np.clip(output3, 0, 255)
output3 = output3.astype('uint8')

# Three horizontal bands, one per model.
output = output[0:200, :]
output2 = output2[200:400, :]
output3 = output3[400:631, :]

# Stack the three bands vertically (axis=0 is the height axis).
output4 = np.concatenate([output, output2, output3], axis=0)

cv2.imshow('result_image', output4)
cv2.waitKey(0)

 

 

#5 동영상에 적용해보기

import cv2
import numpy as np

# Model Load: one style for the top band of each frame, one for the bottom.
net = cv2.dnn.readNetFromTorch('week2/models/instance_norm/la_muse.t7')
net2 = cv2.dnn.readNetFromTorch('week2/models/instance_norm/starry_night.t7')

# Video Load
cap = cv2.VideoCapture('week2/imgs/03.mp4')

# The training mean is frame-independent; build it once outside the loop.
MEAN_VALUE = [103.939, 116.779, 123.680]

while True:
    ret, img = cap.read()

    # BUG FIX: cap.read() returns (False, None) when the video ends or the
    # file failed to open; the original then crashed on img.shape.
    if not ret:
        break

    # Loaded frame's shape
    h, w, c = img.shape

    # Resize to width 1000, keeping the aspect ratio (dsize is (width, height)).
    img = cv2.resize(img, dsize=(1000, int(h / w * 1000)))

    # Check the frame shape — the row split below assumes 562 rows.
    print(img.shape)

    blob = cv2.dnn.blobFromImage(img, mean=MEAN_VALUE)

    # Apply the first model.
    net.setInput(blob)
    output = net.forward()

    output = output.squeeze().transpose((1, 2, 0))  # drop batch dim, CHW -> HWC
    output += MEAN_VALUE                            # undo the mean subtraction
    output = np.clip(output, 0, 255)
    output = output.astype('uint8')

    # Apply the second model.
    net2.setInput(blob)
    output2 = net2.forward()

    output2 = output2.squeeze().transpose((1, 2, 0))
    output2 += MEAN_VALUE
    output2 = np.clip(output2, 0, 255)
    output2 = output2.astype('uint8')

    # Top band from net, bottom band from net2.
    output = output[0:280, :]
    output2 = output2[280:562, :]

    output3 = np.concatenate([output, output2], axis=0)

    cv2.imshow('result_img', output3)

    # ~10 ms per frame; press 'q' to quit early.
    if cv2.waitKey(10) == ord('q'):
        break

# Release the capture and close windows so the script exits cleanly.
cap.release()
cv2.destroyAllWindows()

 

 

#6 느낀점

지난 주와 마찬가지로 리마인드 하는 느낌으로 강의를 들었다.

강사님께서 설명 완전 잘해주심 bb