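# Face verification evaluation: load an iresnet backbone, embed the image pairs
# listed in pairs.txt, compute Euclidean distances between L2-normalized
# embeddings, and sweep thresholds to report the best verification accuracy.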
import torch
import cv2
import numpy as np
from backbones import iresnet50, iresnet18, iresnet100

# print(torch.cuda.is_available())

def load_image(img_path):
    """Read one image and convert it to a normalized float32 array of shape (1, 3, H, W)."""
    img = cv2.imread(img_path)
    # img2 = np.fliplr(img)  # optional horizontally flipped copy for test-time augmentation
    img = img.transpose((2, 0, 1))          # HWC -> CHW
    # img2 = img2.transpose((2, 0, 1))
    img = img[np.newaxis, :, :, :]          # add the batch dimension
    # img2 = img2[np.newaxis, :, :, :]
    # img = np.concatenate((img, img2))
    img = np.array(img, dtype=np.float32)
    img -= 127.5                            # scale pixel values from [0, 255] to [-1, 1]
    img /= 127.5
    return img
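
# Usage sketch (illustrative; the path below is a placeholder): the backbones used
# here expect aligned 112x112 face crops, and load_image() does no resizing.
#   img = torch.from_numpy(load_image("path/to/aligned_face_112x112.jpg"))
#   with torch.no_grad():
#       emb = model(img)    # (1, embedding_dim) embedding vector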

def findEuclideanDistance(source_representation, test_representation):
    """Euclidean (L2) distance between two embedding vectors."""
    euclidean_distance = source_representation - test_representation
    euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
    euclidean_distance = np.sqrt(euclidean_distance)
    return euclidean_distance

def findCosineDistance(source_representation, test_representation):
    """Cosine distance: 1 minus the cosine similarity of the two vectors."""
    a = np.matmul(np.transpose(source_representation), test_representation)
    b = np.sum(np.multiply(source_representation, source_representation))
    c = np.sum(np.multiply(test_representation, test_representation))
    return 1 - (a / (np.sqrt(b) * np.sqrt(c)))

def l2_normalize(x):
    """Scale a vector to unit L2 norm."""
    return x / np.sqrt(np.sum(np.multiply(x, x)))

def cosin_metric(x1, x2):
    """Cosine similarity between two 1-D vectors."""
    return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))
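
# Note: for L2-normalized embeddings the two measures are directly related:
# ||a - b||^2 = 2 * (1 - cos(a, b)), so squared Euclidean distance on
# l2_normalize()'d vectors and cosine distance order pairs identically.
# Illustrative check (names assume two embedding vectors e1, e2):
#   a, b = l2_normalize(e1), l2_normalize(e2)
#   np.isclose(findEuclideanDistance(a, b) ** 2, 2 * findCosineDistance(a, b))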

def get_file_list(file_name):
    """Parse a pairs file where each line is 'path1 path2 label'."""
    file1 = []
    file2 = []
    code = []
    with open(file_name, "r") as f:
        file_list = f.readlines()
    for i in file_list:
        data = i.replace("\n", "").split(" ")
        file1.append(data[0])
        file2.append(data[1])
        code.append(data[2])
    return file1, file2, code
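
# Assumed pairs file format (one pair per line, single-space separated):
#   <path_to_image_1> <path_to_image_2> <label>
# where <label> is 1 for a same-identity pair and 0 for a different-identity
# pair (threshold_acc() below predicts 1 when the distance is under the threshold).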

def load_list_images(file_list):
    """Load a list of aligned 112x112 face crops into one normalized (N, 3, 112, 112) batch."""
    batch = np.zeros((len(file_list), 3, 112, 112), dtype=np.float32)
    for count, img_path in enumerate(file_list):
        img = cv2.imread(img_path)
        img = img.transpose((2, 0, 1))      # HWC -> CHW
        batch[count] = img
    batch -= 127.5                          # scale to [-1, 1], matching load_image()
    batch /= 127.5
    return batch

def threshold_acc(distance, code):
    """Sweep distance thresholds and return the one giving the best verification accuracy."""
    thresholds = np.arange(0.5, 2, 0.1)
    total = len(code)
    max_acc = 0
    threshold = 0
    for i in thresholds:
        # predict "same person" (1) when the distance is below the candidate threshold
        code_net = [1 if ds < i else 0 for ds in distance]
        right = np.sum(code == np.array(code_net))
        if right / total > max_acc:
            max_acc = right / total
            threshold = i
    return threshold, max_acc
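
# A vectorized alternative to threshold_acc (a sketch, not used by the script
# below); it assumes `distance` and `code` are 1-D numpy arrays as built in __main__.
def threshold_acc_vectorized(distance, code):
    thresholds = np.arange(0.5, 2, 0.1)
    # predictions for every (threshold, pair) combination via broadcasting
    preds = (distance[np.newaxis, :] < thresholds[:, np.newaxis]).astype(int)
    accs = (preds == code[np.newaxis, :]).mean(axis=1)
    best = int(np.argmax(accs))
    return thresholds[best], accs[best]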

# Single-pair sanity check kept from development (the Windows paths below are
# machine-specific):
# if __name__ == '__main__':
#     img1 = load_image("D:\Download\out\output\Aaron_Peirsol\Aaron_Peirsol_0001.jpg")
#     img1 = torch.from_numpy(img1)
#     print(img1.shape)
#     # print(img1)
#     img2 = load_image("D:\Download\out\output\Aaron_Peirsol\Aaron_Peirsol_0002.jpg")
#     img2 = torch.from_numpy(img2)
#     model = iresnet100()
#     model.load_state_dict(torch.load("./model/backbone100.pth", map_location="cpu"))
#     # print(model)
#     model.eval()
#     with torch.no_grad():
#         pred1 = model(img1)
#         print(pred1.shape)
#         # print(pred1)
#         pred2 = model(img2)
#     pred1 = pred1.numpy()
#     print(pred1.shape)
#     pred2 = pred2.numpy()
#     print("EuclideanDistance is :" + str(findEuclideanDistance(l2_normalize(pred1), l2_normalize(pred2))))
#     print(findCosineDistance(pred1[0], pred2[0]))

if __name__ == '__main__':
    # Load the iresnet100 backbone with pretrained weights for CPU inference.
    model = iresnet100()
    model.load_state_dict(torch.load("./model/backbone100.pth", map_location="cpu"))
    model.eval()

    # Read the evaluation pairs and their ground-truth labels.
    file1, file2, code = get_file_list("pairs.txt")
    code = np.array(code, dtype=int)

    img = load_list_images(file1)
    print(img.shape)
    img = torch.from_numpy(img)
    img2 = load_list_images(file2)
    img2 = torch.from_numpy(img2)

    batch_size = 64
    now = 0
    number = len(code)
    distance = []
    with torch.no_grad():
        while now < number:
            # Slicing past the end simply returns the remaining images, so the
            # final (possibly smaller) batch needs no special case.
            pred1 = model(img[now:now + batch_size])
            pred2 = model(img2[now:now + batch_size])
            now += batch_size
            pred1 = pred1.numpy()
            pred2 = pred2.numpy()
            print(pred1.shape)
            for i in range(len(pred1)):
                distance.append(findEuclideanDistance(l2_normalize(pred1[i]), l2_normalize(pred2[i])))

    distance = np.array(distance)
    print(distance[:100])
    threshold, max_acc = threshold_acc(distance, code)
    print(threshold, max_acc)