def demo_plt(img_id=0):
    net = build_ssd('test', 512, 11)  # initialize SSD
    print(net)
    net.load_weights(
        '/media/sunwl/Datum/Projects/GraduationProject/Fused_sum_SSD_VHR_512_conv3_3/weights/v2_vhr.pth'
    )
    testset = VHRDetection(VHRroot, ['test2'], None, AnnotationTransform_VHR)
    image = testset.pull_image(img_id)
    # image = cv2.imread('demos/047.jpg')
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # View the sampled input image before transform
    plt.figure(figsize=(10, 10))
    plt.imshow(rgb_image)

    x = cv2.resize(rgb_image, (512, 512)).astype(np.float32)
    x -= (104.0, 117.0, 123.0)
    x = x.astype(np.float32)
    x = x[:, :, ::-1].copy()
    x = torch.from_numpy(x).permute(2, 0, 1)
    xx = Variable(x.unsqueeze(0))  # wrap tensor in Variable
    if torch.cuda.is_available():
        xx = xx.cuda()
    y = net(xx)

    # plt.figure(figsize=(10, 10))
    colors = plt.cm.hsv(np.linspace(0, 1, 11)).tolist()
    plt.imshow(rgb_image.astype(np.uint8))  # plot the image for matplotlib
    currentAxis = plt.gca()
    detections = y.data
    # scale each detection back up to the image
    scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
    for i in range(detections.size(1)):
        j = 0
        while detections[0, i, j, 0] >= 0.6:
            score = detections[0, i, j, 0]
            label_name = labels[i - 1]
            display_txt = '%s: %.2f' % (label_name, score)
            pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
            color = colors[i]
            coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
            currentAxis.add_patch(
                plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
            currentAxis.text(pt[0], pt[1], display_txt,
                             bbox={'facecolor': color, 'alpha': 0.5})
            j += 1
    plt.savefig(
        '/media/sunwl/Datum/Projects/GraduationProject/Fused_sum_SSD_VHR_512_conv3_3/outputs/vhr_{:03}.png'
        .format(img_id))
    plt.show()
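# The resize / mean-subtraction / channel-flip / HWC->CHW steps above are repeated
# verbatim in demo_cv2 below. A minimal refactoring sketch; the helper name
# preprocess_image is hypothetical and not part of this repo.
import cv2
import numpy as np
import torch


def preprocess_image(rgb_image, size=512, mean=(104.0, 117.0, 123.0)):
    """Mirror the demo preprocessing: resize, subtract the per-channel mean,
    flip the channel order, and permute HWC -> CHW for the network."""
    x = cv2.resize(rgb_image, (size, size)).astype(np.float32)
    x -= mean
    x = x[:, :, ::-1].copy()
    return torch.from_numpy(x).permute(2, 0, 1)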
def demo_cv2(img_id=0):
    net = build_msc('test', 11)  # initialize SSD
    print(net)
    # net.load_weights('/media/sunwl/Datum/Projects/GraduationProject/Multi_Scale_CNN_512/weights/v2_vhr.pth')
    net.load_weights(
        '/media/sunwl/Datum/Projects/GraduationProject/Multi_Scale_CNN_512/weights/msc512_vhr_80000.pth'
    )
    testset = VHRDetection(VHRroot, ['test2'], None, AnnotationTransform_VHR)
    image = testset.pull_image(img_id)
    # image = cv2.imread('demos/089.jpg')
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    x = cv2.resize(rgb_image, (512, 512)).astype(np.float32)
    x -= (104.0, 117.0, 123.0)
    x = x.astype(np.float32)
    x = x[:, :, ::-1].copy()
    x = torch.from_numpy(x).permute(2, 0, 1)
    xx = Variable(x.unsqueeze(0))  # wrap tensor in Variable
    if torch.cuda.is_available():
        xx = xx.cuda()
    y = net(xx)

    colors = plt.cm.hsv(np.linspace(0, 1, 11)).tolist()
    detections = y.data
    # scale each detection back up to the image
    scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)  # back to BGR for OpenCV drawing
    im2show = np.copy(bgr_image)
    for i in range(detections.size(1)):
        j = 0
        while detections[0, i, j, 0] >= 0.6:
            score = detections[0, i, j, 0]
            label_name = labels[i - 1]
            display_txt = '%s: %.2f' % (label_name, score)
            pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
            color = colors[i]
            color = [int(c * 255) for c in color[:3]]
            coords = tuple(int(round(p)) for p in pt[:4])  # cv2 drawing needs integer pixel coords
            cv2.rectangle(im2show, coords[0:2], coords[2:4], color, thickness=2)
            cv2.putText(im2show, display_txt,
                        (coords[0], coords[1] - 3),
                        cv2.FONT_HERSHEY_PLAIN, 1.0, color, thickness=1)
            j += 1
    cv2.imshow('original', bgr_image)
    cv2.imshow('demo', im2show)
    cv2.imwrite(
        os.path.join(
            '/media/sunwl/Datum/Projects/GraduationProject/Multi_Scale_CNN_512',
            "outputs", "{:03d}.jpg".format(img_id)), im2show)
    cv2.waitKey(0)
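# Both demos walk the [batch, num_classes, top_k, 5] detections tensor with the
# same threshold loop. A hedged refactoring sketch follows; decode_detections is
# a hypothetical name, not part of this repo, and the extra bound on j keeps the
# loop from running past top_k.
def decode_detections(detections, scale, threshold=0.6):
    """Return (class_index, score, box) tuples for detections above threshold.
    `scale` is the same (w, h, w, h) tensor the demos use to map boxes back to
    pixel coordinates."""
    results = []
    for i in range(detections.size(1)):  # class index; slot 0 is background
        j = 0
        while j < detections.size(2) and detections[0, i, j, 0] >= threshold:
            score = float(detections[0, i, j, 0])
            box = (detections[0, i, j, 1:] * scale).cpu().numpy()
            results.append((i, score, box))
            j += 1
    return results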
            # Tail of the per-detection loop inside test_net; the earlier lines
            # of the function are not shown in this excerpt.
            coords = (pt[0], pt[1], pt[2], pt[3])
            pred_num += 1
            with open(filename, mode='a') as f:
                f.write(str(pred_num) + ' label: ' + label_name + ' score: ' +
                        str(score) + ' ' +
                        ' || '.join(str(c) for c in coords) + '\n')
            j += 1


if __name__ == '__main__':
    # load net
    num_classes = len(VHR_CLASSES) + 1  # +1 background
    net = build_ssd('test', 300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    testset = VHRDetection(args.vhr_root, ['test2'], None,
                           AnnotationTransform_VHR())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder, net, args.cuda, testset,
             BaseTransform(net.size, (104, 117, 123)),
             thresh=args.visual_threshold)
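# Hedged illustration (not part of the original script): each f.write above
# emits one line per detection in the layout shown here; the id, class name and
# coordinates are invented purely to demonstrate the format.
#
#     3 label: ship score: 0.87 102.4 || 55.1 || 169.0 || 98.7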
def evaluate_detections(box_list, output_dir, dataset):
    write_vhr_results_file(box_list, dataset)
    do_python_eval(output_dir)


if __name__ == '__main__':
    # load net
    num_classes = len(VHR_CLASSES) + 1  # +1 background
    net = build_ssd('test', 300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    print(net)
    # load data
    dataset = VHRDetection(args.vhr_root, ['test2'],
                           BaseTransform(300, dataset_mean),
                           AnnotationTransform_VHR())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder, net, args.cuda, dataset,
             BaseTransform(net.size, dataset_mean),
             args.top_k, 300,
             thresh=args.confidence_threshold)
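# Hedged sketch (an assumption, not shown in this excerpt): in the upstream
# ssd.pytorch evaluation script, box_list is indexed as
#     box_list[class_index][image_index] -> ndarray of shape (num_dets, 5)
# holding (x1, y1, x2, y2, score), and write_vhr_results_file presumably expects
# the same layout here. A hypothetical helper for building the empty structure:
import numpy as np


def make_empty_box_list(num_classes, num_images):
    """Build the nested per-class / per-image detection container assumed above."""
    return [[np.empty((0, 5), dtype=np.float32) for _ in range(num_images)]
            for _ in range(num_classes)]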
def train():
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0
    print('Loading Dataset...')

    dataset = VHRDetection(args.vhr_root, train_sets,
                           SSDAugmentation(stdn_dim, means),
                           AnnotationTransform_VHR())

    epoch_size = len(dataset) // args.batch_size
    print('Training SSD on', dataset.name)
    step_index = 0
    if args.visdom:
        # initialize visdom loss plot
        lot = viz.line(X=torch.zeros((1,)).cpu(),
                       Y=torch.zeros((1, 3)).cpu(),
                       opts=dict(xlabel='Iteration',
                                 ylabel='Loss',
                                 title='Current SSD Training Loss',
                                 legend=['Loc Loss', 'Conf Loss', 'Loss']))
        epoch_lot = viz.line(X=torch.zeros((1,)).cpu(),
                             Y=torch.zeros((1, 3)).cpu(),
                             opts=dict(xlabel='Epoch',
                                       ylabel='Loss',
                                       title='Epoch SSD Training Loss',
                                       legend=['Loc Loss', 'Conf Loss', 'Loss']))
    batch_iterator = None
    data_loader = data.DataLoader(dataset, batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate_VHR,
                                  pin_memory=True)
    for iteration in range(args.start_iter, max_iter):
        if (not batch_iterator) or (iteration % epoch_size == 0):
            # create batch iterator
            batch_iterator = iter(data_loader)
        if iteration in stepvalues:
            step_index += 1
            adjust_learning_rate(optimizer, args.gamma, step_index)
            if args.visdom:
                viz.line(X=torch.ones((1, 3)).cpu() * epoch,
                         Y=torch.Tensor([loc_loss, conf_loss,
                                         loc_loss + conf_loss]).unsqueeze(0).cpu() / epoch_size,
                         win=epoch_lot,
                         update='append')
            # reset epoch loss counters
            loc_loss = 0
            conf_loss = 0
            epoch += 1

        # load train data
        images, targets = next(batch_iterator)

        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda(), volatile=True) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno, volatile=True) for anno in targets]
        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.data[0]
        conf_loss += loss_c.data[0]
        if iteration % 10 == 0:
            print('Timer: %.4f sec.' % (t1 - t0))
            print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]), end=' ')
            if args.visdom and args.send_images_to_visdom:
                random_batch_index = np.random.randint(images.size(0))
                viz.image(images.data[random_batch_index].cpu().numpy())
        if args.visdom:
            viz.line(X=torch.ones((1, 3)).cpu() * iteration,
                     Y=torch.Tensor([loss_l.data[0], loss_c.data[0],
                                     loss_l.data[0] + loss_c.data[0]]).unsqueeze(0).cpu(),
                     win=lot,
                     update='append')
            # hacky fencepost solution for 0th epoch plot
            if iteration == 0:
                viz.line(X=torch.zeros((1, 3)).cpu(),
                         Y=torch.Tensor([loc_loss, conf_loss,
                                         loc_loss + conf_loss]).unsqueeze(0).cpu(),
                         win=epoch_lot,
                         update=True)
        if iteration % 5000 == 0:
            print('Saving state, iter:', iteration)
            torch.save(msc_net.state_dict(),
                       'weights/msc512_vhr_' + repr(iteration) + '.pth')
    torch.save(msc_net.state_dict(), args.save_folder + '' + args.version + '.pth')
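# adjust_learning_rate is called in train() above but not defined in this
# excerpt. A minimal sketch, assuming the simple step decay used by upstream
# ssd.pytorch; the base_lr default stands in for args.lr and is an assumption,
# so this repo's actual schedule may differ.
def adjust_learning_rate(optimizer, gamma, step, base_lr=1e-3):
    """Set the learning rate of every parameter group to base_lr * gamma**step."""
    lr = base_lr * (gamma ** step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr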