def train(train_loader, model, object_idt, classifier, criterion, optimizer, epoch):
    """Run one training epoch.

    For each batch: extract CNN features with ``model``, run a YOLOv3
    detector on every image path to build an 80-dim object one-hot vector,
    embed it with ``object_idt``, fuse both with ``classifier``, and do one
    SGD step on ``criterion``.

    Args:
        train_loader: yields ``(input, target, path)`` triples; ``path`` is a
            sequence of image file paths (one per sample).
        model: convolutional feature extractor (trained).
        object_idt: object-identity embedding network (trained).
        classifier: fusion classifier over (conv features, object embedding).
        criterion: loss on the fused output vs. ``target``.
        optimizer: optimizer over the trainable parameters.
        epoch: current epoch index, used only for logging.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()
    object_idt.train()
    classifier.train()

    end = time.time()
    for i, (input, target, path) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # BUG FIX: `async` became a reserved keyword in Python 3.7, so
        # `cuda(async=True)` is a SyntaxError; `non_blocking=True` is the
        # equivalent modern spelling of the same async-copy hint.
        target = target.cuda(non_blocking=True)

        # Variables are deprecated since PyTorch 0.4 — plain tensors carry
        # autograd state, so feed the inputs directly.
        # NOTE(review): `input` is never moved to the GPU here (only `target`
        # is) — presumably the model handles placement; confirm upstream.
        output_conv = model(input)

        # One detector pass per image path -> 80-dim object one-hot vector.
        obj_id_batch = []
        for j in range(len(path)):
            objects, class_names = detect(args.cfg, args.weight, path[j], args.namesfile)
            obj_id_batch.append(get_hot_vector(objects, class_names))
        t = torch.FloatTensor(obj_id_batch)

        output_idt = object_idt(t)
        output = classifier(output_conv, output_idt)
        loss = criterion(output, target)

        # measure accuracy and record loss.
        # BUG FIX: the original stored the graph-attached `loss` tensor in the
        # meter, retaining the whole autograd graph every iteration (memory
        # leak); `.item()` records the detached scalar instead.
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1, input.size(0))
        top5.update(prec5, input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, object_idt, classifier, criterion):
    """Evaluate the fused model on the validation set.

    Mirrors :func:`train` without the optimizer step: CNN features plus a
    detector-derived object one-hot vector are fused by ``classifier`` and
    scored against ``target``.

    Args:
        val_loader: yields ``(input, target, path)`` triples.
        model, object_idt, classifier: networks, switched to eval mode here.
        criterion: loss on the fused classifier output.

    Returns:
        Average top-1 precision over the validation set (``top1.avg``).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    object_idt.eval()
    classifier.eval()

    end = time.time()
    for i, (input, target, path) in enumerate(val_loader):
        # BUG FIX: `cuda(async=True)` is a SyntaxError on Python >= 3.7;
        # `non_blocking=True` is the modern equivalent.
        target = target.cuda(non_blocking=True)

        with torch.no_grad():
            # compute output
            output_conv = model(input)

            obj_id_batch = []
            for j in range(len(path)):
                objects, class_names = detect(args.cfg, args.weight, path[j], args.namesfile)
                obj_id_batch.append(get_hot_vector(objects, class_names))
            # Variables are deprecated since PyTorch 0.4; a plain tensor works.
            t = torch.FloatTensor(obj_id_batch)

            output_idt = object_idt(t)
            output = classifier(output_conv, output_idt)
            # BUG FIX: the loss was computed on the raw conv features
            # (`output_conv`) instead of the classifier output, unlike
            # train(); score the fused prediction as intended.
            loss = criterion(output, target)

        # measure accuracy and record loss (.item(): store a detached scalar,
        # not a tensor, in the meter).
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1, input.size(0))
        top5.update(prec5, input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses,
                      top1=top1, top5=top5))

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return top1.avg
def get_hot_vector(objects, class_names):
    """Return an 80-dim multi-hot vector marking which classes were detected.

    Args:
        objects: iterable of detected class-name strings.
        class_names: full ordered list of class names (80 entries — the
            COCO label set used by the YOLOv3 detector).

    Returns:
        list[int]: 80 entries, 1 where the class appears in ``objects``.

    Raises:
        ValueError: if an object name is not present in ``class_names``.
    """
    v = [0] * 80
    for name in objects:
        v[class_names.index(name)] = 1
    return v


# Accumulate, per scene class, how often each object class is detected
# across that class's images, then keep the 10 most frequent object names.
scene_dict = {}
scene_obj = defaultdict(list)
for class_name in os.listdir(data_dir):
    for img_name in os.listdir(os.path.join(data_dir, class_name)):
        img_dir = os.path.join(data_dir, class_name, img_name)
        objects, class_names = detect(args.cfg, args.weight, img_dir, args.namesfile)
        obj_hot_vector = get_hot_vector(objects, class_names)
        if class_name not in scene_dict:
            scene_dict[class_name] = obj_hot_vector
        else:
            # BUG FIX: map() is a lazy iterator in Python 3, so the original
            # assignment made len()/indexing below raise TypeError on the
            # second image of a class. Materialize the element-wise sum.
            scene_dict[class_name] = [
                x + y for x, y in zip(scene_dict[class_name], obj_hot_vector)
            ]
    # Indices of the 10 highest-count object classes for this scene class.
    # NOTE(review): nesting was ambiguous in the source; this runs once per
    # scene class. `class_names` here is whatever the last detect() returned —
    # assumes it is the same fixed 80-name list every call; confirm.
    indices = sorted(range(len(scene_dict[class_name])),
                     key=lambda i: scene_dict[class_name][i],
                     reverse=True)[:10]
    for ind in indices:
        scene_obj[class_name].append(class_names[ind])
from yolov3.utils import parser
from yolov3.detect import detect

# BUG FIX: the parsed arguments were previously rebound to the name
# `parser`, shadowing the imported module; use a distinct name.
# NOTE(review): elsewhere detect() is called as detect(cfg, weight, path,
# namesfile); the single-argument form here presumably is a CLI entry
# point that unpacks the args itself — confirm against yolov3.detect.
args = parser.get_parser_from_arguments()
detect(args)