def get_prediction(image_bytes):
    """Classify an image and return per-class probabilities as text lines.

    Args:
        image_bytes: raw bytes of the image to classify (anything
            ``transform_image`` accepts).

    Returns:
        dict with a single key ``"result"``: a list of
        ``"class:<name> probability:<p>"`` strings sorted by descending
        probability, or a one-element list containing the error message
        if anything goes wrong (e.g. non-image input).
    """
    # Error-boundary: any failure (bad image, missing files, ...) is
    # reported back to the caller as text rather than propagated.
    try:
        weights_path = "./Alexnet.pth"
        class_json_path = "./class_indices.json"
        # Explicit raises instead of asserts: asserts are stripped under -O.
        if not os.path.exists(weights_path):
            raise FileNotFoundError("weights path does not exist...")
        if not os.path.exists(class_json_path):
            raise FileNotFoundError("class json path does not exist...")

        # select device
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        # create model and load weights
        model = AlexNet(num_classes=5)
        model.load_state_dict(torch.load(weights_path, map_location=device))
        model.to(device)
        model.eval()

        # load class-index -> class-name mapping; 'with' guarantees the
        # file handle is closed (it was leaked in the previous version)
        with open(class_json_path, 'rb') as json_file:
            class_indict = json.load(json_file)

        tensor = transform_image(image_bytes=image_bytes)
        # BUGFIX: the input tensor must be on the same device as the model,
        # otherwise inference crashed whenever CUDA was available.
        # no_grad(): inference needs no autograd bookkeeping.
        with torch.no_grad():
            outputs = torch.softmax(model(tensor.to(device)).squeeze(), dim=0)
        prediction = outputs.cpu().numpy()

        # {:<15} left-aligns the class name
        template = "class:{:<15} probability:{:.3f}"
        index_pre = [(class_indict[str(index)], float(p))
                     for index, p in enumerate(prediction)]
        # sort by probability, highest first
        index_pre.sort(key=lambda x: x[1], reverse=True)
        text = [template.format(k, v) for k, v in index_pre]
        return_info = {"result": text}
    except Exception as e:
        return_info = {"result": [str(e)]}
    return return_info
def train(pertrained=False, resume_file=None):
    """Train the network, logging per-epoch stats and saving checkpoints.

    Args:
        pertrained: when True, build the torchvision-style ``alexnet`` with
            pretrained weights; otherwise a freshly initialised ``AlexNet``.
            (Parameter spelling kept as-is for caller compatibility.)
        resume_file: optional path of a checkpoint to resume training from.
            If it does not exist, training starts from epoch 0.

    Side effects:
        Writes ``logs/train_logs_*.log`` / ``logs/valid_logs_*.log`` and
        saves checkpoints under ``models/`` every 10 epochs and on the
        final epoch.
    """
    if pertrained:
        from model import alexnet
        net = alexnet(pretrained=True, num_classes=NUMBER_CLASSES)
    else:
        from model import AlexNet
        net = AlexNet(num_classes=NUMBER_CLASSES)

    valid_precision = 0
    policies = net.parameters()
    optimizer = optim.SGD(policies, lr=LR, momentum=MOMENTUM,
                          weight_decay=WEIGHT_DECAY)

    # NOTE(review): "%H:%M:%S" puts colons in the file name, which is not a
    # valid file name on Windows — confirm this only runs on POSIX systems.
    train_log = open(
        "logs/train_logs_{}.log".format(
            time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())), "w")
    valid_log = open(
        "logs/valid_logs_{}.log".format(
            time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())), "w")
    train_log.write("{}\t{}\t{}\n".format("epoch", "losses ", "correct"))
    valid_log.write("{}\t{}\t{}\n".format("epoch", "losses ", "correct"))

    # BUGFIX: start_epoch was only assigned inside the resume branches, so a
    # plain train() call crashed with NameError. Default it up front.
    start_epoch = 0
    # Resume training from a checkpoint, if one was given and exists.
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['model_state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_file, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(resume_file))

    # try/finally: the log files are closed even if training raises
    # (previously they leaked on any exception inside the loop).
    try:
        # valid_precision = valid(net)
        for epoch in range(start_epoch, EPOCHES):
            batch_time = AverageMeter()
            data_time = AverageMeter()
            losses = AverageMeter()
            correct = AverageMeter()
            end = time.time()
            optimizer = adjust_learning_rate(optimizer, epoch, LR, LR_steps,
                                             WEIGHT_DECAY)
            for i_batch, sample_batched in enumerate(train_dataloader):
                # measure data loading time
                data_time.update(time.time() - end)
                inputs, labels = sample_batched
                # net(...) instead of net.forward(...): calling the module
                # runs registered hooks as well.
                if CUDA_AVALIABLE:
                    outputs = net(inputs.cuda())
                    labels = labels.long().flatten().cuda()
                else:
                    outputs = net(inputs)
                    labels = labels.long().flatten()
                outputs = outputs.reshape([-1, NUMBER_CLASSES])
                loss = criterion(outputs, labels)

                # update running statistics
                losses.update(loss.item(), inputs.size(0))
                _, predicted = torch.max(outputs.data, 1)
                # per-batch accuracy, weighted by batch size
                correct.update(
                    (predicted == labels.long()).sum().item() / len(labels),
                    inputs.size(0))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if i_batch % 10 == 0:
                    print(('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                           'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                           'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                           'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                           'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(
                               epoch, i_batch, len(train_dataloader),
                               batch_time=batch_time, data_time=data_time,
                               loss=losses, top1=correct,
                               lr=optimizer.param_groups[-1]['lr'])))

            train_log.write("{:5d}\t{:.5f}\t{:.5f}\n".format(
                epoch, losses.avg, correct.avg))
            train_log.flush()

            # validate every epoch
            if epoch % 1 == 0:
                valid_precision = valid(net, epoch, valid_log)

            # save the network every 10 epochs and on the final epoch
            if (epoch > 0 and epoch % 10 == 0) or epoch == EPOCHES - 1:
                save_path = os.path.join(
                    "models", "{:d}_{}_{:d}_{:d}_{:.5f}.pt".format(
                        int(time.time()), "alexnet", epoch, BATCHSIZE,
                        valid_precision))
                print("[INFO] Save weights to " + save_path)
                torch.save(
                    {
                        'epoch': epoch,
                        'model_state_dict': net.state_dict(),
                        # BUGFIX: state_dict was previously stored as a bound
                        # method (missing call parentheses), making the saved
                        # optimizer state useless. Key name ('..._dir' typo)
                        # kept for compatibility with existing checkpoints.
                        'optimizer_state_dir': optimizer.state_dict(),
                        'loss': loss,
                    }, save_path)
    finally:
        train_log.close()
        valid_log.close()
model.train() lamb = adaptation_factor(epoch * 1.0 / max_epoch) current_lr, _ = lr_schedule(opt, epoch, lr_mult), lr_schedule(opt_D, epoch, lr_mult_D) if epoch % s_loader_len == 0: s_loader_epoch = iter(s_loader) if epoch % t_loader_len == 0: t_loader_epoch = iter(t_loader) xs, ys = s_loader_epoch.next() xt, yt = t_loader_epoch.next() if cuda: xs, ys, xt, yt = xs.cuda(), ys.cuda(), xt.cuda(), yt.cuda() # forward s_feature, s_score, s_pred = model.forward(xs) t_feature, t_score, t_pred = model.forward(xt) C_loss = model.closs(s_score, ys) if da: s_logit, t_logit = model.forward_D(s_feature), model.forward_D( t_feature) G_loss, D_loss, semantic_loss = model.adloss(s_logit, t_logit, s_feature, t_feature, ys, t_pred) Dregloss, Gregloss = model.regloss() F_loss = C_loss + Gregloss + lamb * G_loss + lamb * semantic_loss D_loss = D_loss + Dregloss opt_D.zero_grad() D_loss.backward(retain_graph=True)