Example #1
def main(args):
    path = os.path.join(os.getcwd(), 'soft_label', 'soft_label_resnet50.txt')
    if not os.path.isfile(path):
        print('soft label file does not exist')
        return

    train_loader = getTrainLoader(args, path)
    _, val_loader, num_query, num_classes, train_size = make_data_loader(args)

    #train_loader, val_loader, num_query, num_classes, train_size = make_data_loader(args)
    model = build_model(args, num_classes)
    optimizer = make_optimizer(args, model)
    scheduler = WarmupMultiStepLR(optimizer, [30, 55], 0.1, 0.01, 5, "linear")

    loss_func = make_loss(args)

    model.to(device)

    for epoch in range(args.Epochs):
        model.train()
        running_loss = 0.0
        running_klloss = 0.0
        running_softloss = 0.0
        running_corrects = 0.0
        for index, data in enumerate(tqdm(train_loader)):
            img, target, soft_target = data
            img = img.cuda()
            target = target.cuda()
            soft_target = soft_target.cuda()
            score, _ = model(img)
            preds = torch.max(score.data, 1)[1]
            loss, klloss, softloss = loss_func(score, target, soft_target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            running_klloss += klloss.item()
            running_softloss += softloss.item()
            running_corrects += float(torch.sum(preds == target.data))

        scheduler.step()
        epoch_loss = running_loss / train_size
        epoch_klloss = running_klloss / train_size
        epoch_softloss = running_softloss / train_size
        epoch_acc = running_corrects / train_size
        print(
            "Epoch {}   Loss : {:.4f} KLLoss:{:.8f}  SoftLoss:{:.4f}  Acc:{:.4f}"
            .format(epoch, epoch_loss, epoch_klloss, epoch_softloss,
                    epoch_acc))

        if (epoch + 1) % args.n_save == 0:
            evaluator = Evaluator(model, val_loader, num_query)
            cmc, mAP = evaluator.run()
            print('---------------------------')
            print("CMC Curve:")
            for r in [1, 5, 10]:
                print("Rank-{} : {:.1%}".format(r, cmc[r - 1]))
            print("mAP : {:.1%}".format(mAP))
            print('---------------------------')
            save_model(args, model, optimizer, epoch)
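Note: make_loss and the (loss, klloss, softloss) decomposition above are project-specific and not shown here. The following is a minimal sketch, assuming a standard knowledge-distillation objective (hard-label cross-entropy plus temperature-scaled KL divergence against the teacher's soft targets); the factory name, weighting, temperature, and term names are assumptions, not the repository's implementation.

import torch.nn.functional as F

def make_distillation_loss(temperature=4.0, alpha=0.5):
    # Hypothetical sketch of a loss factory returning (total, kl_term, hard_term),
    # mirroring the three values unpacked from loss_func above.
    def loss_fn(score, target, soft_target):
        hard_term = F.cross_entropy(score, target)           # supervised term on hard labels
        kl_term = F.kl_div(
            F.log_softmax(score / temperature, dim=1),
            F.softmax(soft_target / temperature, dim=1),
            reduction='batchmean') * (temperature ** 2)       # distillation term on soft targets
        total = alpha * kl_term + (1.0 - alpha) * hard_term
        return total, kl_term, hard_term
    return loss_fn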
Example #2
def main():
    filename = 'main.yeet'
    with open(filename, 'r') as file:
        lexer = Lexer(file)
        # tokenize while the source file is still open
        lexer.tokenizer()
    # print("Tokens: ")
    # print(lexer.tokens, "\n")

    # hand the token stream to the parser only after tokenization
    parse = Parse(lexer.tokens)

    parse.build_AST()
    # print("AST:")
    # print (parse.AST, "\n")

    evaluator = Evaluator(parse.AST)
    print("the f*****g output:")
    evaluator.run(parse.AST)
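Lexer, Parse, and Evaluator are defined elsewhere in this project; the snippet only shows how they are wired together. As a rough illustration of the interface this main() expects (a lexer.tokens list filled by tokenizer(), a parse.AST built by build_AST(), and an Evaluator.run(ast) method), a toy sketch might look like the following; every behavior below is an assumption for illustration only.

class Lexer:
    def __init__(self, file):
        self.source = file.read()
        self.tokens = []                    # populated by tokenizer()

    def tokenizer(self):
        # toy tokenization: whitespace-separated words
        self.tokens = self.source.split()


class Parse:
    def __init__(self, tokens):
        self.tokens = tokens
        self.AST = None                     # populated by build_AST()

    def build_AST(self):
        # toy "AST": wrap the token list in a root node
        self.AST = ('program', self.tokens)


class Evaluator:
    def __init__(self, ast):
        self.ast = ast

    def run(self, ast):
        # toy evaluation: echo the parsed structure
        print(ast)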
Example #3
def main(args):
    sys.stdout = Logger(
        os.path.join(args.log_path, args.log_description,
                     'log' + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    train_loader, val_loader, num_query, num_classes, train_size = make_data_loader(
        args)
    model = build_model(args, num_classes)
    print(model)
    optimizer = make_optimizer(args, model)
    scheduler = WarmupMultiStepLR(optimizer, [30, 55], 0.1, 0.01, 5, "linear")

    loss_func = make_loss(args)

    model.to(device)

    for epoch in range(args.Epochs):
        model.train()
        running_loss = 0.0
        running_corrects = 0.0
        for index, data in enumerate(tqdm(train_loader)):
            img, target = data
            img = img.cuda()
            target = target.cuda()
            score, _ = model(img)
            preds = torch.max(score.data, 1)[1]
            loss = loss_func(score, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            running_corrects += float(torch.sum(preds == target.data))

        scheduler.step()
        epoch_loss = running_loss / train_size
        epoch_acc = running_corrects / train_size
        print("Epoch {}   Loss : {:.6f}   Acc:{:.4f}".format(
            epoch, epoch_loss, epoch_acc))

        if (epoch + 1) % args.n_save == 0:
            evaluator = Evaluator(model, val_loader, num_query)
            cmc, mAP = evaluator.run()
            print('---------------------------')
            print("CMC Curve:")
            for r in [1, 5, 10]:
                print("Rank-{} : {:.1%}".format(r, cmc[r - 1]))
            print("mAP : {:.1%}".format(mAP))
            print('---------------------------')
            save_model(args, model, optimizer, epoch)
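WarmupMultiStepLR as called above (milestones [30, 55], gamma 0.1, warmup factor 0.01, 5 warmup epochs, "linear" warmup) is not a stock torch.optim scheduler. Below is a minimal sketch of a scheduler with that signature, assuming linear warmup followed by multi-step decay; it is an approximation, not necessarily the project's implementation.

from bisect import bisect_right
import torch

class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    # Sketch: multi-step LR decay with an initial (linear or constant) warmup phase.
    def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=0.01,
                 warmup_iters=5, warmup_method="linear", last_epoch=-1):
        self.milestones = sorted(milestones)
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        warmup = 1.0
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup = self.warmup_factor
            else:  # "linear": ramp from warmup_factor up to 1.0
                alpha = self.last_epoch / self.warmup_iters
                warmup = self.warmup_factor * (1 - alpha) + alpha
        decay = self.gamma ** bisect_right(self.milestones, self.last_epoch)
        return [base_lr * warmup * decay for base_lr in self.base_lrs]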
Example #4
def main(args):
    train_loader, val_loader, num_query, num_classes, train_size = make_data_loader(
        args)

    # load the pretrained weights from the checkpoint
    net = Net(reid=True)
    state_dict = torch.load(
        './ckpt.t7', map_location=lambda storage, loc: storage)['net_dict']
    net.load_state_dict(state_dict)

    evaluator = Evaluator(net, val_loader, num_query)
    cmc, mAP = evaluator.run()
    print('---------------------------')
    print("CMC Curve:")
    for r in [1, 5, 10]:
        print("Rank-{} : {:.1%}".format(r, cmc[r - 1]))
    print("mAP : {:.1%}".format(mAP))
    print('---------------------------')
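Evaluator is the same project-specific helper used in the training examples above: it is constructed from a model, a validation loader whose first num_query samples form the query set, and it returns a CMC curve plus mAP. The sketch below is a simplified illustration, assuming the loader yields (images, person IDs), the model returns feature embeddings in eval mode, Euclidean-distance ranking, and a CMC/mAP computation that ignores camera IDs; the real implementation may differ in all of these respects.

import numpy as np
import torch

class Evaluator:
    # Simplified sketch: extract features, rank the gallery by Euclidean distance,
    # and compute CMC / mAP without camera-ID filtering.
    def __init__(self, model, val_loader, num_query):
        self.model = model
        self.val_loader = val_loader
        self.num_query = num_query

    @torch.no_grad()
    def run(self):
        self.model.eval()
        feats, pids = [], []
        for imgs, targets in self.val_loader:
            feats.append(self.model(imgs.cuda()).cpu())   # assumes eval-mode forward returns features
            pids.extend(targets.numpy().tolist())
        feats = torch.cat(feats, dim=0)
        pids = np.asarray(pids)

        qf, gf = feats[:self.num_query], feats[self.num_query:]
        q_pids, g_pids = pids[:self.num_query], pids[self.num_query:]
        distmat = torch.cdist(qf, gf).numpy()

        cmc = np.zeros(len(g_pids))
        aps = []
        for i in range(len(q_pids)):
            order = np.argsort(distmat[i])
            matches = (g_pids[order] == q_pids[i]).astype(np.int32)
            if not matches.any():
                continue
            cmc[np.argmax(matches):] += 1                 # rank of the first correct match
            hits = np.where(matches == 1)[0]
            precisions = [(k + 1) / (pos + 1) for k, pos in enumerate(hits)]
            aps.append(np.mean(precisions))               # average precision for this query
        return cmc / len(q_pids), float(np.mean(aps))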