Example #1
class predict():
    def __init__(self, weights_path, conf_thres=0.7, nms_thres=0.5):
        self.conf_thres = conf_thres
        self.nms_thres = nms_thres
        model_dict = torch.load(weights_path)

        anchors = model_dict['anchors'].to('cuda')

        self.model = ResNet(anchors, Istrain=False).to('cuda')
        self.model.load_state_dict(model_dict['net'])
        self.model.eval()

    def __call__(self, inputs):
        inputs = torch.from_numpy(inputs)
        inputs = Variable(inputs, requires_grad=False).to('cuda')
        with torch.no_grad():
            _, outputs = self.model(inputs)
            outputs = non_max_suppression(outputs,
                                          conf_thres=self.conf_thres,
                                          nms_thres=self.nms_thres)
            outputs_numpy = []
            for output in outputs:
                if output is None:
                    outputs_numpy.append(None)
                else:
                    outputs_numpy.append(output.detach().cpu().numpy())
        return outputs_numpy
Example #2
class Main:
    def __init__(self, loadModelGen=None):
        os.makedirs('model/' + conf.PATH, exist_ok=True)
        os.makedirs('data/' + conf.PATH, exist_ok=True)
        self.Model = ResNet().to(conf.DEVICE)
        if loadModelGen is None:
            self.modelGen = 0
            print("modelGen : ", self.modelGen)
            data = self_play.randomData()
            data = self_play.inflated(data)
            self.Model.fit(data, policyVias=1, valueVias=1)
            np.savez('data/' + conf.PATH + '/Gen' + str(self.modelGen),
                     data[0], data[1], data[2])
            torch.save(self.Model.state_dict(),
                       'model/' + conf.PATH + '/Gen' + str(self.modelGen))
        else:
            self.modelGen = loadModelGen
            self.Model.load_state_dict(
                torch.load('model/' + conf.PATH + '/Gen' + str(self.modelGen)))

    def train(self):
        while True:
            self.modelGen += 1
            if self.modelGen == 11:
                break
            print("modelGen : ", self.modelGen)
            data = self_play.DataGenerate(self.Model)
            data = self_play.inflated(data)
            self.Model.fit(data, policyVias=1, valueVias=1)
            np.savez('data/' + conf.PATH + '/Gen' + str(self.modelGen),
                     data[0], data[1], data[2])
            torch.save(self.Model.state_dict(),
                       'model/' + conf.PATH + '/Gen' + str(self.modelGen))
Example #3
def main():
    config = yaml.load(open("./config/config.yaml", "r"),
                       Loader=yaml.FullLoader)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Device: {device}")

    data_transforms = get_simclr_data_transforms(**config["data_transforms"])
    train_dataset = datasets.STL10(
        "./data/",
        split="train+unlabeled",
        download=True,
        transform=MultiViewDataInjector([data_transforms, data_transforms]),
    )

    # online network
    online_network = ResNet(**config["network"]).to(device)
    pretrained_folder = config["network"]["fine_tune_from"]

    # load pre-trained model
    if pretrained_folder:
        try:
            checkpoints_folder = os.path.join("./runs", pretrained_folder,
                                              "checkpoints")

            load_params = torch.load(
                os.path.join(checkpoints_folder, "model.pth"),
                map_location=torch.device(device),
            )

            online_network.load_state_dict(
                load_params["online_network_state_dict"])

        except FileNotFoundError:
            print(
                "Pre-trained weights not found, starting training from scratch."
            )

    predictor = MlpHead(
        in_channels=online_network.projection.head[-1].out_features,
        **config["network"]["projection_head"],
    ).to(device)

    # target encoder
    target_network = ResNet(**config["network"]).to(device)
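    # the optimizer below updates only the online network and the predictor;
    # in BYOL the target network is typically refreshed by an exponential
    # moving average inside the trainer rather than by gradient descent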
    optimizer = torch.optim.SGD(
        list(online_network.parameters()) + list(predictor.parameters()),
        **config["optimizer"]["params"],
    )

    trainer = BYOLTrainer(
        online_network=online_network,
        target_network=target_network,
        optimizer=optimizer,
        predictor=predictor,
        device=device,
        **config["trainer"],
    )

    trainer.train(train_dataset)
Example #4
def main(args):

    # Image preprocessing
    transform = transforms.Compose([ 
        transforms.ToTensor(), 
        transforms.Normalize((0.033, 0.032, 0.033), 
                             (0.027, 0.027, 0.027))])

    # Load vocabulary wrapper
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build Models
    #encoder = AttnEncoder(ResidualBlock, [3, 3, 3])
    encoder = ResNet(ResidualBlock, [3, 3, 3], args.embed_size)
    encoder.eval()  # evaluation mode (BN uses moving mean/variance)
    # decoder = AttnDecoderRnn(args.feature_size, args.hidden_size, 
    #                     len(vocab), args.num_layers)
    decoder = DecoderRNN(args.embed_size, args.hidden_size, 
                         len(vocab), args.num_layers)

    print('load')

    # Load the trained model parameters
    encoder.load_state_dict(torch.load(args.encoder_path))
    decoder.load_state_dict(torch.load(args.decoder_path))

    print('load')

    # If use gpu
    if torch.cuda.is_available():
        encoder.cuda(1)
        decoder.cuda(1)


    trg_bitmap_dir = args.root_path + 'bitmap/'
    save_directory = 'predict_base/'
    svg_from_out = args.root_path + save_directory + 'svg/'   # svg from output caption 
    bitmap_from_out = args.root_path + save_directory + 'bitmap/'   #bitmap from out caption 

    if not os.path.exists(bitmap_from_out):
        os.makedirs(bitmap_from_out)
    if not os.path.exists(svg_from_out):
        os.makedirs(svg_from_out)

    test_list = os.listdir(trg_bitmap_dir)
    for i, fname in enumerate(test_list): 
        print(fname)
        test_path = trg_bitmap_dir + fname
        test_image = load_image(test_path, transform)
        image_tensor = to_var(test_image)
        in_sentence = gen_caption_from_image(image_tensor, encoder, decoder, vocab)
        print(in_sentence)
        image_matrix = cv2.imread(test_path)
        doc = gen_svg_from_predict(in_sentence.split(' '), image_matrix)

        with open(os.path.join(svg_from_out, fname.split('.')[0]+'.svg'), 'w+') as f:
            f.write(doc)
        cairosvg.svg2png(url=svg_from_out+ fname.split('.')[0] + '.svg', write_to= bitmap_from_out+fname)
Example #5
def main():
    torch.manual_seed(args.seed)  # set the random seed

    train_dataset = SpeakerTrainDataset(samples_per_speaker=args.samples_per_speaker)  # training set loader
    n_classes = train_dataset.n_classes  # number of speakers
    print('Num of classes: {}'.format(n_classes))

    model = ResNet(layers=[1, 1, 1, 1], embedding_size=args.embedding_size, n_classes=n_classes, m=args.m).to(device)
    torchsummary.summary(model, (1,161,300))
    if args.optimizer == 'sgd':  # use the SGD optimizer
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, dampening=args.dampening, weight_decay=args.wd)
    elif args.optimizer == 'adagrad':  # use the Adagrad optimizer
        optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lr, lr_decay=args.lr_decay, weight_decay=args.wd)
    else:  # use the Adam optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
    criterion = AngleLoss(lambda_min=args.lambda_min, lambda_max=args.lambda_max).to(device)  # loss function

    start = 1
    if args.resume:  # optionally resume from a previously saved checkpoint
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            if args.start is not None:
                start = args.start
            else:
                start = checkpoint['epoch'] + 1
            if args.load_it:
                criterion.it = checkpoint['it']
            elif args.it is not None:
                criterion.it = args.it
            if args.load_optimizer:
                optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                              num_workers=1, pin_memory=True)

    test_dataset = SpeakerTestDataset(transform=args.transform)
    test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False,
                             num_workers=1, pin_memory=True)

    for epoch in range(start, args.epochs + 1):
        train(epoch, model, criterion, optimizer, train_loader)

        if epoch % 5 == 0:
            test(model, test_loader)  # evaluate
            task = pd.read_csv('task/task.csv', header=None, delimiter = '[ ]', engine='python')
            pred = pd.read_csv(args.final_dir + 'pred.csv', engine='python')
            y_true = np.array(task.iloc[:, 0])
            y_pred = np.array(pred.iloc[:, -1])
            eer, thresh = cal_eer(y_true, y_pred)
            print('EER      : {:.3%}'.format(eer))
            print('Threshold: {:.5f}'.format(thresh))
Example #6
def get_models(model_weights):
    model_dict = torch.load(model_weights)
    class_name = model_dict['class_name']

    state_dict = model_dict['net']

    model = ResNet(class_name=class_name)
    model.to('cuda')

    model.load_state_dict(state_dict)
    model.eval()
    return model, class_name
Example #7
def resnet50(weights_path, device, rt=True):

    state_dict_ = torch.load(weights_path, map_location=device)
    state_dict_model = state_dict_['state_dict']

    modified_state_dict = {}
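    # strip the leading 'module.' prefix (7 characters) that nn.DataParallel
    # prepends to parameter names, so the keys match a plain ResNet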
    for key in state_dict_model.keys():
        mod_key = key[7:]
        modified_state_dict.update({mod_key: state_dict_model[key]})

    model = ResNet(Bottleneck, [3, 4, 6, 3], modified_state_dict)
    model.load_state_dict(modified_state_dict, strict=False)

    return model
Example #8
def resume_training(weights_path, device):
    '''
    To resume training
    '''
    checkpoint = torch.load(weights_path, map_location=device)
    model = ResNet(
        Bottleneck, [3, 4, 6, 3], checkpoint['model_state_dict'], rt=True)
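    # strict=False tolerates missing or unexpected keys between the checkpoint
    # and the freshly constructed model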
    model.load_state_dict(checkpoint['model_state_dict'], strict=False)
    optimizer = optim.SGD(model.parameters())
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    cls_loss = checkpoint['cls_loss']
    mec_loss = checkpoint['mec_loss']
    test_loss = checkpoint['test_loss']
    return model, optimizer, epoch, cls_loss, mec_loss, test_loss
Example #9
def main():
    model = ResNet(ResidualBlock)
    model.eval()
    model.load_state_dict(torch.load("model/best.pkl", map_location=device))
    logger.info("Valid: loaded model")

    predict_dataloader = get_predict_data_loader()

    for i, (images, labels) in enumerate(predict_dataloader):
        predict_label1, predict_label2 = model(images)
        predict_label = LabeltoStr([
            np.argmax(predict_label1.data.numpy()[0]),
            np.argmax(predict_label2.data.numpy()[0]),
        ])
        true_label = LabeltoStr(labels.data.numpy()[0])
        logger.info(
            f"Test: {i}, Expect: {true_label}, Predict: {predict_label}, Result: {True if true_label == predict_label else False}"
        )
Example #10
def test_model(path):
    device = torch.device("cuda")
    model = ResNet(BasicBlock, [3, 4, 6, 3])
    model = model.to(device)
    model.load_state_dict(torch.load(MODEL_PATH))
    model.eval()

    with torch.no_grad():
        dataset = TumorImage(path,
                             transform=transforms.Compose(
                                 [Rescale(256),
                                  ToTensor(),
                                  Normalize()]))

        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1)
        for input in dataloader:
            input = input.float().to(device)
            output = model(input)
            val = torch.max(output, 1)[1]
            return val.view(-1).cpu().numpy()[0]
Example #11
def main(model, infile, outfile, batch, cutoff):
	start_time = time.time()
	device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
	print("Device: " + str(device))

	### make output folder
	if not os.path.exists(outfile):
		os.makedirs(outfile)

	### load model
	bmodel = ResNet(Bottleneck, [2,2,2,2]).to(device).eval()
	bmodel.load_state_dict(torch.load(model,  map_location=device))
	print("[Step 0]$$$$$$$$$$ Done loading model")


	### load data
	data_test = []
	data_name = []
	batchi = 0
	it = 0
	for fileNM in glob.glob(infile + '/*.fast5'):
		data_test, data_name = get_raw_data(infile, fileNM, data_test, data_name, cutoff)
		it += 1

		if it == batch:
			print("[Step 1]$$$$$$$$$$ Done loading data with batch " + str(batchi)+ \
				", Getting " + str(len(data_test)) + " of sequences")
			data_test = normalization(data_test, batchi)
			process(data_test, data_name, batchi, bmodel, outfile, device)
			print("[Step 4]$$$$$$$$$$ Done with batch " + str(batchi))
			print()
			del data_test
			data_test = []
			del data_name
			data_name = []
			batchi += 1
			it = 0

	print("[Step FINAL]--- %s seconds ---" % (time.time() - start_time))
Example #12
def main(ttrain, tval, ntrain, nval, outpath, interm, batch, epoch, learningrate):
	device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
	print(device)

	# Parameters
	params = {'batch_size': batch,
				'shuffle': True,
				'num_workers': 10}

	### load files
	training_set = Dataset(ttrain, ntrain)
	training_generator = DataLoader(training_set, **params)

	validation_set = Dataset(tval, nval)
	validation_generator = DataLoader(validation_set, **params)

	zymo_train = torch.load(ttrain)
	hela_train = torch.load(ntrain)

	zymo_val = torch.load(tval)
	hela_val = torch.load(nval)

	### load model
	model = ResNet(Bottleneck, [2,2,2,2]).to(device)
	if interm is not None:
		model.load_state_dict(torch.load(interm))

	criterion = nn.CrossEntropyLoss().to(device)
	optimizer = torch.optim.Adam(model.parameters(), lr=learningrate)

	bestacc = 0
	bestmd = None
	i = 0

	### Training
	for epoch in range(epoch):
		for spx, spy in training_generator:
				spx, spy = spx.to(device), spy.to(torch.long).to(device)

				# Forward pass
				outputs = model(spx)
				loss = criterion(outputs, spy)
				acc = 100.0 * (spy == outputs.max(dim=1).indices).float().mean().item()

				# Validation
				with torch.set_grad_enabled(False):
						acc_vt = 0
						vti = 0
						for valx, valy in validation_generator:
								valx, valy = valx.to(device), valy.to(device)
								outputs_val = model(valx)
								acc_v = 100.0 * (valy == outputs_val.max(dim=1).indices).float().mean().item()
								vti += 1
								acc_vt += acc_v
						acc_vt = acc_vt / vti
						if bestacc < acc_vt:
								bestacc = acc_vt
								bestmd = model
								torch.save(bestmd.state_dict(), outpath)
				
						print("epoch: " + str(epoch) + ", i: " + str(i) + ", bestacc: " + str(bestacc))
						i += 1
				
				# Backward and optimize
				optimizer.zero_grad()
				loss.backward()
				optimizer.step()
Example #13
                      '--load',
                      dest='load',
                      default=False,
                      help='load file model')

    (options, args) = parser.parse_args()
    return options


if __name__ == '__main__':

    args = get_args()

    net = ResNet()

    if args.load:
        if args.gpu:
            net.load_state_dict(torch.load(args.load))
        else:
            net.load_state_dict(torch.load(args.load, map_location='cpu'))
        print('Model loaded from %s' % (args.load))

    if args.gpu:
        net.cuda()
        cudnn.benchmark = True

    train_net(net=net,
              epochs=args.epochs,
              gpu=args.gpu,
              data_dir=args.data_dir)
Example #14
class Solver(object):

    DEFAULTS = {}

    def __init__(self, version, data_loader, config):
        """
        Initializes a Solver object
        """

        # data loader
        self.__dict__.update(Solver.DEFAULTS, **config)
        self.version = version
        self.data_loader = data_loader

        self.build_model()

        # TODO: build tensorboard

        # start with a pre-trained model
        if self.pretrained_model:
            self.load_pretrained_model()

    def build_model(self):
        """
        Instantiates the model, loss criterion, and optimizer
        """

        # instantiate model
        self.model = ResNet(self.config, self.input_channels, self.class_count)

        # instantiate loss criterion
        self.criterion = nn.CrossEntropyLoss()

        # instantiate optimizer
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.lr,
                                   momentum=self.momentum,
                                   weight_decay=self.weight_decay)

        self.scheduler = scheduler.StepLR(self.optimizer,
                                          step_size=self.sched_step_size,
                                          gamma=self.sched_gamma)

        # print network
        self.print_network(self.model, 'ResNet')

        # use gpu if enabled
        if torch.cuda.is_available() and self.use_gpu:
            self.model.cuda()
            self.criterion.cuda()

    def print_network(self, model, name):
        """
        Prints the structure of the network and the total number of parameters
        """
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(name)
        print(model)
        print("The number of parameters: {}".format(num_params))

    def load_pretrained_model(self):
        """
        loads a pre-trained model from a .pth file
        """
        self.model.load_state_dict(
            torch.load(
                os.path.join(self.model_save_path,
                             '{}.pth'.format(self.pretrained_model))))
        print('loaded trained model ver {}'.format(self.pretrained_model))

    def print_loss_log(self, start_time, iters_per_epoch, e, i, loss):
        """
        Prints the loss and elapsed time for each epoch
        """
        total_iter = self.num_epochs * iters_per_epoch
        cur_iter = e * iters_per_epoch + i

        elapsed = time.time() - start_time
        total_time = (total_iter - cur_iter) * elapsed / (cur_iter + 1)
        epoch_time = (iters_per_epoch - i) * elapsed / (cur_iter + 1)

        epoch_time = str(datetime.timedelta(seconds=epoch_time))
        total_time = str(datetime.timedelta(seconds=total_time))
        elapsed = str(datetime.timedelta(seconds=elapsed))

        log = "Elapsed {}/{} -- {}, Epoch [{}/{}], Iter [{}/{}], " \
              "loss: {:.4f}".format(elapsed,
                                    epoch_time,
                                    total_time,
                                    e + 1,
                                    self.num_epochs,
                                    i + 1,
                                    iters_per_epoch,
                                    loss)

        # TODO: add tensorboard

        print(log)

    def save_model(self, e):
        """
        Saves a model per e epoch
        """
        path = os.path.join(self.model_save_path,
                            '{}/{}.pth'.format(self.version, e + 1))

        torch.save(self.model.state_dict(), path)

    def model_step(self, images, labels):
        """
        A step for each iteration
        """

        # set model in training mode
        self.model.train()

        # empty the gradients of the model through the optimizer
        self.optimizer.zero_grad()

        # forward pass
        output = self.model(images)

        # compute loss
        loss = self.criterion(output, labels.squeeze())

        # compute gradients using back propagation
        loss.backward()

        # update parameters
        self.optimizer.step()

        # return loss
        return loss

    def train(self):
        """
        Training process
        """
        self.losses = []
        self.top_1_acc = []
        self.top_5_acc = []

        iters_per_epoch = len(self.data_loader)

        # start with a trained model if exists
        if self.pretrained_model:
            start = int(self.pretrained_model.split('/')[-1])
        else:
            start = 0

        # start training
        start_time = time.time()
        for e in range(start, self.num_epochs):
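            # advance the StepLR schedule once per epoch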
            self.scheduler.step()
            for i, (images, labels) in enumerate(tqdm(self.data_loader)):
                images = to_var(images, self.use_gpu)
                labels = to_var(labels, self.use_gpu)

                loss = self.model_step(images, labels)

            # print out loss log
            if (e + 1) % self.loss_log_step == 0:
                self.print_loss_log(start_time, iters_per_epoch, e, i, loss)
                self.losses.append((e, loss))

            # save model
            if (e + 1) % self.model_save_step == 0:
                self.save_model(e)

            # evaluate on train dataset
            if (e + 1) % self.train_eval_step == 0:
                top_1_acc, top_5_acc = self.train_evaluate(e)
                self.top_1_acc.append((e, top_1_acc))
                self.top_5_acc.append((e, top_5_acc))

        # print losses
        print('\n--Losses--')
        for e, loss in self.losses:
            print(e, '{:.4f}'.format(loss))

        # print top_1_acc
        print('\n--Top 1 accuracy--')
        for e, acc in self.top_1_acc:
            print(e, '{:.4f}'.format(acc))

        # print top_5_acc
        print('\n--Top 5 accuracy--')
        for e, acc in self.top_5_acc:
            print(e, '{:.4f}'.format(acc))

    def eval(self, data_loader):
        """
        Returns the count of top 1 and top 5 predictions
        """

        # set the model to eval mode
        self.model.eval()

        top_1_correct = 0
        top_5_correct = 0
        total = 0

        with torch.no_grad():
            for images, labels in data_loader:

                images = to_var(images, self.use_gpu)
                labels = to_var(labels, self.use_gpu)

                output = self.model(images)
                total += labels.size()[0]

                # top 1
                # get the max for each instance in the batch
                _, top_1_output = torch.max(output.data, dim=1)

                top_1_correct += torch.sum(
                    torch.eq(labels.squeeze(), top_1_output))

                # top 5
                _, top_5_output = torch.topk(output.data, k=5, dim=1)
                for i, label in enumerate(labels):
                    if label in top_5_output[i]:
                        top_5_correct += 1

        return top_1_correct.item(), top_5_correct, total

    def train_evaluate(self, e):
        """
        Evaluates the performance of the model using the train dataset
        """
        top_1_correct, top_5_correct, total = self.eval(self.data_loader)
        log = "Epoch [{}/{}]--top_1_acc: {:.4f}--top_5_acc: {:.4f}".format(
            e + 1, self.num_epochs, top_1_correct / total,
            top_5_correct / total)
        print(log)
        return top_1_correct / total, top_5_correct / total

    def test(self):
        """
        Evaluates the performance of the model using the test dataset
        """
        top_1_correct, top_5_correct, total = self.eval(self.data_loader)
        log = "top_1_acc: {:.4f}--top_5_acc: {:.4f}".format(
            top_1_correct / total, top_5_correct / total)
        print(log)
Example #15
def main(weight_path, joblib_path, text_path):
    model = ResNet(embedding_dim=128)
    model.load_state_dict(torch.load(weight_path))
    model.to(device)
    model.eval()

    testgen = TripletDataset(text_path,
                             transforms=TRANSFORMS,
                             return_path=True)
    testloader = DataLoader(dataset=testgen,
                            batch_size=100,
                            num_workers=10,
                            pin_memory=False)

    feats = []
    targets = []
    paths = []

    if not os.path.isfile(joblib_path):
        print('>>> Extracting features for the test set')
        with torch.no_grad():
            for (imgs, target, path) in tqdm(testloader):
                imgs = imgs.to(device)
                target = target.numpy().tolist()
                feat = model(imgs).detach().cpu().numpy().tolist()
                feats.extend(feat)
                targets.extend(target)
                paths.extend(path)

            joblib.dump([feats, targets, paths], joblib_path)
    else:
        print('>>> Loading cached features for the test set')
        feats, targets, paths = joblib.load(joblib_path)

    feats, targets = np.array(feats), np.array(targets)

    print('>>> Computing mAP on the test set')
    mAP = cal_map(feats, targets, topK=10)
    print('>>> Computed mAP: {:.4f}'.format(mAP))

    nb = random.randint(0, feats.shape[0] - 1)
    label = targets[nb]
    feat = feats[nb]
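    # rank every gallery feature by dot-product similarity to the query;
    # index 0 of the sorted list is presumably the query itself, so it is skipped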
    sim = -np.dot(feats, feat)
    sort = np.argsort(sim)[1:10]

    save_path = './results/{}/'.format(nb)
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
        shutil.copy(
            paths[nb],
            os.path.join(
                save_path,
                'label_{}_query_{}'.format(label,
                                           os.path.basename(paths[nb]))))

    for i, indx in enumerate(sort):
        p = paths[indx]
        shutil.copy(
            p,
            os.path.join(
                save_path,
                'label_{}_index_{}_{}'.format(targets[indx], i,
                                              os.path.basename(p))))
    print('>>> Retrieval results written to the results folder')
Example #16
import gesture_data_loader
import face_data_loader
from PIL import Image
import torch
from config import GESTURE_MODEL_PATH, GESTURE_CLASS_NUM, FACE_MODEL_PATH, FACE_CLASS_NUM,USE_GPU
from model import GestureModel, ResNet

gesture_model = GestureModel(GESTURE_CLASS_NUM)
face_model = ResNet(FACE_CLASS_NUM)
if USE_GPU:
    gesture_model.load_state_dict(torch.load(GESTURE_MODEL_PATH, map_location=lambda storage, loc: storage.cuda()))
    face_model.load_state_dict(torch.load(FACE_MODEL_PATH, map_location=lambda storage, loc: storage.cuda()))
else:
    gesture_model.load_state_dict(torch.load(GESTURE_MODEL_PATH))
    face_model.load_state_dict(torch.load(FACE_MODEL_PATH))

gesture_model.eval()
face_model.eval()


def predict_gesture_img(image):
    image = gesture_data_loader.test_transform(image)
    image = image.unsqueeze(0)

    val, predicted = torch.max(gesture_model(image).data, 1)
    if val.item() > 0.5:
        return gesture_data_loader.classes[predicted.item()]
    else:
        return 'Unknown'

Example #17
                                      mode=args.mode)

    ver_loader = DataLoader(ver_dataset, batch_size=8, shuffle=False)

    print(f"Verification Dataset Length: {len(ver_dataset)}")

    #Hyperparameters
    num_channel = 3
    num_class = 2300
    batch_size = args.batch_size
    hidden_sizes = [3, 4, 6, 3]

    index_dict = index_mapping()

    model = ResNet(num_channel, num_class, hidden_sizes)
    model.load_state_dict(torch.load('65_model.pth.tar'))
    model.to(device)

    names, preds = test_class(class_loader, model, device)
    names = np.hstack(names)
    preds = np.hstack(preds)
    true_preds = []
    for pred in preds:
        true_preds.append(index_dict[pred])

    class_results = pd.DataFrame({'Id': names, 'Category': true_preds})
    class_results.to_csv('class.csv', index=False)

    # names1, names2, score_list, truth_list = verification(ver_loader, model, device, mode=args.mode)
    # import pdb; pdb.set_trace()
    # names1 = np.hstack(names1)
Example #18
train_loader = DataLoader(train_set,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=4,
                          pin_memory=True)

if model_choice == "ResNet":
    model = ResNet(n_in=7,
                   n_features=16,
                   height=128,
                   width=256,
                   droprate=0,
                   num_blocks=2).float()
    model.to(device)
    print("ResNet initialized")
    model.load_state_dict(
        torch.load("../Models/ResNet_final", map_location=device))
    model.eval()

elif model_choice == "ConvNet":
    model = ConvNet(n_in=7, n_features=64, height=128, width=256,
                    droprate=0).float()
    model.to(device)
    print("ConvNet initialized")
    model.load_state_dict(
        torch.load("../Models/ConvNet_final", map_location=device))
    model.eval()

print("Model loaded")

train_loader.sampler.num_samples = 10000
Example #19
def main(args):
    # Model settings
    model = ResNet()
    if args.cuda:
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), args.lr, weight_decay=args.wd)
    if args.ckpt > 0:
        ckpt_name = 'resnet152'
        if args.poison:
            ckpt_name += '-poison'
        ckpt_name += '-' + str(args.ckpt) + '.pkl'
        ckpt_path = os.path.join('./ckpt', ckpt_name)
        print('Loading checkpoint from {}'.format(ckpt_path))
        dct = torch.load(ckpt_path)
        model.load_state_dict(dct['model'])
        optimizer.load_state_dict(dct['optim'])

    # Data loader settings
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((64, 64)),
        transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
    ])
    aug_transform = transforms.Compose([
        transforms.RandomChoice([
            # do nothing
            transforms.Compose([]),
            # horizontal flip
            transforms.RandomHorizontalFlip(1.),
            # random crop
            transforms.RandomResizedCrop(64),
            # rotate
            transforms.RandomRotation(30)
        ]),
        transforms.ToTensor(),
        transforms.Resize((64, 64)),
        transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
    ])
    task_dir = '/data/csnova1/benchmarks/%s' % args.task
    poison_dir = '/data/csnova1/poison'
    poison_config = get_poison_config()
    if args.task == "cifar10":
        Loader = CIFAR10Loader
        PoisonedILoader = PoisonedCIFAR10Loader
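    # note: only the cifar10 task is mapped here; any other --task value would
    # leave Loader and PoisonedILoader undefined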
    train_loader = Loader(root=task_dir,
                          batch_size=args.batch_size,
                          split='train',
                          transform=aug_transform)
    test_loader = PoisonedILoader(root=task_dir,
                                  poison_root=poison_dir,
                                  poison_config=poison_config,
                                  poison_num=6,
                                  batch_size=args.batch_size,
                                  split="val",
                                  transform=transform)

    # Start
    if args.run == "train":
        train(args, train_loader, model, optimizer)
    elif args.run == "test":
        evaluate(args, test_loader, model)
Example #20
class face_learner(object):
    def __init__(self, conf, inference=False, transfer=0, ext='final'):
        pprint.pprint(conf)
        self.conf = conf
        if conf.arch == "mobile":
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        elif conf.arch == "ir_se":
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.arch).to(conf.device)
            print('{}_{} model generated'.format(conf.arch, conf.net_depth))
        elif conf.arch == "resnet50":
            self.model = ResNet(embedding_size=512,
                                arch=conf.arch).to(conf.device)
            print("resnet model {} generated".format(conf.arch))
        else:
            exit("model not supported yet!")

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            tmp_idx = ext.rfind('_')  # find the last '_' to replace it by '/'
            self.ext = '/' + ext[:tmp_idx] + '/' + ext[tmp_idx + 1:]
            self.writer = SummaryWriter(str(conf.log_path) + self.ext)
            self.step = 0

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if transfer == 3:
                self.optimizer = optim.Adam(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr)  # , momentum = conf.momentum)
            elif transfer == 2:
                self.optimizer = optim.Adam(
                    [
                        {
                            'params': paras_wo_bn + [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                    lr=conf.lr)  # , momentum = conf.momentum)
            elif transfer == 1:
                self.optimizer = optim.Adam(
                    [
                        {
                            'params': [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                    lr=conf.lr)  # , momentum = conf.momentum)
            else:
                """
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
                """
                self.optimizer = optim.Adam(list(self.model.parameters()) +
                                            list(self.head.parameters()),
                                            lr=conf.lr)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.save_freq = len(self.loader)  #//5 # originally, 100
            self.evaluate_every = len(self.loader)  #//5 # originally, 10
            self.save_every = len(self.loader)  #//2 # originally, 5
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            # self.val_112, self.val_112_issame = get_val_pair(self.loader.dataset.root.parent, 'val_112')
        else:
            self.threshold = conf.threshold

        self.train_losses = []
        self.train_counter = []
        self.test_losses = []
        self.test_accuracy = []
        self.test_counter = []

    def save_state(self, model_only=False):
        save_path = self.conf.stored_result_dir
        torch.save(self.model.state_dict(), save_path + os.sep + 'model.pth')
        if not model_only:
            torch.save(self.head.state_dict(), save_path + os.sep + 'head.pth')
            torch.save(self.optimizer.state_dict(),
                       save_path + os.sep + 'optimizer.pth')

    def load_state(self, save_path, from_file=False, model_only=False):
        if from_file:
            if self.conf.arch == "mobile":
                self.model.load_state_dict(
                    torch.load(save_path / 'model_mobilefacenet.pth',
                               map_location=self.conf.device))
            elif self.conf.arch == "ir_se":
                self.model.load_state_dict(
                    torch.load(save_path / 'model_ir_se50.pth',
                               map_location=self.conf.device))
            else:
                exit("loading model not supported yet!")
        else:
            state_dict = torch.load(save_path, map_location=self.conf.device)
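            # checkpoints saved from an nn.DataParallel model prefix every key
            # with 'module.'; strip it so the keys match the bare model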
            if "module." in list(state_dict.keys())[0]:
                new_dict = {}
                for key in state_dict:
                    new_key = key[7:]
                    assert new_key in self.model.state_dict().keys(
                    ), "wrong model loaded!"
                    new_dict[new_key] = state_dict[key]
                self.model.load_state_dict(new_dict)
            else:
                self.model.load_state_dict(state_dict)
        if not model_only:
            self.head.load_state_dict(
                torch.load(save_path / 'head.pth',
                           map_location=self.conf.device))
            self.optimizer.load_state_dict(
                torch.load(save_path / 'optimizer.pth'))

    def board_val(self, db_name, accuracy, best_threshold, roc_curve_tensor):
        self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy,
                               self.step)
        self.writer.add_scalar('{}_best_threshold'.format(db_name),
                               best_threshold, self.step)
        self.writer.add_image('{}_roc_curve'.format(db_name), roc_curve_tensor,
                              self.step)
        # self.writer.add_scalar('{}_val:true accept ratio'.format(db_name), val, self.step)
        # self.writer.add_scalar('{}_val_std'.format(db_name), val_std, self.step)
        # self.writer.add_scalar('{}_far:False Acceptance Ratio'.format(db_name), far, self.step)

    def evaluate(self, conf, carray, issame, nrof_folds=5, tta=False):
        self.model.eval()
        idx = 0
        embeddings = np.zeros([len(carray), conf.embedding_size])
        with torch.no_grad():
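            # embed the array in full batches first, then handle the final
            # partial batch below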
            while idx + conf.batch_size <= len(carray):
                batch = torch.tensor(carray[idx:idx + conf.batch_size])
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)) + self.model(
                        fliped.to(conf.device))
                    embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch)
                else:
                    embeddings[idx:idx + conf.batch_size] = self.model(
                        batch.to(conf.device)).cpu()
                idx += conf.batch_size
            if idx < len(carray):
                batch = torch.tensor(carray[idx:])
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)) + self.model(
                        fliped.to(conf.device))
                    embeddings[idx:] = l2_norm(emb_batch)
                else:
                    embeddings[idx:] = self.model(batch.to(conf.device)).cpu()
        tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                       nrof_folds)
        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)
        roc_curve_tensor = trans.ToTensor()(roc_curve)
        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor

    def find_lr(self,
                conf,
                init_value=1e-8,
                final_value=10.,
                beta=0.98,
                bloding_scale=3.,
                num=None):
        if not num:
            num = len(self.loader)
        mult = (final_value / init_value)**(1 / num)
        lr = init_value
        for params in self.optimizer.param_groups:
            params['lr'] = lr
        self.model.train()
        avg_loss = 0.
        best_loss = 0.
        batch_num = 0
        losses = []
        log_lrs = []
        for i, (imgs, labels) in enumerate(
                self.loader):  #tqdm(enumerate(self.loader), total=num):

            imgs = imgs.to(conf.device)
            labels = labels.to(conf.device)
            batch_num += 1

            self.optimizer.zero_grad()

            embeddings = self.model(imgs)
            thetas = self.head(embeddings, labels)
            loss = conf.ce_loss(thetas, labels)

            #Compute the smoothed loss
            avg_loss = beta * avg_loss + (1 - beta) * loss.item()
            self.writer.add_scalar('avg_loss', avg_loss, batch_num)
            smoothed_loss = avg_loss / (1 - beta**batch_num)
            self.writer.add_scalar('smoothed_loss', smoothed_loss, batch_num)
            #Stop if the loss is exploding
            if batch_num > 1 and smoothed_loss > bloding_scale * best_loss:
                print('exited with best_loss at {}'.format(best_loss))
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses
            #Record the best loss
            if smoothed_loss < best_loss or batch_num == 1:
                best_loss = smoothed_loss
            #Store the values
            losses.append(smoothed_loss)
            log_lrs.append(math.log10(lr))
            self.writer.add_scalar('log_lr', math.log10(lr), batch_num)
            #Do the SGD step
            #Update the lr for the next step

            loss.backward()
            self.optimizer.step()

            lr *= mult
            for params in self.optimizer.param_groups:
                params['lr'] = lr
            if batch_num > num:
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses

    def train(self, conf, epochs):
        self.model.train()
        running_loss = 0.
        for e in range(epochs):
            print('epoch {} started'.format(e))
            if e == self.milestones[0]:
                self.schedule_lr()
            if e == self.milestones[1]:
                self.schedule_lr()
            if e == self.milestones[2]:
                self.schedule_lr()
            for imgs, labels in iter(self.loader):  #tqdm(iter(self.loader)):
                imgs = imgs.to(conf.device)
                labels = labels.to(conf.device)
                self.optimizer.zero_grad()
                embeddings = self.model(imgs)
                thetas = self.head(embeddings, labels)
                loss = conf.ce_loss(thetas, labels)
                loss.backward()
                running_loss += loss.item()
                self.optimizer.step()

                if self.step % self.save_freq == 0 and self.step != 0:
                    self.train_losses.append(loss.item())
                    self.train_counter.append(self.step)

                self.step += 1
            self.save_loss()

        # self.save_state(conf, accuracy, to_save_folder=True, extra=self.ext, model_only=True)

    def schedule_lr(self):
        for params in self.optimizer.param_groups:
            params['lr'] /= 10
        print(self.optimizer)

    def infer(self, conf, faces, target_embs, tta=False):
        '''
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test time augmentation (hflip, that's all)
        '''
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(
                    conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = self.model(
                    conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:
                embs.append(
                    self.model(
                        conf.test_transform(img).to(conf.device).unsqueeze(0)))
        source_embs = torch.cat(embs)

        diff = source_embs.unsqueeze(-1) - target_embs.transpose(
            1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        minimum, min_idx = torch.min(dist, dim=1)
        min_idx[minimum > self.threshold] = -1  # if no match, set idx to -1
        return min_idx, minimum

    def binfer(self, conf, faces, target_embs, tta=False):
        '''
        return raw scores for every class 
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test time augmentation (hflip, that's all)
        '''
        self.model.eval()
        self.plot_result()
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(
                    conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = self.model(
                    conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:
                embs.append(
                    self.model(
                        conf.test_transform(img).to(conf.device).unsqueeze(0)))
        source_embs = torch.cat(embs)

        diff = source_embs.unsqueeze(-1) - target_embs.transpose(
            1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        # print(dist)
        return dist.detach().cpu().numpy()
        # minimum, min_idx = torch.min(dist, dim=1)
        # min_idx[minimum > self.threshold] = -1 # if no match, set idx to -1
        # return min_idx, minimum

    def evaluate(self, data_dir, names_idx, target_embs, tta=False):
        '''
        return raw scores for every class
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test time augmentation (hflip, that's all)
        '''
        self.model.eval()
        score_names = []
        score = []
        wrong_names = dict()
        test_dir = data_dir
        for path in test_dir.iterdir():
            if path.is_file():
                continue
            # print(path)
            for fil in path.iterdir():
                # print(fil)
                orig_name = ''.join(
                    [i for i in fil.name.strip().split('.')[0]])

                for name in names_idx.keys():
                    if name in orig_name:
                        score_names.append(names_idx[name])

                img = Image.open(str(fil))
                with torch.no_grad():
                    if tta:
                        mirror = trans.functional.hflip(img)
                        emb = self.model(
                            self.conf.test_transform(img).to(
                                self.conf.device).unsqueeze(0))
                        emb_mirror = self.model(
                            self.conf.test_transform(mirror).to(
                                self.conf.device).unsqueeze(0))
                        emb = l2_norm(emb + emb_mirror)
                    else:
                        emb = self.model(
                            self.conf.test_transform(img).to(
                                self.conf.device).unsqueeze(0))

                diff = emb.unsqueeze(-1) - target_embs.transpose(
                    1, 0).unsqueeze(0)
                dist = torch.sum(torch.pow(diff, 2), dim=1).cpu().numpy()
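                # convert squared distances into similarity scores via exp(-distance)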
                score.append(np.exp(dist.dot(-1)))

                pred = np.argmax(score[-1])
                label = score_names[-1]
                if pred != label:
                    wrong_names[orig_name] = pred

        return score, score_names, wrong_names

    def save_loss(self):
        if not os.path.exists(self.conf.stored_result_dir):
            os.mkdir(self.conf.stored_result_dir)

        result = dict()
        result["train_losses"] = np.asarray(self.train_losses)
        result["train_counter"] = np.asarray(self.train_counter)
        result['test_accuracy'] = np.asarray(self.test_accuracy)
        result['test_losses'] = np.asarray(self.test_losses)
        result["test_counter"] = np.asarray(self.test_counter)

        with open(os.path.join(self.conf.stored_result_dir, "result_log.p"),
                  'wb') as fp:
            pickle.dump(result, fp)

    def plot_result(self):
        result_log_path = os.path.join(self.conf.stored_result_dir,
                                       "result_log.p")
        with open(result_log_path, 'rb') as f:
            result_dict = pickle.load(f)

        train_losses = result_dict['train_losses']
        train_counter = result_dict['train_counter']
        test_losses = result_dict['test_losses']
        test_counter = result_dict['test_counter']
        test_accuracy = result_dict['test_accuracy']

        fig1 = plt.figure(figsize=(12, 8))
        ax1 = fig1.add_subplot(111)
        ax1.plot(train_counter, train_losses, 'b', label='Train_loss')
        ax1.legend()
        plt.savefig(os.path.join(self.conf.stored_result_dir,
                                 "train_loss.png"))
        plt.close()
        """
Example #21
                        type=str,
                        default='False',
                        choices=['True', 'False'],
                        help="save inference result")
    parser.add_argument("--vis_save_path", type=str, \
                        default='', )
    args = parser.parse_args()
    print_args(args)

    data_config = parse_data_config(args.data_config)
    valid_path = data_config['valid']
    nclass = int(data_config['classes'])
    anchors = get_anchors(data_config['anchors']).to('cuda')

    model = ResNet(anchors).to('cuda')
    model.load_state_dict(torch.load(args.weights_path)['net'])

    print('Compute mAP...')
    save_path = '/data1/chenww/my_research/Two-Stage-Defect-Detection/detector/models/small_8cls/1cls_896_bs_ep300_scratch_kmeansAnchor/test_result/'
    if os.path.exists(save_path):
        import shutil
        shutil.rmtree(save_path)
    os.mkdir(save_path)
    eval = evaluate(path=valid_path,
                    img_size=args.img_size,
                    batch_size=args.batch_size)

    # sample_metrics, image_acc0, image_acc1, bbox_acc, bbox_rec = eval(
    #     model,
    #     iou_thres=args.iou_thres,
    #     conf_thres=args.conf_thres,
Example #22
def main():
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print('===> Loading datasets')
    train_set = get_training_set(opt.dataset)
    test_set = get_test_set(opt.dataset)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)
    test_data_loader = DataLoader(dataset=test_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.testBatchSize,
                                  shuffle=False)

    print("===> Building model")
    if (opt.net == 'resnet'):
        model = ResNet()
    else:
        model = TFNet()
    criterion = nn.L1Loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    t = time.strftime("%Y%m%d%H%M")
    train_log = open(
        os.path.join(opt.log, "%s_%s_train.log") % (opt.net, t), "w")
    test_log = open(
        os.path.join(opt.log, "%s_%s_test.log") % (opt.net, t), "w")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch,
              train_log)
        if epoch % 10 == 0:
            test(test_data_loader, model, criterion, epoch, test_log)
            save_checkpoint(model, epoch, t)
    train_log.close()
    test_log.close()
Example #23
])

testset = torchvision.datasets.CIFAR10(root='../data',
                                       train=False,
                                       download=True,
                                       transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=128,
                                         shuffle=False,
                                         num_workers=2)

net = ResNet(n=5)
net = net.to(device=device)
net = torch.nn.DataParallel(net)
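# wrapping in DataParallel means the saved state dict is expected to carry the
# matching 'module.' key prefix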

net.load_state_dict(torch.load(param_path))

correct_count = 0
total_count = 0

with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        labels = labels.cuda()
        total_count += labels.size(0)
        correct_count += (predicted == labels).sum().item()

print("Accuracy: %f %%" % (100 * correct_count / total_count))
Example #24
def main():
	parser = argparse.ArgumentParser(description='Process environmental variables')
	parser.add_argument('--feature', dest='feature', action='store_true')
	parser.add_argument('--no-feature', dest='feature', action='store_false')
	parser.set_defaults(feature=True)
	parser.add_argument('--verbose', type=bool, default=False)
	parser.add_argument('--epoch', type=int, default=500)
	parser.add_argument('--disp', type=bool, default=False)
	parser.add_argument('--cuda', type=bool, default=True)
	parser.add_argument('--pkl_model', type=int, default=1)
	parser.add_argument('--fake_test', type=int, default=0)
	parser.add_argument('--batchSize', type=int, default=1)
	parser.add_argument('--model', type=str, default='resnet_acc=97_iter=1000.pkl')
	args = parser.parse_args()

	lnp = loadAndParse(args)
	classes = lnp.loadLabelsFromJson()
	tr_loader, test_X  = lnp.getImagesAsTensors()

	base, ext = os.path.splitext(args.model)
	if (ext == ".pkl"):  #using high score model
		model = ResNet(ResidualBlock, [3, 3, 3]).cuda()
		model.load_state_dict(torch.load('models225/' + args.model))
	else:
		model = torch.load('models225/' + args.model)
		model.eval()

	if not args.cuda:
		model.cpu()

	'''
	remove last fully connected layer
	this will contain the features extracted by the convnet
	'''
	# eye_classifier = nn.Sequential(*list(model.classifier.children())[:-1])
	# model.classifier = eye_classifier
	print('using model: ', args.model)

	corrIdx, Idx = 0, 0
	if (args.fake_test==1):
		for i in range(test_X.size(0)):
			output = model(Variable(test_X.cuda()))
			_, predicted = torch.max(output, 1)

			#collect classes
			classified = predicted.data[0][0]
			index = int(classified)

			if index == 0:  #fake
				corrIdx += 1
			Idx += 1

			img_class = classes[str(index)]

			#display image and class
			print('class of image', classes[str(index)])


		print('\n\ncorrectly classified: %d %%' %(100* corrIdx / Idx) )

	else:
		for images, labels in tr_loader:
			output = model(Variable(images.cuda()))
			_, predicted = torch.max(output, 1)

			#collect classes
			classified = predicted.data[0][0]
			index = int(classified)

			if index == 1:  #real
				corrIdx += 1
			Idx += 1
			img_class = classes[str(index)]

			#display image and class
			print('class of image', classes[str(index)])

		print('\n\ncorrectly classified: %d %%' %(100* corrIdx / Idx) )
Example #25
class Play:
    def __init__(self, modelGen):
        self.game = Game()
        self.model = ResNet()
        self.model.load_state_dict(
            torch.load('model/' + conf.PATH + '/Gen' + str(modelGen)))
        self.model.eval().cpu()

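        # load the compiled Monte-Carlo tree search library and declare the
        # argument types of its exported C functions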
        self.MCTS = cdll.LoadLibrary("monte_carlo_" + conf.PATH + ".dll")
        self.MCTS.setC.argtypes = [c_float]
        self.MCTS.clear.argtypes = []
        self.MCTS.SingleInit.argtypes = [POINTER(c_int)]
        self.MCTS.SingleMoveToLeaf.argtypes = []
        self.MCTS.SingleRiseNode.argtypes = [c_float, POINTER(c_float)]
        self.MCTS.SingleGetAction.argtypes = [c_float]
        self.MCTS.SingleGetState.argtypes = [POINTER(c_int)]

        print("W :", self.getValue())

    def draw(self):
        for i in range(conf.BOARD_SIZE):
            for j in range(conf.BOARD_SIZE):
                canvas.create_rectangle(i * square,
                                        j * square,
                                        i * square + square,
                                        j * square + square,
                                        fill='green',
                                        outline='black')
                if self.game.state[0][j][i]:
                    canvas.create_oval(i * square + 2,
                                       j * square + 2,
                                       i * square + square - 2,
                                       j * square + square - 2,
                                       fill='black')
                elif self.game.state[1][j][i]:
                    canvas.create_oval(i * square + 2,
                                       j * square + 2,
                                       i * square + square - 2,
                                       j * square + square - 2,
                                       fill='white')

    def monteCarlo(self):
        state = self.game.state.reshape(-1).astype(np.int32)
        c_state = np.ctypeslib.as_ctypes(state)
        self.MCTS.SingleInit(c_state)

        get = np.zeros(3 * conf.MAXIMUM_ACTION).astype(np.int32)
        c_get = np.ctypeslib.as_ctypes(get)
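        # each simulation descends to a leaf, evaluates that position with the network,
        # then backs the predicted value up through the visited nodes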
        for i in tqdm(range(conf.SEARCH_NUM)):
            self.MCTS.SingleMoveToLeaf()

            self.MCTS.SingleGetState(c_get)
            get = np.ctypeslib.as_array(c_get)
            X = get.reshape(1, 3, conf.BOARD_SIZE, conf.BOARD_SIZE)

            policy, value = self.model(torch.tensor(X, dtype=torch.float))

            policy = policy.detach().numpy().reshape(-1)
            value = value.detach().numpy().reshape(-1)
            c_policy = np.ctypeslib.as_ctypes(policy)
            c_value = np.ctypeslib.as_ctypes(value)[0]

            self.MCTS.SingleRiseNode(c_value, c_policy)

        action = self.MCTS.SingleGetAction(conf.TEMP)
        return action

    def getValue(self):
        policy, value = self.model(
            torch.from_numpy(self.game.state).unsqueeze(0).float())
        return value.detach().numpy()[0][0]

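    # click handler: a legal square is played by the human; an illegal click passes
    # the turn, and the MCTS agent then replies if it has any legal move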
    def click(self, event):
        x = int(event.x / square)
        y = int(event.y / square)
        action = x * conf.BOARD_SIZE + y

        if action in self.game.getLegalAction():
            self.game.action(action)
        else:
            self.game.changePlayer()

        actions = self.game.getLegalAction()
        if len(actions):
            action = self.monteCarlo()
            self.game.action(actions[action])
            print("W :", self.getValue())
        else:
            self.game.changePlayer()
        print("black:", int(np.sum(self.game.state[0, :, :])), " white:",
              int(np.sum(self.game.state[1, :, :])))

        self.draw()
Example #26
0
def main():
    torch.manual_seed(args.seed)  # set the random seed

    train_dataset = SpeakerTrainDataset()  # build the training dataset
    n_classes = train_dataset.n_classes  # number of speakers
    batch_sampler = BalancedBatchSampler(train_dataset.labels,
                                         train_dataset.count, 10, 10)
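    # presumably draws 10 speakers x 10 utterances per batch for online triplet mining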
    print('Num of classes: {}'.format(n_classes))

    model = ResNet(layers=[1, 1, 1],
                   embedding_size=args.embedding_size,
                   n_classes=n_classes)
    model.to(device)
    if args.optimizer == 'sgd':  # SGD optimizer
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    dampening=args.dampening,
                                    weight_decay=args.wd)
    elif args.optimizer == 'adagrad':  # Adagrad optimizer
        optimizer = torch.optim.Adagrad(model.parameters(),
                                        lr=args.lr,
                                        lr_decay=args.lr_decay,
                                        weight_decay=args.wd)
    else:  # Adam optimizer
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.wd)
    selector = RandomNegativeTripletSelector(args.m)
    criterion = OnlineTripletLoss(args.m, selector)
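    # online triplet loss with margin args.m; the selector presumably picks a random
    # negative among the triplets in each batch that violate the margin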

    start = 1
    if args.resume:  # optionally resume from a previously saved checkpoint
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            if args.start is not None:
                start = args.start
            else:
                start = checkpoint['epoch'] + 1
            if args.load_optimizer:
                optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    train_loader = DataLoader(train_dataset,
                              batch_sampler=batch_sampler,
                              num_workers=8,
                              pin_memory=True)

    test_dataset = SpeakerTestDataset(transform=args.transform)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             num_workers=8,
                             pin_memory=True)

    for epoch in range(start, args.epochs + 1):
        train(epoch, model, criterion, optimizer, train_loader)
        test(model, test_loader)  # evaluate on the test set
        task = pd.read_csv('task/task.csv',
                           header=None,
                           delimiter='[ ]',
                           engine='python')
        pred = pd.read_csv(args.final_dir + 'pred.csv', engine='python')
        y_true = np.array(task.iloc[:, 0])
        y_pred = np.array(pred.iloc[:, -1])
        eer, thresh = cal_eer(y_true, y_pred)
        print('\nEER      : {:.3%}'.format(eer))
        print('Threshold: {:.5f}'.format(thresh))
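        # fold BatchNorm into the preceding convolution: scale the weights by
        # gamma / sqrt(running_var + eps) and shift the bias by beta - mean * scale
        # (w, s, m, b are assumed to hold gamma, running_var, running_mean and beta)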
        s = w / np.sqrt(s + eps)
        weight = weight * s[:, None, None, None]
        weight = np.transpose(weight, (2, 3, 1, 0))
        bias = b - m * s

    else:
        weight = np.transpose(weight, (1, 0))
        bias = state_dict[keys[i + 1]].cpu().numpy()
        nclass = bias.shape[0]
        i += 2
    print(key, weight.shape, bias.shape)
    params[key[:-7] + '/weights:0'] = weight
    params[key[:-7] + '/biases:0'] = bias

model = ResNet(class_name=class_name)
model.load_state_dict(state_dict)
model.eval()

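# sanity-check forward pass on a random input, presumably to verify the converted weights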
input_data = np.random.random((1, 3, 224, 224)).astype(np.float32)
with torch.no_grad():
    inputs = torch.from_numpy(input_data.copy())
    inputs = Variable(inputs, requires_grad=False)
    f1, y1 = model(inputs)
    y1 = torch.sigmoid(y1).detach().numpy()
    f1 = f1.detach().numpy()


def Bottleneck(x, place, stride=1, downsampling=False, expansion=2, sc=''):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        padding='SAME'):
    '/kw_resources/Mirrored-image-classification/data/test/',
    transform=val_transform)

test_loader = torch.utils.data.DataLoader(test_images,
                                          batch_size=batch_size,
                                          shuffle=True)

from model import ResNet
in_ch = 3
f_out = 32
n_ch = 2

model = ResNet(in_ch, f_out, n_ch)
state_dict = torch.load(
    '/kw_resources/Mirrored-image-classification/weights/model.pth')
model.load_state_dict(state_dict['model_state_dict'])
model.to(device)
model.eval()

correct = 0  # total number of correctly classified samples
total = 0  # total number of samples evaluated
running_loss = 0.0

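# count correct predictions over the test set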
for _, data in enumerate(test_loader, 0):
    img, label = data
    img, label = img.to(device), label.to(device)
    outputs = model(img)
    _, predicted = torch.max(outputs.data, 1)
    total += label.size(0)
    # accumulate the number of correct predictions
    correct += (predicted == label).sum().item()
from tqdm import tqdm

std = [0.229, 0.224, 0.225]
mean = [0.485, 0.456, 0.406]
input_size = 112
transform = transforms.Compose([
    transforms.Resize(input_size),
    transforms.CenterCrop(input_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std, inplace=True)
])

# 4135
model = ResNet()
model.load_state_dict(
    torch.load('weight/gender_epoch_99.pkl',
               map_location='cpu')['model_state_dict'])
model.eval()

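# parse a WIDER FACE style label file: a line starting with '#' names an image,
# and the lines that follow carry that image's face annotations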
txt_path = "/data/datasets/widerface/train/label.txt"
imgs_path = []
words = []
f = open(txt_path, 'r')
lines = f.readlines()
isFirst = True
labels = []
for line in lines:
    line = line.rstrip()
    if line.startswith('#'):
        if isFirst is True:
            isFirst = False
Example #30
0
def get_result(pretrained_weights, test_data, result_txt_dir):

    with open(test_data) as ftxt:
        lines = ftxt.readlines()

    test_files = []
    for line in lines:
        line = line.strip().split()
        filename = line[0]
        classname = line[1]
        boxes = np.asarray(line[2:], dtype=np.float32).reshape(-1, 5)
        test_files.append((filename, classname, boxes))

    if debug:
        shuffle(test_files)
        test_files = test_files[:]

    model_dict = torch.load(pretrained_weights)
    class_name = model_dict['class_name']

    state_dict = model_dict['net']

    nclass = len(class_name)
    nfiles = len(test_files)
    print(class_name)
    print('     '.join(
        ['{}:{}'.format(i, c) for i, c in enumerate(class_name)]))
    print('nclass:', nclass)
    print('test files:', nfiles)

    model = ResNet(class_name=class_name)
    model.to('cuda')

    model.load_state_dict(state_dict)
    model.eval()

    with open(result_txt_dir + 'pred_result.txt', 'w') as ftxt:
        for i, (image_file, classname, boxes) in enumerate(test_files):
            if (i + 1) % 500 == 0 or (i + 1) == nfiles:
                print(i + 1, nfiles)
            img = cv2.imread(image_file)
            assert img is not None, image_file
            h, w, _ = img.shape
            if boxes.shape[0]:
                # print(boxes)
                boxes_str = ' '.join([' '.join(map(str, s)) for s in boxes])

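                # pick the box with the largest score x size and crop a crop_size square
                # centred on it, clamped to the image borders
                # (boxes are assumed to be [score, cx, cy, w, h] in normalised coordinates)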
                index = np.argmax(boxes[:, 0] * (boxes[:, 3] + boxes[:, 4]))
                _, cx, cy, _, _ = boxes[index]
                x1 = int(cx * w - crop_size / 2)
                y1 = int(cy * h - crop_size / 2)
                x1 = min(max(0, x1), w - crop_size)
                y1 = min(max(0, y1), h - crop_size)
                x2 = x1 + crop_size
                y2 = y1 + crop_size
                img_crop = img[y1:y2, x1:x2, :]
                # cv2.imwrite('{}.jpg'.format(i), img_crop)

                inputs_numpy = np.transpose(img_crop, (2, 0, 1))
                inputs_numpy = np.expand_dims(inputs_numpy.astype(np.float32),
                                              0)

                with torch.no_grad():
                    inputs = torch.from_numpy(inputs_numpy / 255)
                    inputs = Variable(inputs.to('cuda'), requires_grad=False)

                    f, y = model(inputs)
                    y = torch.sigmoid(y).detach().cpu().numpy()
                    index = np.argmax(y[0])
                    label = class_name[index]
                    conf = y[0, index]
                    ftxt.write('{} {} {} {} {}\n'.format(
                        image_file, classname, label, conf, boxes_str))
            else:
                ftxt.write('{} {} TSFAS 1.0\n'.format(image_file, classname))

    print('\n\n')