Example #1
def train(num_epochs, model, device, train_loader, val_loader, images, texts, lengths, converter, optimizer, lr_scheduler, prediction_dir, print_iter, opt):
    # Attention-style decoding: cross-entropy over the target sequence,
    # with the padding index 0 ignored.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)
    images = images.to(device)
    model.to(device)
    for epoch in range(num_epochs):
        count = 0
        model.train()
        for i, datas in enumerate(train_loader):
            datas, targets = datas
            batch_size = datas.size(0)
            count += batch_size
            dataloader.loadData(images, datas)
            t, l = converter.encode(targets, opt.batch_max_length)
            dataloader.loadData(texts, t)
            dataloader.loadData(lengths, l)
            preds = model(images, t[:, :-1])  # teacher forcing: feed targets without the last token

            # predictions line up with the targets shifted by one position
            cost = criterion(preds.view(-1, preds.shape[-1]), t[:, 1:].contiguous().view(-1))
            model.zero_grad()
            cost.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
            optimizer.step()
            if count % print_iter < train_loader.batch_size:
                print('epoch {} [{}/{}] loss : {}'.format(epoch, count, len(train_loader.dataset), cost.item()))

        if epoch % 3 == 0 and epoch != 0:
            res = validation(model, device, val_loader, images, texts, lengths, converter, prediction_dir, opt)
            save_model(opt.save_dir, f'{epoch}_{round(float(res), 3)}', model, optimizer, lr_scheduler, opt)
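The shift between the decoder input t[:, :-1] and the loss target t[:, 1:] is the usual teacher-forcing alignment for an attention decoder. A minimal, self-contained sketch of how CrossEntropyLoss with ignore_index=0 lines up with that shift (dummy tensors and token ids, purely illustrative):

import torch

# Hypothetical batch of 2 sequences, max length 5, vocabulary of 10 symbols; 0 is padding.
t = torch.tensor([[1, 4, 7, 2, 0],      # <GO> a  b  <EOS> <pad>
                  [1, 5, 2, 0, 0]])     # <GO> c  <EOS> <pad> <pad>
preds = torch.randn(2, 4, 10)           # stands in for model(images, t[:, :-1])

criterion = torch.nn.CrossEntropyLoss(ignore_index=0)
loss = criterion(preds.reshape(-1, 10), t[:, 1:].reshape(-1))  # padded positions contribute nothing
print(loss.item())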
Example #2
def test(model, device, test_loader, images, texts, lengths, converter,
         prediction_dir):
    model.to(device)
    images = images.to(device)
    model.eval()
    pred_json = {}
    pred_list = []
    make_folder(prediction_dir)
    for i, datas in enumerate(test_loader):
        datas, targets = datas
        batch_size = datas.size(0)
        dataloader.loadData(images, datas)
        t, l = converter.encode(targets)
        dataloader.loadData(texts, t)
        dataloader.loadData(lengths, l)

        preds = model(images)

        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))

        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        pred_string = converter.decode(preds.data, preds_size.data, raw=False)

        pred_dict = {
            'image_path': test_loader.dataset.get_img_path(i),
            'prediction': pred_string
        }
        pred_list.append(pred_dict)

    pred_json = {'predict': pred_list}
    with open(os.path.join(prediction_dir, 'predict.json'), 'w') as save_json:
        json.dump(pred_json, save_json, indent=2, ensure_ascii=False)
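Note: converter.decode(..., raw=False) together with preds.max(2) amounts to CTC-style greedy decoding: take the argmax class at each time step, collapse consecutive repeats, and drop the blank symbol. A hedged sketch of that collapse step, assuming blank index 0 and a hypothetical idx_to_char table:

def ctc_greedy_decode(pred_indices, idx_to_char, blank=0):
    """Collapse repeated indices and remove blanks from a per-timestep argmax sequence."""
    out = []
    prev = None
    for idx in pred_indices:
        if idx != blank and idx != prev:
            out.append(idx_to_char[idx])
        prev = idx
    return ''.join(out)

print(ctc_greedy_decode([0, 3, 3, 0, 5, 5, 5, 0], {3: 'c', 5: 'a'}))  # -> 'ca'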
Example #3
    def __init__(self, num_workers=8, update_every=8, save_every=10, t1=0.1, t2=0.2, pre=None):
        self.num_workers = num_workers
        self.update_every = update_every
        self.save_every = save_every
        self.t1 = t1
        self.t2 = t2
        self.model = PolygonModel(predict_delta=True).to(devices)
        if pre is not None:
            self.model.load_state_dict(torch.load(pre))
        self.dataloader = loadData(data_num=16,
                                   batch_size=self.num_workers,
                                   len_s=71,
                                   path='val',
                                   shuffle=False)
        # Freeze the encoder branches; only the remaining parameters are optimized.
        self.model.encoder.eval()
        self.model.delta_encoder.eval()
        for n, p in self.model.named_parameters():
            if 'encoder' in n:
                p.requires_grad = False
        self.train_params = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizer = optim.Adam(self.train_params,
                                    lr=2e-6,
                                    amsgrad=False)
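The constructor above freezes both encoders and hands only the still-trainable parameters to Adam. A minimal sketch of that freeze-then-filter pattern on a toy model (layer names here are illustrative, not from PolygonModel):

from torch import nn, optim

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# Freeze the first linear layer (stand-in for a frozen encoder).
for name, param in model.named_parameters():
    if name.startswith('0.'):
        param.requires_grad = False

train_params = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.Adam(train_params, lr=2e-6, amsgrad=False)
print(sum(p.numel() for p in train_params), 'trainable parameters')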
Example #4
def train(num_epochs, model, device, train_loader, val_loader, images, texts,
          lengths, converter, optimizer, lr_scheduler, prediction_dir,
          print_iter):
    criterion = CTCLoss()
    criterion.to(device)
    images = images.to(device)
    model.to(device)
    for epoch in range(num_epochs):
        print(epoch)
        count = 0
        model.train()
        for i, datas in enumerate(train_loader):
            datas, targets = datas
            batch_size = datas.size(0)
            count += batch_size
            dataloader.loadData(images, datas)
            t, l = converter.encode(targets)
            dataloader.loadData(texts, t)
            dataloader.loadData(lengths, l)
            preds = model(images)
            preds_size = Variable(torch.IntTensor([preds.size(0)] *
                                                  batch_size))
            cost = criterion(preds, texts, preds_size, lengths) / batch_size
            model.zero_grad()
            cost.backward()
            optimizer.step()
            if count % print_iter < train_loader.batch_size:
                print('epoch {} [{}/{}]loss : {}'.format(
                    epoch, count, len(train_loader.dataset), cost))

        validation(model, device, val_loader, images, texts, lengths,
                   converter, prediction_dir)

        save_model('{}'.format(epoch), model, optimizer, lr_scheduler)

        lr_scheduler.step()
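This variant trains with CTC: preds is expected time-major as (T, N, C) and the loss is averaged over the batch. A hedged sketch of the same computation using the built-in torch.nn.CTCLoss (random tensors and hypothetical sizes; the CTCLoss() used above may come from a warp-ctc binding instead):

import torch

T, N, C = 26, 4, 37   # time steps, batch size, classes including blank index 0
log_probs = torch.randn(T, N, C, requires_grad=True).log_softmax(2)
targets = torch.randint(1, C, (N, 10), dtype=torch.long)          # padded label matrix
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.randint(5, 11, (N,), dtype=torch.long)     # true label lengths

ctc = torch.nn.CTCLoss(blank=0, reduction='mean')
loss = ctc(log_probs, targets, input_lengths, target_lengths)
loss.backward()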
Example #5
def main(argv):

    args = parser.parse_args(argv[1:])
    #parser.print_help()
    #print(args)
    #sys.exit()

    batch_size = args.batch_size  # 100
    #print('Batch_size: ' + str(batch_size))
    train_steps = args.steps  #10000 # 1000 #LOOPED LATER ON
    nr_epochs = None
    hu = [int(x) for x in args.hidden_units.split("x")]
    hidden_units = hu  #[200, 200] # [10, 10] [400, 400] [400, 400, 400, 400]
    chosen_label = args.chosen_label  #'ENGINE_TYPE' # 'T_CHASSIS' 'COUNTRY' 'ENGINE_TYPE' 'BRAND_TYPE'
    max_nr_nan = 0
    fixed_selection = True

    my_id = "l" + chosen_label + "_s" + str(
        train_steps) + "_h" + args.hidden_units

    label_path = 'Labels/'
    data_path = 'Data_original/'  # 'Data_original/' 'Testdata/'
    structured_data_path = 'Compressed/'  # 'Compressed_valid_chassis' Compressed/Compressed_single/

    # Label_mapping holds key value pairs where key is the label and value its integer representation
    label_mapping = dataloader.get_valid_labels(
        label_path, chosen_label)  # Labels from labels file only

    #Get three structured separate dataframes from data sources
    #trainframe, testframe, validationframe = dataloader.loadData(data_path, False, label_mapping, max_nr_nan, fixed_selection)
    trainframe, testframe, validationframe = dataloader.loadData(
        structured_data_path, True, label_mapping, max_nr_nan, fixed_selection)

    # Train model data
    trainset, labels_training, label_mapping, int_labels_train = \
            dataloader.get_model_data(trainframe, label_mapping, chosen_label)

    # Test model data
    testset, labels_test, label_mapping, int_labels_test = \
            dataloader.get_model_data(testframe, label_mapping, chosen_label)

    # Validate model data
    validationset, labels_validate, label_mapping, int_labels_validate = \
            dataloader.get_model_data(validationframe, label_mapping, chosen_label)

    if args.test:
        sys.exit(2)

    my_checkpointing_config = tensorflow.estimator.RunConfig(
        #save_checkpoints_secs = 1*60,  # Save checkpoints every minute (conflicts with save_checkpoints_steps)
        keep_checkpoint_max=4,  # Retain the 4 most recent checkpoints.
        log_step_count_steps=1000,
        save_summary_steps=1000,
        save_checkpoints_steps=1000)

    ### Model training
    my_feature_columns = []
    for key in trainset.keys():
        my_feature_columns.append(
            tensorflow.feature_column.numeric_column(key=key))

    # The model must choose between x classes.
    print('Number of unique labels, n_classes: ' + str(len(label_mapping)))
    #print('Number of unique trucks, n_classes: ' + str(int_labels.size))

    # optimizer = tensorflow.train.GradientDescentOptimizer(learning_rate=0.1) ?
    # optimizer = tensorflow.train.AdagradOptimizer(learning_rate=0.1) ?
    # optimizer = tensorflow.train.AdagradDAOptimizer(learning_rate=0.1, global_step= ?) global_step=train_steps?
    # optimizer = tensorflow.train.AdamOptimizer(learning_rate=0.1) ?
    if args.optimiser == "Adam":
        opt = tensorflow.train.AdamOptimizer(learning_rate=args.learning_rate)
        my_id = my_id + "_o" + args.optimiser
        my_id = my_id + "_lr" + str(args.learning_rate)
    elif args.optimiser == "GDO":
        opt = tensorflow.train.GradientDescentOptimizer(
            learning_rate=args.learning_rate)
        my_id = my_id + "_o" + args.optimiser
        my_id = my_id + "_lr" + str(args.learning_rate)
    else:
        opt = None  # default Adagrad, which does not take a learning rate here
        my_id = my_id + "_o" + args.optimiser

    if args.dropout:
        my_id = my_id + "_do" + str(args.dropout)

    if args.suffix != "":
        my_id = my_id + "_" + args.suffix

    # Save data files
    trainset.to_csv("train_" + my_id + ".csv", index=False)
    labels_training.to_csv("labels_train_" + my_id + ".csv", index=False)
    testset.to_csv("test_" + my_id + ".csv", index=False)
    labels_test.to_csv("labels_test_" + my_id + ".csv", index=False)
    validationset.to_csv("val_" + my_id + ".csv", index=False)
    labels_validate.to_csv("labels_val_" + my_id + ".csv", index=False)

    resultfile = open("Results/" + my_id + "_model_results.txt", "w")

    resultfile.write('\nModel training: ' +
                     datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
                     '\n\n')
    resultfile.write('Layer setting: ' + str(hidden_units) + '\n')
    resultfile.write('Train steps: ' + str(train_steps) + '\n')
    resultfile.write('Number epochs: ' + str(nr_epochs) + '\n')
    resultfile.write('Batchsize: ' + str(batch_size) + '\n')
    resultfile.write('Chosen label: ' + chosen_label + '\n')
    resultfile.write('Max_nr_nan: ' + str(max_nr_nan) + '\n')
    resultfile.write('Fixed_selection: ' + str(fixed_selection) + '\n')
    resultfile.flush()

    if opt:
        classifier = tensorflow.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            hidden_units=hidden_units,
            n_classes=len(label_mapping),
            model_dir="./models/" + my_id,
            config=my_checkpointing_config,
            dropout=args.dropout,
            optimizer=opt)
    else:
        classifier = tensorflow.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            hidden_units=hidden_units,
            n_classes=len(label_mapping),
            model_dir="./models/" + my_id,
            dropout=args.dropout,
            config=my_checkpointing_config)

    ### Train the Model.
    # PJB added loop
    for i in range(0, args.iterations):
        print('\nModel training\n\n\n')
        #resultfile.write('\nModel training: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\n\n\n')
        classifier.train(input_fn=lambda: dataloader.train_input_fn(
            trainset, int_labels_train, batch_size, nr_epochs),
                         steps=train_steps)

        ### Test the model
        print('\nModel testing\n\n\n')
        resultfile.write('\nModel testing\n\n\n')
        # Evaluate the model.
        eval_result = classifier.evaluate(
            input_fn=lambda: dataloader.eval_input_fn(testset, int_labels_test,
                                                      batch_size))
        print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
        resultfile.write(
            '\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

        ### Evaluate the model
        print('\nModel evaluation\n\n\n')
        resultfile.write('\nModel evaluation\n\n\n')
        #
        expected = list(int_labels_validate)  #list(label_mapping.keys())
        predictions = classifier.predict(
            input_fn=lambda: dataloader.eval_input_fn(
                validationset, labels=expected, batch_size=batch_size))
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
        predictfile = open("Results/" + my_id + "_predictions.txt", "a")

        corr = 0  # count correct predictions
        for pred_dict, expec in zip(predictions, expected):
            template = ('Prediction is "{}" ({:5.1f}%), expected "{}"')
            class_id = pred_dict["class_ids"][0]
            probability = pred_dict['probabilities'][class_id]
            print(template.format(class_id, 100 * probability, expec))
            resultfile.write(
                template.format(class_id, 100 * probability, expec))
            # indices should be reverse mapped to correct label
            predictfile.write(
                "Loop {}: Prediction is {} ({:.1f}%), expected {}\n".format(
                    i, class_id, 100 * probability, expec))
            if class_id == expec:
                corr += 1
        print("CORRECT", corr, "/", len(expected))
        resultfile.write("Correct: " + str(corr) + "\n")
        predictfile.write("Loop {}: Correct {}/{} ({:.2f}%)\n".format(
            i, corr, len(expected), (corr * 100) / len(expected)))
        predictfile.close()
        resultfile.write('\n******************************\n')
    resultfile.close()
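dataloader.train_input_fn and dataloader.eval_input_fn are project-specific helpers; for tf.estimator.DNNClassifier they only need to return a tf.data.Dataset of (features, labels). A hedged TF 1.x-style sketch of what such input functions typically look like (names and signatures assumed, not the project's actual code):

import tensorflow as tf

def train_input_fn(features, labels, batch_size, num_epochs=None):
    """Shuffle, repeat and batch a pandas DataFrame plus its labels for training."""
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(1000).repeat(num_epochs).batch(batch_size)

def eval_input_fn(features, labels, batch_size):
    """Batch the data once, without shuffling, for evaluation or prediction."""
    inputs = dict(features) if labels is None else (dict(features), labels)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    return dataset.batch(batch_size)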
Example #6
parser.add_argument('--sampling_threshold', type = float, default = 1e-5, help = "Subsampling threshold, default 1e-5")
parser.add_argument('--min_freq', type = int, default = 5, help = "Minimum Frequency of words, default 5 ")
parser.add_argument('--batch_size', type = int, default = 512, help = "Batch size, default 512")
parser.add_argument('--window_size', type = int, default = 5, help = "Window size, default 5")
parser.add_argument('--lr', type = float, default = 0.003, help = "Learning rate for generator optimizer, default 0.003")
parser.add_argument('--embed_size', type = int, default = 300, help = "Embed size, default 300")
parser.add_argument('--mode', type = str, default = "train", help = "Mode Type, default train")
parser.add_argument('--N_words', type = int, default = 10, help = "Top N similar words, default 10")
parser.add_argument('--word', type = str, default = "woman", help = "word, default word is woman")
parser.add_argument('--word1', type = str, default = "king", help = "1st word, default king")
parser.add_argument('--word2', type = str, default = "male", help = "2nd word, default male")
parser.add_argument('--word3', type = str, default = "female", help = "3rd word, default female")
args = parser.parse_args()


tokenize_data = loadData()
int_text, word2idx, idx2word, freq, vocab = prepareData(tokenize_data, args.min_freq)


if args.mode == "train":
	vocab_size = sum([freq[k] for k in freq])
	subsampled_words = subsampling(freq, args.sampling_threshold, vocab, vocab_size, word2idx)
	neg_sample = negativeSampling(freq)
	#print(neg_sample.shape)

	device='cpu'

	model = SkipGram(len(word2idx), args.embed_size, neg_sample).to(device)
	optimizer = optim.Adam(model.parameters(), args.lr)
	epoch = args.epochs
	steps = 0
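subsampling and negativeSampling follow the standard word2vec recipe: a word w is dropped with probability 1 - sqrt(t / f(w)), where t is the sampling threshold and f(w) the word's relative frequency, and negative samples are drawn from the unigram distribution raised to the 3/4 power. A hedged sketch of both ideas (signatures simplified relative to the calls above; freq is assumed to map tokens to counts):

import random
import numpy as np

def subsample(int_text, freq, threshold):
    """Drop each occurrence of w with probability 1 - sqrt(threshold / f(w))."""
    total = sum(freq.values())
    p_drop = {w: 1.0 - np.sqrt(threshold / (c / total)) for w, c in freq.items()}
    return [w for w in int_text if random.random() > p_drop[w]]

def negative_sampling_dist(freq):
    """Unigram distribution raised to the 3/4 power (Mikolov et al.)."""
    counts = np.array([freq[w] for w in sorted(freq)], dtype=np.float64)
    probs = counts ** 0.75
    return probs / probs.sum()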
Example #7
def train(config, load_resnet50=False, pre_trained=None, cur_epochs=0):
    batch_size = config['batch_size']
    lr = config['lr']
    epochs = config['epoch']
    train_dataloader = loadData('train', 16, 71, batch_size)
    val_loader = loadData('val', 16, 71, batch_size, shuffle=False)
    model = PolygonModel(load_predtrained_resnet50=load_resnet50,
                         predict_delta=True).to(devices)

    if pre_trained is not None:
        model.load_state_dict(torch.load(pre_trained))
        print('loaded pretrained polygon net!')

    # set to eval
    model.encoder.eval()
    model.delta_encoder.eval()

    for n, p in model.named_parameters():
        if 'encoder' in n:
            print('Not train:', n)
            p.requires_grad = False

    print('No weight decay in RL training')

    train_params = [p for p in model.parameters() if p.requires_grad]
    train_params1 = []
    train_params2 = []
    for n, p in model.named_parameters():
        if p.requires_grad and 'delta' not in n:
            train_params1.append(p)
        elif p.requires_grad and 'delta' in n:
            train_params2.append(p)

    # Adam optimizers
    optimizer = optim.Adam(train_params, lr=lr, amsgrad=False)
    optimizer1 = optim.Adam(train_params1, lr=lr, amsgrad=False)
    optimizer2 = optim.Adam(train_params2, lr=lr, amsgrad=False)

    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=config['lr_decay'][0],
                                          gamma=config['lr_decay'][1])

    print('Total Epochs:', epochs)
    for it in range(cur_epochs, epochs):
        # init
        accum = defaultdict(float)
        accum2 = defaultdict(float)
        model.delta_model.train()
        model.decoder.train()
        for index, batch in enumerate(train_dataloader):
            img = torch.tensor(batch[0], dtype=torch.float).cuda()
            bs = img.shape[0]
            WH = batch[-1]  # WH_dict
            left_WH = WH['left_WH']
            origion_WH = WH['origion_WH']
            object_WH = WH['object_WH']

            # TODO: step1
            model.delta_model.eval()
            model.decoder.train()
            outdict_sample = model(img,
                                   mode='train_rl',
                                   temperature=config['temperature'],
                                   temperature2=0.0)  # (bs, seq_len, 28*28+1)
            # greedy
            with torch.no_grad():
                outdict_greedy = model(img, mode='train_rl', temperature=0.0)

            # Get RL loss
            sampling_pred_x = outdict_sample['final_pred_x'].cpu().numpy()
            sampling_pred_y = outdict_sample['final_pred_y'].cpu().numpy()
            sampling_pred_len = outdict_sample['lengths'].cpu().numpy()
            greedy_pred_x = outdict_greedy['final_pred_x'].cpu().numpy()
            greedy_pred_y = outdict_greedy['final_pred_y'].cpu().numpy()
            greedy_pred_len = outdict_greedy['lengths'].cpu().numpy()
            sampling_iou = np.zeros(bs, dtype=np.float32)
            greedy_iou = np.zeros(bs, dtype=np.float32)
            vertices_GT = []  # (bs, 70, 2)
            vertices_sampling = []
            vertices_greedy = []
            GT_polys = batch[-2].numpy()  # (bs, 70, 2)
            GT_mask = batch[7]  # (bs, 70)

            for ii in range(bs):
                scaleW = 224.0 / float(object_WH[0][ii])
                scaleH = 224.0 / float(object_WH[1][ii])
                leftW = left_WH[0][ii]
                leftH = left_WH[1][ii]
                tmp = []
                all_len = np.sum(GT_mask[ii].numpy())
                cnt_target = GT_polys[ii][:all_len]
                for vert in cnt_target:
                    tmp.append(
                        (vert[0] / scaleW + leftW, vert[1] / scaleH + leftH))
                vertices_GT.append(tmp)

                tmp = []
                for j in range(sampling_pred_len[ii] - 1):
                    vertex = (sampling_pred_x[ii][j] / scaleW + leftW,
                              sampling_pred_y[ii][j] / scaleH + leftH)
                    tmp.append(vertex)
                vertices_sampling.append(tmp)

                tmp = []
                for j in range(greedy_pred_len[ii] - 1):
                    vertex = (greedy_pred_x[ii][j] / scaleW + leftW,
                              greedy_pred_y[ii][j] / scaleH + leftH)
                    tmp.append(vertex)
                vertices_greedy.append(tmp)

            # IoU between sampling/greedy and GT
            for ii in range(bs):
                sam = vertices_sampling[ii]
                gt = vertices_GT[ii]
                gre = vertices_greedy[ii]

                if len(sam) < 2:
                    sampling_iou[ii] = 0.
                else:
                    iou_sam, _, _ = iou(sam, gt, origion_WH[1][ii],
                                        origion_WH[0][ii])
                    sampling_iou[ii] = iou_sam

                if len(gre) < 2:
                    greedy_iou[ii] = 0.
                else:
                    iou_gre, _, _ = iou(gre, gt, origion_WH[1][ii],
                                        origion_WH[0][ii])
                    greedy_iou[ii] = iou_gre

            logprobs = outdict_sample['log_probs']
            # self-critical RL loss; the log-probability is the sum of the two log-prob terms
            loss = losses.self_critical_loss(
                logprobs, outdict_sample['lengths'],
                torch.from_numpy(sampling_iou).to(devices),
                torch.from_numpy(greedy_iou).to(devices))
            model.zero_grad()
            if 'grad_clip' in config:
                nn.utils.clip_grad_norm_(model.parameters(),
                                         config['grad_clip'])
            loss.backward()
            optimizer1.step()  # update the main-decoder parameters

            accum['loss_total'] += loss
            accum['sampling_iou'] += np.mean(sampling_iou)
            accum['greedy_iou'] += np.mean(greedy_iou)
            # print running losses
            if (index + 1) % 20 == 0:
                print('Epoch {} - Step {}'.format(it + 1, index + 1))
                print(
                    '     Main Decoder: loss_total {}, sampling_iou {}, greedy_iou {}'
                    .format(accum['loss_total'] / 20,
                            accum['sampling_iou'] / 20,
                            accum['greedy_iou'] / 20))
                accum = defaultdict(float)

            # TODO: step 2, train the delta_model decoder
            model.decoder.eval()
            model.delta_model.train()

            outdict_sample = model(
                img,
                mode='train_rl',
                temperature=0.0,
                temperature2=config['temperature2'])  # (bs, seq_len, 28*28+1)
            # greedy
            with torch.no_grad():
                outdict_greedy = model(img,
                                       mode='train_rl',
                                       temperature=0.0,
                                       temperature2=0.0)

            # Get RL loss
            sampling_pred_x = outdict_sample['final_pred_x'].cpu().numpy()
            sampling_pred_y = outdict_sample['final_pred_y'].cpu().numpy()
            sampling_pred_len = outdict_sample['lengths'].cpu().numpy()
            greedy_pred_x = outdict_greedy['final_pred_x'].cpu().numpy()
            greedy_pred_y = outdict_greedy['final_pred_y'].cpu().numpy()
            greedy_pred_len = outdict_greedy['lengths'].cpu().numpy()
            sampling_iou = np.zeros(bs, dtype=np.float32)
            greedy_iou = np.zeros(bs, dtype=np.float32)
            vertices_GT = []  # (bs, 70, 2)
            vertices_sampling = []
            vertices_greedy = []
            GT_polys = batch[-2].numpy()  # (bs, 70, 2)
            GT_mask = batch[7]  # (bs, 70)

            for ii in range(bs):
                scaleW = 224.0 / object_WH[0][ii]
                scaleH = 224.0 / object_WH[1][ii]
                leftW = left_WH[0][ii]
                leftH = left_WH[1][ii]
                tmp = []
                all_len = np.sum(GT_mask[ii].numpy())
                cnt_target = GT_polys[ii][:all_len]
                for vert in cnt_target:
                    tmp.append(
                        (vert[0] / scaleW + leftW, vert[1] / scaleH + leftH))
                vertices_GT.append(tmp)

                tmp = []
                for j in range(sampling_pred_len[ii] - 1):
                    vertex = (sampling_pred_x[ii][j] / scaleW + leftW,
                              sampling_pred_y[ii][j] / scaleH + leftH)
                    tmp.append(vertex)
                vertices_sampling.append(tmp)

                tmp = []
                for j in range(greedy_pred_len[ii] - 1):
                    vertex = (greedy_pred_x[ii][j] / scaleW + leftW,
                              greedy_pred_y[ii][j] / scaleH + leftH)
                    tmp.append(vertex)
                vertices_greedy.append(tmp)

            # IoU between sampling/greedy and GT
            for ii in range(bs):
                sam = vertices_sampling[ii]
                gt = vertices_GT[ii]
                gre = vertices_greedy[ii]

                if len(sam) < 2:
                    sampling_iou[ii] = 0.
                else:
                    iou_sam, _, _ = iou(sam, gt, origion_WH[1][ii],
                                        origion_WH[0][ii])
                    sampling_iou[ii] = iou_sam

                if len(gre) < 2:
                    greedy_iou[ii] = 0.
                else:
                    iou_gre, _, _ = iou(gre, gt, origion_WH[1][ii],
                                        origion_WH[0][ii])
                    greedy_iou[ii] = iou_gre

            # TODO:
            logprobs = outdict_sample['delta_logprob']
            # self-critical RL loss; the log-probability is the sum of the two log-prob terms
            loss = losses.self_critical_loss(
                logprobs, outdict_sample['lengths'],
                torch.from_numpy(sampling_iou).to(devices),
                torch.from_numpy(greedy_iou).to(devices))
            model.zero_grad()
            if 'grad_clip' in config:
                nn.utils.clip_grad_norm_(model.parameters(),
                                         config['grad_clip'])
            loss.backward()
            optimizer2.step()

            accum2['loss_total'] += loss
            accum2['sampling_iou'] += np.mean(sampling_iou)
            accum2['greedy_iou'] += np.mean(greedy_iou)
            # print running losses
            if (index + 1) % 20 == 0:
                print(
                    '     Second Decoder: loss_total {}, sampling_iou {}, greedy_iou {}'
                    .format(accum2['loss_total'] / 20,
                            accum2['sampling_iou'] / 20,
                            accum2['greedy_iou'] / 20))
                accum2 = defaultdict(float)

            if (index + 1) % config['val_every'] == 0:
                # validation
                model.decoder.eval()
                model.delta_model.eval()
                val_IoU = []
                less_than2 = 0
                with torch.no_grad():
                    for val_index, val_batch in enumerate(val_loader):
                        img = torch.tensor(val_batch[0],
                                           dtype=torch.float).cuda()
                        bs = img.shape[0]
                        WH = val_batch[-1]  # WH_dict
                        left_WH = WH['left_WH']
                        origion_WH = WH['origion_WH']
                        object_WH = WH['object_WH']
                        val_target = val_batch[-2].numpy()  # (bs, 70, 2)
                        val_mask_final = val_batch[7]  # (bs, 70)
                        out_dict = model(
                            img, mode='test')  # (N, seq_len) # test_time
                        pred_x = out_dict['final_pred_x'].cpu().numpy()
                        pred_y = out_dict['final_pred_y'].cpu().numpy()
                        pred_len = out_dict['lengths']  # predicted sequence lengths
                        # compute IoU per instance
                        for ii in range(bs):
                            vertices1 = []
                            vertices2 = []
                            scaleW = 224.0 / float(object_WH[0][ii])
                            scaleH = 224.0 / float(object_WH[1][ii])
                            leftW = left_WH[0][ii]
                            leftH = left_WH[1][ii]

                            all_len = np.sum(val_mask_final[ii].numpy())
                            cnt_target = val_target[ii][:all_len]
                            for vert in cnt_target:
                                vertices2.append((vert[0] / scaleW + leftW,
                                                  vert[1] / scaleH + leftH))
                            pred_len_b = pred_len[ii] - 1
                            if pred_len_b < 2:
                                val_IoU.append(0.)
                                less_than2 += 1
                                continue
                            for j in range(pred_len_b):
                                vertex = (pred_x[ii][j] / scaleW + leftW,
                                          pred_y[ii][j] / scaleH + leftH)
                                vertices1.append(vertex)

                            _, nu_cur, de_cur = iou(vertices1, vertices2,
                                                    origion_WH[1][ii],
                                                    origion_WH[0][ii])
                            iou_cur = nu_cur * 1.0 / de_cur if de_cur != 0 else 0
                            val_IoU.append(iou_cur)

                val_iou_data = np.mean(np.array(val_IoU))
                print('Validation After Epoch {} - step {}'.format(
                    str(it + 1), str(index + 1)))
                print('           IoU      on validation set: ', val_iou_data)
                print('less than 2: ', less_than2)
                print('Saving training parameters after this epoch:')
                torch.save(
                    model.state_dict(),
                    '/data/duye/pretrained_models/FPNRLtrain/ResNext_Plus_RL2_retain_Epoch{}-Step{}_ValIoU{}.pth'
                    .format(str(it + 1), str(index + 1), str(val_iou_data)))
                # set to init
                model.decoder.train()  # important
                model.delta_model.train()

        scheduler.step()
        print('Epoch {} Completed!'.format(str(it + 1)))
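losses.self_critical_loss implements self-critical sequence training: the sampled polygon's IoU is baselined by the greedy polygon's IoU, and that advantage weights the negative log-probability of the sampled sequence. A minimal sketch under an assumed masking convention (log_probs of shape (bs, seq_len), lengths of shape (bs,)):

import torch

def self_critical_loss(log_probs, lengths, sample_reward, greedy_reward):
    """REINFORCE with a greedy baseline: loss = -(r_sample - r_greedy) * sum(log p)."""
    bs, seq_len = log_probs.shape
    # mask out time steps beyond each sequence's predicted length
    mask = torch.arange(seq_len, device=log_probs.device)[None, :] < lengths[:, None]
    seq_logprob = (log_probs * mask.float()).sum(dim=1)       # (bs,)
    advantage = (sample_reward - greedy_reward).detach()      # (bs,)
    return -(advantage * seq_logprob).mean()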
Example #8
def main(argv):

    args = parser.parse_args(argv[1:])
    batch_size = args.batch_size
    train_steps = args.train_steps
    nr_epochs = args.nr_epochs  # None
    if nr_epochs == 0:
        nr_epochs = None
    hidden_units_arg = list(args.hidden_units.split('x'))
    hidden_units = []

    for layer in hidden_units_arg:
        hidden_units.append(int(layer))

    choosen_label = args.choosen_label
    max_nr_nan = args.max_nr_nan  # 0
    if args.fixed_selection.lower() == 'false':
        fixed_selection = False
    else:
        fixed_selection = True

    data_path = args.data_path

    file_suffix = '-' + choosen_label + '-' + args.hidden_units + '-' + str(
        args.train_steps) + '-' + args.suffix

    dropout = None
    kfolds = 5

    resultfile = open("Results/model_results" + file_suffix + ".txt", "w")
    resultfile.write('\n\rModel training: ' +
                     datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
                     '\n\n\r')
    resultfile.write('Layer setting: ' + str(hidden_units) + '\n\r')
    resultfile.write('Train steps: ' + str(train_steps) + '\n\r')
    resultfile.write('Number epochs: ' + str(nr_epochs) + '\n\r')
    resultfile.write('Batchsize: ' + str(batch_size) + '\n\r')
    resultfile.write('Choosen label: ' + choosen_label + '\n\r')
    resultfile.write('Max_nr_nan: ' + str(max_nr_nan) + '\n\r')
    resultfile.write('Fixed_selection: ' + str(fixed_selection) + '\n\r')
    resultfile.write('Data path: ' + str(data_path) + '\n\r')
    resultfile.write('Dropout: ' + str(dropout) + '\n\r')
    resultfile.write('Kfold: ' + str(kfolds) + '\n\r')

    # Label_mapping holds key value pairs where key is the label and value its integer representation
    #label_mapping = dataloader.get_valid_labels(data_path, choosen_label) # Labels from labels file only
    label_mapping = {0: 0, 1: 1}
    resultfile.write('Label mapping: ' + str(label_mapping) + '\n\r')

    inverted_label_mapping = {}
    for key, value in label_mapping.items():
        inverted_label_mapping[value] = key

    resultfile.write('Inverted label mapping: ' + str(inverted_label_mapping) +
                     '\n\r')
    resultfile.flush()

    #Get three structured separate dataframes from data sources
    trainframe, testframe, validationframe, first_column, last_column = dataloader.loadData(
        data_path, max_nr_nan, fixed_selection, file_suffix)
    resultfile.flush()

    if kfolds <= 1:

        frameinfo = dataloader.analyse_frame(trainframe)
        resultfile.write('\n\rTrainframe:\n\r')
        resultfile.write(frameinfo)
        frameinfo = dataloader.analyse_frame(testframe)
        resultfile.write('\n\r\n\rTestframe:\n\r')
        resultfile.write(frameinfo)
        frameinfo = dataloader.analyse_frame(validationframe)
        resultfile.write('\n\r\n\rValidationframe:\n\r')
        resultfile.write(frameinfo)

        # Train model data
        trainset, labels_training, label_mapping, int_labels_train = \
         dataloader.get_model_data(trainframe, label_mapping, choosen_label, first_column, last_column)

        # Test model data
        testset, labels_test, label_mapping, int_labels_test = \
         dataloader.get_model_data(testframe, label_mapping, choosen_label, first_column, last_column)

        # Validate model data
        validationset, labels_validate, label_mapping, int_labels_validate = \
         dataloader.get_model_data(validationframe, label_mapping, choosen_label, first_column, last_column)

        ### Model training
        my_feature_columns = []
        for key in trainset.keys():
            my_feature_columns.append(
                tensorflow.feature_column.numeric_column(key=key))

        # The model must choose between x classes.
        print('Number of unique labels, n_classes: ' + str(len(label_mapping)))

        # optimizer = tensorflow.train.GradientDescentOptimizer(learning_rate=0.1) ?
        # optimizer = tensorflow.train.AdagradOptimizer(learning_rate=0.1) ?
        # optimizer = tensorflow.train.AdagradDAOptimizer(learning_rate=0.1, global_step= ?) global_step=train_steps?
        # optimizer = tensorflow.train.AdamOptimizer(learning_rate=0.1) ?
        optimizer = tensorflow.train.ProximalAdagradOptimizer(
            learning_rate=0.01, l1_regularization_strength=0.01)
        #optimizer = 'Adagrad'

        classifier = tensorflow.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            hidden_units=hidden_units,
            n_classes=len(label_mapping),
            dropout=dropout,
            batch_norm=False,
            optimizer=optimizer,
            model_dir='/data/Tensorflow/' + file_suffix)

        ### Train the Model.
        print('\nModel training\n\r\n\r\n')
        #resultfile.write('\nModel training: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\n\n\n')
        classifier.train(input_fn=lambda: dataloader.train_input_fn(
            trainset, int_labels_train, batch_size, nr_epochs),
                         steps=train_steps)

        ### Test the model
        print('\n\r\n\rModel testing\n\n\n')
        resultfile.write('\n\r\n\rModel testing\n\r')
        # Evaluate the model.

        eval_result = classifier.evaluate(
            input_fn=lambda: dataloader.eval_input_fn(testset, int_labels_test,
                                                      batch_size))
        print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
        resultfile.write(
            '\n\rTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
        resultfile.write('\n\rEval result:\n\r' + str(eval_result))

    else:

        foldframe = trainframe.append(testframe)
        foldframe = foldframe.reindex(numpy.random.permutation(
            foldframe.index))
        foldtrainframe = pandas.DataFrame()
        foldtestframe = pandas.DataFrame()
        foldframe_list = []

        foldframe_list = dataloader.getFoldFrame(foldframe_list, kfolds,
                                                 foldframe)

        frameinfo = dataloader.analyse_frame(validationframe)
        resultfile.write('\n\r\n\rValidationframe:\n\r')
        resultfile.write(frameinfo)

        # Validate model data
        validationset, labels_validate, label_mapping, int_labels_validate = \
         dataloader.get_model_data(validationframe, label_mapping, choosen_label, first_column, last_column)

        testresults = []

        for testindex in range(kfolds):

            foldtrainframe, foldtestframe = dataloader.getFoldTrainFrames(
                foldframe_list, testindex)

            frameinfo = dataloader.analyse_frame(foldtrainframe)
            resultfile.write('\n\rTrainframe:\n\r')
            resultfile.write(frameinfo)
            frameinfo = dataloader.analyse_frame(foldtestframe)
            resultfile.write('\n\r\n\rTestframe:\n\r')
            resultfile.write(frameinfo)

            # Train model data
            trainset, labels_training, label_mapping, int_labels_train = \
             dataloader.get_model_data(foldtrainframe, label_mapping, choosen_label, first_column, last_column)

            # Test model data
            testset, labels_test, label_mapping, int_labels_test = \
             dataloader.get_model_data(foldtestframe, label_mapping, choosen_label, first_column, last_column)

            ### Model training
            my_feature_columns = []
            for key in trainset.keys():
                my_feature_columns.append(
                    tensorflow.feature_column.numeric_column(key=key))

            # The model must choose between x classes.
            print('Number of unique labels, n_classes: ' +
                  str(len(label_mapping)))

            # optimizer = tensorflow.train.GradientDescentOptimizer(learning_rate=0.1) ?
            # optimizer = tensorflow.train.AdagradOptimizer(learning_rate=0.1) ?
            # optimizer = tensorflow.train.AdagradDAOptimizer(learning_rate=0.1, global_step= ?) global_step=train_steps?
            # optimizer = tensorflow.train.AdamOptimizer(learning_rate=0.1) ?
            optimizer = tensorflow.train.ProximalAdagradOptimizer(
                learning_rate=0.01, l1_regularization_strength=0.01)
            #optimizer = 'Adagrad'

            classifier = tensorflow.estimator.DNNClassifier(
                feature_columns=my_feature_columns,
                hidden_units=hidden_units,
                n_classes=len(label_mapping),
                dropout=dropout,
                batch_norm=False,
                optimizer=optimizer,
                model_dir='/data/Tensorflow/' + file_suffix)

            ### Train the Model.
            print('\nModel training\n\r\n\r\n')
            classifier.train(input_fn=lambda: dataloader.train_input_fn(
                trainset, int_labels_train, batch_size, nr_epochs),
                             steps=train_steps)

            ### Test the model
            print('\n\r\n\rModel testing\n\n\n')
            resultfile.write('\n\r\n\rModel testing\n\r')

            eval_result = classifier.evaluate(
                input_fn=lambda: dataloader.eval_input_fn(
                    testset, int_labels_test, batch_size))
            print(
                '\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
            resultfile.write('\n\rK-fold:' + str(testindex + 1))
            resultfile.write('\n\rTest set accuracy: {accuracy:0.3f}\n'.format(
                **eval_result))
            resultfile.write('\n\rEval result:\n\r' + str(eval_result))
            testresults.append(eval_result['accuracy'])

        average = 0.0
        for value in testresults:
            average += value

        resultfile.write('\n\rAverage testresult:' +
                         str(average / len(testresults)))

    ### Evaluate the model
    print('\nModel evaluation\n\n\n')
    resultfile.write('\n\rModel evaluation\n\r\n')
    expected = list(
        int_labels_validate
    )  # The integer representation of the labels. Converts with: inverted_label_mapping() to label
    predictions = classifier.predict(input_fn=lambda: dataloader.eval_input_fn(
        validationset, labels=None, batch_size=batch_size))
    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

    predictfile = open("Results/predictions" + file_suffix + ".txt", "w")

    number_of_matches = 0
    number_of_validations = 0
    y_true = []
    y_predicted = []
    y_probability = []

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        resultfile.write('\n\r')
        resultfile.write(
            template.format(inverted_label_mapping[class_id],
                            100 * probability, inverted_label_mapping[expec]))
        number_of_validations += 1
        y_true.append(inverted_label_mapping[expec])
        y_predicted.append(inverted_label_mapping[class_id])
        y_probability.append(pred_dict['probabilities'][1])

        if str(inverted_label_mapping[class_id]) == str(
                inverted_label_mapping[expec]):
            predictfile.write('Percent: ' + str(100 * probability) + '  ' +
                              choosen_label + ': ' +
                              str(inverted_label_mapping[expec]) + '\n\r')
            number_of_matches += 1

    confusion_matrix_result = confusion_matrix(
        y_true, y_predicted,
        labels=sorted(label_mapping.keys()))  # labels=[0,1]
    print(confusion_matrix_result)
    dataloader.print_cm(confusion_matrix_result, list(label_mapping.keys()),
                        file_suffix)
    dataloader.print_roc_curve(numpy.array(y_true), numpy.array(y_probability),
                               file_suffix)

    predictfile.write('\n\rNumber of matches in percent: ' +
                      str(100 * number_of_matches / number_of_validations))
    predictfile.write('\n\rTotal: ' + str(number_of_validations))
    predictfile.write('\n\rMatches: ' + str(number_of_matches))
    resultfile.write('\n\r******************************\n\r')
    resultfile.close()
    predictfile.close()
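dataloader.getFoldFrame and getFoldTrainFrames are project helpers; the k-fold mechanics they stand for amount to cutting the shuffled frame into k row slices and, for each test index, concatenating the remaining slices as training data. A hedged pandas sketch (function names mirror the calls above but are illustrative):

import numpy
import pandas

def getFoldFrame(foldframe_list, kfolds, foldframe):
    """Split the shuffled dataframe into kfolds roughly equal row slices."""
    fold_size = int(numpy.ceil(len(foldframe) / kfolds))
    return [foldframe.iloc[i * fold_size:(i + 1) * fold_size] for i in range(kfolds)]

def getFoldTrainFrames(foldframe_list, testindex):
    """Use slice `testindex` as the test fold and concatenate the rest for training."""
    testframe = foldframe_list[testindex]
    trainframe = pandas.concat(
        [f for i, f in enumerate(foldframe_list) if i != testindex])
    return trainframe, testframe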
Example #9
def train(config, load_resnet50=False, pre_trained=None, cur_epochs=0):

    batch_size = config['batch_size']
    lr = config['lr']
    epochs = config['epoch']

    train_dataloader = loadData('train', 16, 71, batch_size)
    val_loader = loadData('val', 16, 71, batch_size * 2, shuffle=False)
    model = PolygonModel(load_predtrained_resnet50=load_resnet50,
                         predict_delta=True).to(devices)
    # checkpoint
    if pre_trained is not None:
        # model.load_state_dict(torch.load(pre_trained))
        # load parameter by parameter
        pretrained_dict = torch.load(pre_trained)
        pre_name = []
        for name in pretrained_dict:
            pre_name.append(name)
        for name in model.state_dict():
            if name in pre_name:
                model.state_dict()[name].data.copy_(pretrained_dict[name])
        print('loaded pretrained polygon net!')

    # Set to eval
    model.encoder.eval()
    for n, p in model.named_parameters():
        if 'encoder' in n and 'delta' not in n:
            print('Not train:', n)
            p.requires_grad = False
    no_wd = []
    wd = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            # No optimization for frozen params
            continue
        if 'bn' in name or 'convLSTM' in name or 'bias' in name:
            no_wd.append(param)
        else:
            wd.append(param)

    optimizer = optim.Adam([{
        'params': no_wd,
        'weight_decay': 0.0
    }, {
        'params': wd
    }],
                           lr=lr,
                           weight_decay=config['weight_decay'],
                           amsgrad=False)

    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=config['lr_decay'][0],
                                          gamma=config['lr_decay'][1])

    print('Total Epochs:', epochs)
    for it in range(cur_epochs, epochs):
        accum = defaultdict(float)
        for index, batch in enumerate(train_dataloader):
            img = torch.tensor(batch[0], dtype=torch.float).cuda()
            bs = img.shape[0]
            pre_v2 = torch.tensor(batch[2], dtype=torch.float).cuda()
            pre_v1 = torch.tensor(batch[3], dtype=torch.float).cuda()
            outdict = model(img, pre_v2, pre_v1,
                            mode='train_ce')  # (bs, seq_len, 28*28+1)
            out = outdict['logits']
            out = out.contiguous().view(-1,
                                        28 * 28 + 1)  # (bs*seq_len, 28*28+1)
            target = batch[4]
            mask_delta = batch[7]
            mask_delta = torch.tensor(mask_delta).cuda().view(-1)  # (bs*70)
            # smooth target
            target = dt_targets_from_class(np.array(target, dtype=int), 28,
                                           2)  # (bs, seq_len, 28*28+1)
            target = torch.from_numpy(target).cuda().contiguous().view(
                -1, 28 * 28 + 1)  # (bs, seq_len, 28*28+1)
            # Cross-Entropy Loss
            mask_final = batch[6]  # end-of-sequence flag mask
            mask_final = torch.tensor(mask_final).cuda().view(-1)
            loss_lstm = torch.sum(-target *
                                  torch.nn.functional.log_softmax(out, dim=1),
                                  dim=1)  # (bs*seq_len)
            loss_lstm = loss_lstm * mask_final.type_as(
                loss_lstm)  # truncate the loss at the end point
            loss_lstm = loss_lstm.view(bs, -1)  # (bs, seq_len)
            loss_lstm = torch.sum(loss_lstm, dim=1)  # sum over seq_len  (bs,)
            real_pointnum = torch.sum(mask_final.contiguous().view(bs, -1),
                                      dim=1)
            loss_lstm = loss_lstm / real_pointnum  # mean over seq_len
            loss_lstm = torch.mean(loss_lstm)  # mean over batch

            # Delta prediction Cross-Entropy Loss
            delta_target = prepare_delta_target(outdict['pred_polys'],
                                                torch.tensor(batch[-2]).cuda())
            delta_target = dt_targets_from_class(
                np.array(delta_target.cpu().numpy(), dtype=int), 15,
                1)  # no smoothing
            delta_target = torch.from_numpy(
                delta_target[:, :, :-1]).cuda().contiguous().view(-1, 15 * 15)
            delta_logits = outdict['delta_logits'][:, :-1, :]  # (bs, 70, 225)
            delta_logits = delta_logits.contiguous().view(-1, 15 *
                                                          15)  # (bs*70, 225)
            # TODO:get delta loss
            tmp = torch.sum(
                -delta_target *
                torch.nn.functional.log_softmax(delta_logits, dim=1),
                dim=1)
            tmp = tmp * mask_delta.type_as(tmp)
            tmp = tmp.view(bs, -1)
            # sum over len_s  (bs,)
            tmp = torch.sum(tmp, dim=1)
            real_pointnum2 = torch.sum(mask_delta.contiguous().view(bs, -1),
                                       dim=1)
            tmp = tmp / real_pointnum2
            loss_delta = torch.mean(tmp)

            loss = config['lambda'] * loss_delta + loss_lstm
            model.zero_grad()

            if 'grad_clip' in config:
                nn.utils.clip_grad_norm_(model.parameters(),
                                         config['grad_clip'])
            loss.backward()
            accum['loss_total'] += loss
            accum['loss_lstm'] += loss_lstm
            accum['loss_delta'] += loss_delta
            optimizer.step()

            # print running losses
            if (index + 1) % 20 == 0:
                print(
                    'Epoch {} - Step {}, loss_total: {} [Loss lstm: {}, loss delta: {}]'
                    .format(it + 1, index + 1, accum['loss_total'] / 20,
                            accum['loss_lstm'] / 20, accum['loss_delta'] / 20))
                accum = defaultdict(float)

            # validate every config['val_every'] steps
            if (index + 1) % config['val_every'] == 0:
                # validation
                model.delta_encoder.eval()
                model.delta_model.eval()
                model.decoder.eval()
                val_IoU = []
                less_than2 = 0
                with torch.no_grad():
                    for val_index, val_batch in enumerate(val_loader):
                        img = torch.tensor(val_batch[0],
                                           dtype=torch.float).cuda()
                        bs = img.shape[0]
                        WH = val_batch[-1]  # WH_dict
                        left_WH = WH['left_WH']
                        origion_WH = WH['origion_WH']
                        object_WH = WH['object_WH']
                        # target vertices, in 224x224 crop coordinates
                        val_target = val_batch[-2].numpy()  # (bs, 70, 2)
                        val_mask_final = val_batch[7]  # (bs, 70)
                        out_dict = model(
                            img, mode='test')  # (N, seq_len) # test_time
                        pred_x = out_dict['final_pred_x'].cpu().numpy()
                        pred_y = out_dict['final_pred_y'].cpu().numpy()
                        pred_len = out_dict['lengths']  # predicted sequence lengths
                        # compute IoU per instance
                        for ii in range(bs):
                            vertices1 = []
                            vertices2 = []
                            scaleW = 224.0 / object_WH[0][ii]
                            scaleH = 224.0 / object_WH[1][ii]
                            leftW = left_WH[0][ii]
                            leftH = left_WH[1][ii]

                            all_len = np.sum(val_mask_final[ii].numpy())
                            cnt_target = val_target[ii][:all_len]
                            for vert in cnt_target:
                                vertices2.append((vert[0] / scaleW + leftW,
                                                  vert[1] / scaleH + leftH))

                            # print('target:', cnt_target)

                            pred_len_b = pred_len[ii] - 1
                            if pred_len_b < 2:
                                val_IoU.append(0.)
                                less_than2 += 1
                                continue

                            for j in range(pred_len_b):
                                vertex = (pred_x[ii][j] / scaleW + leftW,
                                          pred_y[ii][j] / scaleH + leftH)
                                vertices1.append(vertex)

                            _, nu_cur, de_cur = iou(
                                vertices1, vertices2, origion_WH[1][ii],
                                origion_WH[0][ii])  # (H, W)
                            iou_cur = nu_cur * 1.0 / de_cur if de_cur != 0 else 0
                            val_IoU.append(iou_cur)

                val_iou_data = np.mean(np.array(val_IoU))
                print('Validation After Epoch {} - step {}'.format(
                    str(it + 1), str(index + 1)))
                print('           IoU      on validation set: ', val_iou_data)
                print('less than 2: ', less_than2)
                if it > 4:  # it = 6
                    print('Saving training parameters after this epoch:')
                    torch.save(
                        model.state_dict(),
                        '/data/duye/pretrained_models/ResNext_Plus_DeltaModel_Epoch{}-Step{}_ValIoU{}.pth'
                        .format(str(it + 1), str(index + 1),
                                str(val_iou_data)))
                # set to init
                model.delta_encoder.train()
                model.delta_model.train()  # important
                model.decoder.train()

        # learning-rate decay
        scheduler.step()
        print()
        print('Epoch {} Completed!'.format(str(it + 1)))
        print()
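The iou(vertices1, vertices2, H, W) helper used during validation presumably rasterizes the predicted and ground-truth polygons onto the original image grid and compares the masks; the code unpacks it as (iou, intersection, union). A hedged PIL/NumPy sketch of such a function:

import numpy as np
from PIL import Image, ImageDraw

def iou(vertices1, vertices2, h, w):
    """Rasterize two polygons onto an (h, w) grid and return (IoU, intersection, union)."""
    def rasterize(vertices):
        img = Image.new('L', (int(w), int(h)), 0)
        if len(vertices) >= 3:
            ImageDraw.Draw(img).polygon([(float(x), float(y)) for x, y in vertices],
                                        outline=1, fill=1)
        return np.array(img, dtype=bool)

    mask1, mask2 = rasterize(vertices1), rasterize(vertices2)
    inter = np.logical_and(mask1, mask2).sum()
    union = np.logical_or(mask1, mask2).sum()
    return (inter / union if union > 0 else 0.0), int(inter), int(union)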
Example #10
def main(argv):

    args = parser.parse_args(argv[1:])
    batch_size = args.batch_size
    train_steps = args.train_steps
    nr_epochs = args.nr_epochs  # None
    if nr_epochs == 0:
        nr_epochs = None

    choosen_label = args.choosen_label

    if args.fixed_selection.lower() == 'false':
        fixed_selection = False
    else:
        fixed_selection = True

    data_path = args.data_path

    file_suffix = '-' + choosen_label + str(
        args.train_steps) + '-' + args.suffix
    kfolds = 5  # Splitting training/test dataset into kfolds sets. Total number of steps becomes kfolds*steps

    resultfile = open("Results/model_results" + file_suffix + ".txt", "w")
    resultfile.write('\n\rModel training: ' +
                     datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
                     '\n\n\r')
    resultfile.write('Train steps: ' + str(train_steps) + '\n\r')
    resultfile.write('Number epochs: ' + str(nr_epochs) + '\n\r')
    resultfile.write('Batchsize: ' + str(batch_size) + '\n\r')
    resultfile.write('Choosen label: ' + choosen_label + '\n\r')
    resultfile.write('Fixed_selection: ' + str(fixed_selection) + '\n\r')
    resultfile.write('Data path: ' + str(data_path) + '\n\r')
    resultfile.write('Kfold: ' + str(kfolds) + '\n\r')

    # Label_mapping holds key value pairs where key is the label and value its integer representation.
    # Here unhealthy is 1 for both column and integer representation.
    label_mapping = {0: 0, 1: 1}
    resultfile.write('Label mapping: ' + str(label_mapping) + '\n\r')

    inverted_label_mapping = {}
    for key, value in label_mapping.items():
        inverted_label_mapping[value] = key

    resultfile.write('Inverted label mapping: ' + str(inverted_label_mapping) +
                     '\n\r')
    resultfile.flush()

    #Get three structured separate dataframes from data sources, first and last feature column names
    trainframe, testframe, validationframe, first_column, last_column = dataloader.loadData(
        data_path, fixed_selection, file_suffix)
    resultfile.flush()

    if kfolds <= 1:

        # Prints some dataframe statistics to the resultfile.
        frameinfo = dataloader.analyse_frame(trainframe, choosen_label)
        resultfile.write('\n\rTrainframe:\n\r')
        resultfile.write(frameinfo)
        frameinfo = dataloader.analyse_frame(testframe, choosen_label)
        resultfile.write('\n\r\n\rTestframe:\n\r')
        resultfile.write(frameinfo)
        frameinfo = dataloader.analyse_frame(validationframe, choosen_label)
        resultfile.write('\n\r\n\rValidationframe:\n\r')
        resultfile.write(frameinfo)

        # Train model data
        trainset, labels_training, label_mapping, int_labels_train = \
         dataloader.get_model_data(trainframe, label_mapping, choosen_label, first_column, last_column)

        # Test model data
        testset, labels_test, label_mapping, int_labels_test = \
         dataloader.get_model_data(testframe, label_mapping, choosen_label, first_column, last_column)

        # Validate model data
        validationset, labels_validate, label_mapping, int_labels_validate = \
         dataloader.get_model_data(validationframe, label_mapping, choosen_label, first_column, last_column)

        #Numpy representation, setting to double (default) may cause exceptions for some optimizers
        train_data = trainset.values.astype(numpy.float32)
        train_labels = int_labels_train.values

        test_data = testset.values.astype(numpy.float32)
        test_labels = int_labels_test.values

        validate_data = validationset.values.astype(numpy.float32)

        print(train_data.shape)
        print(test_data.shape)
        print(validate_data.shape)

        print(train_labels.shape)
        print(test_labels.shape)

        # Define the specific model functions for training, test and validation
        cnn_train_input_fn = tensorflow.estimator.inputs.numpy_input_fn(
            x={"x": train_data},
            y=train_labels,
            batch_size=batch_size,
            num_epochs=nr_epochs,
            shuffle=True)
        cnn_eval_input_fn = tensorflow.estimator.inputs.numpy_input_fn(
            x={"x": test_data}, y=test_labels, num_epochs=1, shuffle=False)
        cnn_validate_input_fn = tensorflow.estimator.inputs.numpy_input_fn(
            x={"x": validate_data}, y=None, num_epochs=1, shuffle=False)

        # Create the Estimator with a predefined model function
        classifier = tensorflow.estimator.Estimator(
            model_fn=cnn_config.cnn_model_dnn5CL3_fn,
            model_dir='/data/Tensorflow/' + file_suffix)

        ### Train the Model.
        print('\nModel training\n\r\n\r\n')

        classifier.train(input_fn=cnn_train_input_fn, steps=train_steps)

        ### Test the model
        print('\n\r\n\rModel testing\n\n\n')
        resultfile.write('\n\r\n\rModel testing\n\r')
        # Evaluate the model.

        eval_result = classifier.evaluate(input_fn=cnn_eval_input_fn)

        print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
        resultfile.write(
            '\n\rTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
        resultfile.write('\n\rEval result:\n\r' + str(eval_result))

    else:

        foldframe = trainframe.append(testframe)
        foldframe = foldframe.reindex(numpy.random.permutation(
            foldframe.index))  # shuffle around data
        foldtrainframe = pandas.DataFrame()
        foldtestframe = pandas.DataFrame()
        foldframe_list = []

        foldframe_list = dataloader.getFoldFrame(foldframe_list, kfolds,
                                                 foldframe)

        frameinfo = dataloader.analyse_frame(validationframe, choosen_label)
        resultfile.write('\n\r\n\rValidationframe:\n\r')
        resultfile.write(frameinfo)

        # Validate model data
        validationset, labels_validate, label_mapping, int_labels_validate = \
         dataloader.get_model_data(validationframe, label_mapping, choosen_label, first_column, last_column)

        testresults = []

        for testindex in range(kfolds):

            foldtrainframe, foldtestframe = dataloader.getFoldTrainFrames(
                foldframe_list, testindex)

            frameinfo = dataloader.analyse_frame(foldtrainframe, choosen_label)
            resultfile.write('\n\rTrainframe:\n\r')
            resultfile.write(frameinfo)
            frameinfo = dataloader.analyse_frame(foldtestframe, choosen_label)
            resultfile.write('\n\r\n\rTestframe:\n\r')
            resultfile.write(frameinfo)

            # Train model data
            trainset, labels_training, label_mapping, int_labels_train = \
             dataloader.get_model_data(foldtrainframe, label_mapping, choosen_label, first_column, last_column)

            # Test model data
            testset, labels_test, label_mapping, int_labels_test = \
             dataloader.get_model_data(foldtestframe, label_mapping, choosen_label, first_column, last_column)

            # NumPy representation; leaving the dtype as double (the default) may cause exceptions for some optimizers
            train_data = trainset.values.astype(numpy.float32)
            train_labels = int_labels_train.values

            test_data = testset.values.astype(numpy.float32)
            test_labels = int_labels_test.values

            validate_data = validationset.values.astype(numpy.float32)

            print(train_data.shape)
            print(test_data.shape)
            print(validate_data.shape)

            print(train_labels.shape)
            print(test_labels.shape)

            # Define the specific model functions for training, test and validation
            cnn_train_input_fn = tensorflow.estimator.inputs.numpy_input_fn(
                x={"x": train_data},
                y=train_labels,
                batch_size=batch_size,
                num_epochs=nr_epochs,
                shuffle=True)
            cnn_eval_input_fn = tensorflow.estimator.inputs.numpy_input_fn(
                x={"x": test_data}, y=test_labels, num_epochs=1, shuffle=False)
            cnn_validate_input_fn = tensorflow.estimator.inputs.numpy_input_fn(
                x={"x": validate_data}, y=None, num_epochs=1, shuffle=False)

            # Create the Estimator with a predefined model function (predefined CNN network)
            classifier = tensorflow.estimator.Estimator(
                model_fn=cnn_config.cnn_model_dnn5CL3_fn,
                model_dir='/data/Tensorflow/' + file_suffix)

            ### Train the Model.
            print('\nModel training\n\r\n\r\n')

            classifier.train(input_fn=cnn_train_input_fn, steps=train_steps)

            ### Test the model
            print('\n\r\n\rModel testing\n\n\n')
            resultfile.write('\n\r\n\rModel testing\n\r')
            # Evaluate the model.

            eval_result = classifier.evaluate(input_fn=cnn_eval_input_fn)

            print(
                '\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
            resultfile.write('\n\rK-fold:' + str(testindex + 1))
            resultfile.write('\n\rTest set accuracy: {accuracy:0.3f}\n'.format(
                **eval_result))
            resultfile.write('\n\rEval result:\n\r' + str(eval_result))
            testresults.append(eval_result['accuracy'])

        average = sum(testresults) / len(testresults)

        resultfile.write('\n\rAverage testresult: ' + str(average))

    ### Evaluate the model
    print('\nModel evaluation\n\n\n')
    resultfile.write('\n\rModel evaluation\n\r\n')
    expected = list(
        int_labels_validate
    )  # Integer representation of the labels; map back to label names via inverted_label_mapping
    # Get the prediction results from the trained model
    predictions = classifier.predict(input_fn=cnn_validate_input_fn)

    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
    predictfile = open("Results/predictions" + file_suffix + ".txt", "w")

    number_of_matches = 0
    number_of_validations = 0
    y_true = []
    y_predicted = []
    y_probability = []
    total_probability = 0
    y_predicted_new = []
    limit = 0.96  # Unhealthy trucks with a probability below this limit will be reclassified as healthy
    unhealthy_probabilities = pandas.Series(dtype=float)

    # Calculate and print the prediction results.
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids']
        probability = pred_dict['probabilities'][class_id]
        resultfile.write('\n\r')
        resultfile.write(
            template.format(inverted_label_mapping[class_id],
                            100 * probability, inverted_label_mapping[expec]))
        number_of_validations += 1
        y_true.append(inverted_label_mapping[expec])
        y_predicted.append(inverted_label_mapping[class_id])
        y_probability.append(
            pred_dict['probabilities']
            [1])  # Probability of the positive (unhealthy) class, used for the ROC curve

        # Collect probability values for unhealthy trucks for plotting
        if inverted_label_mapping[class_id] == 1:
            unhealthy_probabilities = unhealthy_probabilities.append(
                pandas.Series([probability]))

        # Reclassify unhealthy trucks whose probability is under the chosen limit
        if inverted_label_mapping[class_id] == 1 and probability < limit:
            y_predicted_new.append(0)
        else:
            y_predicted_new.append(class_id)

        # Count the correctly predicted samples and print them to file
        if str(inverted_label_mapping[class_id]) == str(
                inverted_label_mapping[expec]):
            predictfile.write('Percent: ' + str(100 * probability) + '  ' +
                              choosen_label + ': ' +
                              str(inverted_label_mapping[expec]) + '\n\r')
            number_of_matches += 1
            total_probability += 100 * probability

    confusion_matrix_result = confusion_matrix(
        y_true, y_predicted,
        labels=sorted(label_mapping.keys()))  # e.g. labels=[0, 1]
    print(confusion_matrix_result)
    # Confusion matrix over the potentially reclassified predictions
    confusion_matrix_new = confusion_matrix(
        y_true, y_predicted_new,
        labels=sorted(label_mapping.keys()))  # e.g. labels=[0, 1]
    print(confusion_matrix_new)

    dataloader.print_cm(confusion_matrix_result, list(label_mapping.keys()),
                        file_suffix)
    dataloader.print_cm(confusion_matrix_new, list(label_mapping.keys()),
                        file_suffix + 'New')
    dataloader.print_roc_curve(numpy.array(y_true), numpy.array(y_probability),
                               file_suffix)
    dataloader.print_probabilities(unhealthy_probabilities, file_suffix)

    predictfile.write('\n\rNumber of matches in percent: ' +
                      str(100 * number_of_matches / number_of_validations))
    predictfile.write('\n\rTotal: ' + str(number_of_validations))
    predictfile.write('\n\rMatches: ' + str(number_of_matches))
    predictfile.write('\n\rAverage matches probability: ' +
                      str(total_probability / number_of_matches))
    resultfile.write('\n\r******************************\n\r')
    resultfile.close()
    predictfile.close()
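# A minimal, self-contained sketch of the reclassification rule applied above:
# predictions labelled unhealthy (class 1) whose probability falls below the
# chosen limit are mapped back to healthy (class 0). The helper name
# reclassify_below_limit is hypothetical and only illustrates the idea.
def reclassify_below_limit(class_ids, probabilities, limit=0.96,
                           positive_class=1, negative_class=0):
    reclassified = []
    for class_id, probability in zip(class_ids, probabilities):
        if class_id == positive_class and probability < limit:
            reclassified.append(negative_class)  # not confident enough: treat as healthy
        else:
            reclassified.append(class_id)
    return reclassified

# Example: reclassify_below_limit([1, 1, 0], [0.99, 0.90, 0.80]) -> [1, 0, 0]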
Exemplo n.º 11
0
def train(num_epochs, model, device, train_loader, val_loader, images, texts, lengths, converter, optimizer, lr_scheduler, prediction_dir, print_iter) :
    criterion = CTCLoss()
    criterion.to(device)
    images = images.to(device)
    model.to(device)
    for epoch in range(num_epochs) :
        print(epoch)
        count = 0
        model.train()
        for i, datas in enumerate(train_loader) :
            datas, targets = datas
            batch_size = datas.size(0)
            count += batch_size
            dataloader.loadData(images, datas)
            t, l = converter.encode(targets)
            dataloader.loadData(texts, t)
            dataloader.loadData(lengths, l)
            preds = model(images)
            print('preds size : ', preds.size())
            preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
            print('preds_size size : ', preds_size.size())
            cost = criterion(preds, texts, preds_size, lengths) / batch_size
            model.zero_grad()
            cost.backward()
            optimizer.step()
            if count % print_iter < train_loader.batch_size :
                print('epoch {} [{}/{}]loss : {}'.format(epoch, count, len(train_loader.dataset), cost))

        validation(model, device, val_loader, images, texts, lengths, converter, prediction_dir)
        
        save_model('{}'.format(epoch), model, optimizer, lr_scheduler)
        
        lr_scheduler.step()

def test(model, device, test_loader, images, texts, lengths, converter, prediction_dir) :
    model.to(device)
    images = images.to(device)
    model.eval()
    pred_json = {}
    pred_list = []
    make_folder(prediction_dir)
    for i, datas in enumerate(test_loader) :
        datas, targets = datas
        batch_size = datas.size(0)
        dataloader.loadData(images, datas)
        t, l = converter.encode(targets)
        dataloader.loadData(texts, t)
        dataloader.loadData(lengths, l)

        preds = model(images)
        
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    
        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        pred_string = converter.decode(preds.data, preds_size.data, raw=False)
        print('per batch pred_string : ', pred_string)

        pred_dict = {'image_path' : test_loader.dataset.get_img_path(i), 'prediction' : pred_string}
        pred_list.append(pred_dict)
    
    pred_json = {'predict' : pred_list}
    with open(os.path.join(prediction_dir, 'predict.json'), 'w') as save_json :
        json.dump(pred_json, save_json, indent=2, ensure_ascii=False)
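# A minimal sketch of the greedy CTC collapse that converter.decode(..., raw=False)
# is assumed to perform above: repeated indices are merged and the blank index
# (0 here, with nclass = len(letter) + 1 as set up in main() below) is dropped.
# This is an illustration only; the repository's strLabelConverter may differ in detail.
def greedy_ctc_collapse(indices, alphabet, blank=0):
    chars = []
    previous = blank
    for idx in indices:
        if idx != blank and idx != previous:
            chars.append(alphabet[idx - 1])  # index 0 is reserved for the CTC blank
        previous = idx
    return ''.join(chars)

# Example: greedy_ctc_collapse([1, 1, 0, 2, 2, 0, 1], "ab") -> "aba"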


def validation(model, device, val_loader, images, texts, lengths, converter, prediction_dir) :
    test(model, device, val_loader, images, texts, lengths, converter, prediction_dir)
    print('validation test finish')
    DR_PATH = os.path.join(prediction_dir, 'predict.json')
    GT_PATH = glob.glob(val_loader.dataset.get_root() + '/*.json')[0]
    res = evaluation.evaluation_metrics(DR_PATH, GT_PATH)
    print('validation : ', res)

try:
    from nipa import nipa_data
    DATASET_PATH = nipa_data.get_data_root('deepfake')
except Exception:
    DATASET_PATH = os.path.join('../data')

def main() :

    # mode argument
    args = argparse.ArgumentParser()
    args.add_argument("--letter", type=str, default=" ,.()\'\"?!01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ가각간갇갈갉갊감갑값갓갔강갖갗같갚갛개객갠갤갬갭갯갰갱갸갹갼걀걋걍걔걘걜거걱건걷걸걺검겁것겄겅겆겉겊겋게겐겔겜겝겟겠겡겨격겪견겯결겸겹겻겼경곁계곈곌곕곗고곡곤곧골곪곬곯곰곱곳공곶과곽관괄괆괌괍괏광괘괜괠괩괬괭괴괵괸괼굄굅굇굉교굔굘굡굣구국군굳굴굵굶굻굼굽굿궁궂궈궉권궐궜궝궤궷귀귁귄귈귐귑귓규균귤그극근귿글긁금급긋긍긔기긱긴긷길긺김깁깃깅깆깊까깍깎깐깔깖깜깝깟깠깡깥깨깩깬깰깸깹깻깼깽꺄꺅꺌꺼꺽꺾껀껄껌껍껏껐껑께껙껜껨껫껭껴껸껼꼇꼈꼍꼐꼬꼭꼰꼲꼴꼼꼽꼿꽁꽂꽃꽈꽉꽐꽜꽝꽤꽥꽹꾀꾄꾈꾐꾑꾕꾜꾸꾹꾼꿀꿇꿈꿉꿋꿍꿎꿔꿜꿨꿩꿰꿱꿴꿸뀀뀁뀄뀌뀐뀔뀜뀝뀨끄끅끈끊끌끎끓끔끕끗끙끝끼끽낀낄낌낍낏낑나낙낚난낟날낡낢남납낫났낭낮낯낱낳내낵낸낼냄냅냇냈냉냐냑냔냘냠냥너넉넋넌널넒넓넘넙넛넜넝넣네넥넨넬넴넵넷넸넹녀녁년녈념녑녔녕녘녜녠노녹논놀놂놈놉놋농높놓놔놘놜놨뇌뇐뇔뇜뇝뇟뇨뇩뇬뇰뇹뇻뇽누눅눈눋눌눔눕눗눙눠눴눼뉘뉜뉠뉨뉩뉴뉵뉼늄늅늉느늑는늘늙늚늠늡늣능늦늪늬늰늴니닉닌닐닒님닙닛닝닢다닥닦단닫달닭닮닯닳담답닷닸당닺닻닿대댁댄댈댐댑댓댔댕댜더덕덖던덛덜덞덟덤덥덧덩덫덮데덱덴델뎀뎁뎃뎄뎅뎌뎐뎔뎠뎡뎨뎬도독돈돋돌돎돐돔돕돗동돛돝돠돤돨돼됐되된될됨됩됫됴두둑둔둘둠둡둣둥둬뒀뒈뒝뒤뒨뒬뒵뒷뒹듀듄듈듐듕드득든듣들듦듬듭듯등듸디딕딘딛딜딤딥딧딨딩딪따딱딴딸땀땁땃땄땅땋때땍땐땔땜땝땟땠땡떠떡떤떨떪떫떰떱떳떴떵떻떼떽뗀뗄뗌뗍뗏뗐뗑뗘뗬또똑똔똘똥똬똴뙈뙤뙨뚜뚝뚠뚤뚫뚬뚱뛔뛰뛴뛸뜀뜁뜅뜨뜩뜬뜯뜰뜸뜹뜻띄띈띌띔띕띠띤띨띰띱띳띵라락란랄람랍랏랐랑랒랖랗래랙랜랠램랩랫랬랭랴략랸럇량러럭런럴럼럽럿렀렁렇레렉렌렐렘렙렛렝려력련렬렴렵렷렸령례롄롑롓로록론롤롬롭롯롱롸롼뢍뢨뢰뢴뢸룀룁룃룅료룐룔룝룟룡루룩룬룰룸룹룻룽뤄뤘뤠뤼뤽륀륄륌륏륑류륙륜률륨륩륫륭르륵른를름릅릇릉릊릍릎리릭린릴림립릿링마막만많맏말맑맒맘맙맛망맞맡맣매맥맨맬맴맵맷맸맹맺먀먁먈먕머먹먼멀멂멈멉멋멍멎멓메멕멘멜멤멥멧멨멩며멱면멸몃몄명몇몌모목몫몬몰몲몸몹못몽뫄뫈뫘뫙뫼묀묄묍묏묑묘묜묠묩묫무묵묶문묻물묽묾뭄뭅뭇뭉뭍뭏뭐뭔뭘뭡뭣뭬뮈뮌뮐뮤뮨뮬뮴뮷므믄믈믐믓미믹민믿밀밂밈밉밋밌밍및밑바박밖밗반받발밝밞밟밤밥밧방밭배백밴밸뱀뱁뱃뱄뱅뱉뱌뱍뱐뱝버벅번벋벌벎범법벗벙벚베벡벤벧벨벰벱벳벴벵벼벽변별볍볏볐병볕볘볜보복볶본볼봄봅봇봉봐봔봤봬뵀뵈뵉뵌뵐뵘뵙뵤뵨부북분붇불붉붊붐붑붓붕붙붚붜붤붰붸뷔뷕뷘뷜뷩뷰뷴뷸븀븃븅브븍븐블븜븝븟비빅빈빌빎빔빕빗빙빚빛빠빡빤빨빪빰빱빳빴빵빻빼빽뺀뺄뺌뺍뺏뺐뺑뺘뺙뺨뻐뻑뻔뻗뻘뻠뻣뻤뻥뻬뼁뼈뼉뼘뼙뼛뼜뼝뽀뽁뽄뽈뽐뽑뽕뾔뾰뿅뿌뿍뿐뿔뿜뿟뿡쀼쁑쁘쁜쁠쁨쁩삐삑삔삘삠삡삣삥사삭삯산삳살삵삶삼삽삿샀상샅새색샌샐샘샙샛샜생샤샥샨샬샴샵샷샹섀섄섈섐섕서석섞섟선섣설섦섧섬섭섯섰성섶세섹센셀셈셉셋셌셍셔셕션셜셤셥셧셨셩셰셴셸솅소속솎손솔솖솜솝솟송솥솨솩솬솰솽쇄쇈쇌쇔쇗쇘쇠쇤쇨쇰쇱쇳쇼쇽숀숄숌숍숏숑수숙순숟술숨숩숫숭숯숱숲숴쉈쉐쉑쉔쉘쉠쉥쉬쉭쉰쉴쉼쉽쉿슁슈슉슐슘슛슝스슥슨슬슭슴습슷승시식신싣실싫심십싯싱싶싸싹싻싼쌀쌈쌉쌌쌍쌓쌔쌕쌘쌜쌤쌥쌨쌩썅써썩썬썰썲썸썹썼썽쎄쎈쎌쏀쏘쏙쏜쏟쏠쏢쏨쏩쏭쏴쏵쏸쐈쐐쐤쐬쐰쐴쐼쐽쑈쑤쑥쑨쑬쑴쑵쑹쒀쒔쒜쒸쒼쓩쓰쓱쓴쓸쓺쓿씀씁씌씐씔씜씨씩씬씰씸씹씻씽아악안앉않알앍앎앓암압앗았앙앝앞애액앤앨앰앱앳앴앵야약얀얄얇얌얍얏양얕얗얘얜얠얩어억언얹얻얼얽얾엄업없엇었엉엊엌엎에엑엔엘엠엡엣엥여역엮연열엶엷염엽엾엿였영옅옆옇예옌옐옘옙옛옜오옥온올옭옮옰옳옴옵옷옹옻와왁완왈왐왑왓왔왕왜왝왠왬왯왱외왹왼욀욈욉욋욍요욕욘욜욤욥욧용우욱운울욹욺움웁웃웅워웍원월웜웝웠웡웨웩웬웰웸웹웽위윅윈윌윔윕윗윙유육윤율윰윱윳융윷으윽은을읊음읍읏응읒읓읔읕읖읗의읩읜읠읨읫이익인일읽읾잃임입잇있잉잊잎자작잔잖잗잘잚잠잡잣잤장잦재잭잰잴잼잽잿쟀쟁쟈쟉쟌쟎쟐쟘쟝쟤쟨쟬저적전절젊점접젓정젖제젝젠젤젬젭젯젱져젼졀졈졉졌졍졔조족존졸졺좀좁좃종좆좇좋좌좍좔좝좟좡좨좼좽죄죈죌죔죕죗죙죠죡죤죵주죽준줄줅줆줌줍줏중줘줬줴쥐쥑쥔쥘쥠쥡쥣쥬쥰쥴쥼즈즉즌즐즘즙즛증지직진짇질짊짐집짓징짖짙짚짜짝짠짢짤짧짬짭짯짰짱째짹짼쨀쨈쨉쨋쨌쨍쨔쨘쨩쩌쩍쩐쩔쩜쩝쩟쩠쩡쩨쩽쪄쪘쪼쪽쫀쫄쫌쫍쫏쫑쫓쫘쫙쫠쫬쫴쬈쬐쬔쬘쬠쬡쭁쭈쭉쭌쭐쭘쭙쭝쭤쭸쭹쮜쮸쯔쯤쯧쯩찌찍찐찔찜찝찡찢찧차착찬찮찰참찹찻찼창찾채책챈챌챔챕챗챘챙챠챤챦챨챰챵처척천철첨첩첫첬청체첵첸첼쳄쳅쳇쳉쳐쳔쳤쳬쳰촁초촉촌촐촘촙촛총촤촨촬촹최쵠쵤쵬쵭쵯쵱쵸춈추축춘출춤춥춧충춰췄췌췐취췬췰췸췹췻췽츄츈츌츔츙츠측츤츨츰츱츳층치칙친칟칠칡침칩칫칭카칵칸칼캄캅캇캉캐캑캔캘캠캡캣캤캥캬캭컁커컥컨컫컬컴컵컷컸컹케켁켄켈켐켑켓켕켜켠켤켬켭켯켰켱켸코콕콘콜콤콥콧콩콰콱콴콸쾀쾅쾌쾡쾨쾰쿄쿠쿡쿤쿨쿰쿱쿳쿵쿼퀀퀄퀑퀘퀭퀴퀵퀸퀼큄큅큇큉큐큔큘큠크큭큰클큼큽킁키킥킨킬킴킵킷킹타탁탄탈탉탐탑탓탔탕태택탠탤탬탭탯탰탱탸턍터턱턴털턺텀텁텃텄텅테텍텐텔템텝텟텡텨텬텼톄톈토톡톤톨톰톱톳통톺톼퇀퇘퇴퇸툇툉툐투툭툰툴툼툽툿퉁퉈퉜퉤튀튁튄튈튐튑튕튜튠튤튬튱트특튼튿틀틂틈틉틋틔틘틜틤틥티틱틴틸팀팁팃팅파팍팎판팔팖팜팝팟팠팡팥패팩팬팰팸팹팻팼팽퍄퍅퍼퍽펀펄펌펍펏펐펑페펙펜펠펨펩펫펭펴편펼폄폅폈평폐폘폡폣포폭폰폴폼폽폿퐁퐈퐝푀푄표푠푤푭푯푸푹푼푿풀풂품풉풋풍풔풩퓌퓐퓔퓜퓟퓨퓬퓰퓸퓻퓽프픈플픔픕픗피픽핀필핌핍핏핑하학한할핥함합핫항해핵핸핼햄햅햇했행햐향허헉헌헐헒험헙헛헝헤헥헨헬헴헵헷헹혀혁현혈혐협혓혔형혜혠혤혭호혹혼홀홅홈홉홋홍홑화확환활홧황홰홱홴횃횅회획횐횔횝횟횡효횬횰횹횻후훅훈훌훑훔훗훙훠훤훨훰훵훼훽휀휄휑휘휙휜휠휨휩휫휭휴휵휸휼흄흇흉흐흑흔흖흗흘흙흠흡흣흥흩희흰흴흼흽힁히힉힌힐힘힙힛힝")
    args.add_argument("--lr", type=float, default=0.0001)
    args.add_argument("--cuda", type=bool, default=True)
    args.add_argument("--num_epochs", type=int, default=50000)
    args.add_argument("--model_name", type=str, default="")
    args.add_argument("--batch", type=int, default=1)
    args.add_argument("--mode", type=str, default="train")
    args.add_argument("--prediction_dir", type=str, default="prediction")
    args.add_argument("--print_iter", type=int, default=10)
    
    config = args.parse_args()
    
    letter = config.letter
    lr = config.lr
    cuda = config.cuda
    num_epochs = config.num_epochs
    model_name = config.model_name
    batch = config.batch
    mode = config.mode
    prediction_dir = config.prediction_dir
    print_iter = config.print_iter
    imgH = 32
    imgW = 200
    nclass = len(letter) + 1
    nc = 1
    
    new_model = model.CRNN(imgH, nc, nclass, 256)
    new_model.apply(model.weights_init)
    device = torch.device('cuda') if cuda else torch.device('cpu')
    
    converter = dataloader.strLabelConverter(letter)
    
    images = torch.FloatTensor(batch, 1, imgH, imgW)
    texts = torch.IntTensor(batch * 1000)
    lengths = torch.IntTensor(batch)
    
    images = Variable(images)
    texts = Variable(texts)
    lengths = Variable(lengths)
    
    #check parameter of model
    print("------------------------------------------------------------")
    total_params = sum(p.numel() for p in new_model.parameters())
    print("num of parameter : ",total_params)
    trainable_params = sum(p.numel() for p in new_model.parameters() if p.requires_grad)
    print("num of trainable_ parameter :",trainable_params)
    print("------------------------------------------------------------")


    if mode == 'train' :
        print('train start')
        train_loader = data_loader(
            DATASET_PATH, batch, imgH, imgW, phase='train')
        val_loader = data_loader(
            DATASET_PATH, batch, imgH, imgW, phase='val')
        params = [p for p in new_model.parameters() if p.requires_grad]
        optimizer = optim.Adam(params, lr=lr, betas=(0.5, 0.999))
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=40,gamma=0.1)
        train(num_epochs, new_model, device, train_loader, val_loader, images, texts, lengths, converter, optimizer, lr_scheduler, prediction_dir, print_iter)
        
    elif mode == 'test' :
        print('test start')
        test_loader = data_loader(
            DATASET_PATH, 1, imgH, imgW, phase='test')
        load_model(model_name, new_model)
        test(new_model, device, test_loader, images, texts, lengths, converter, prediction_dir)


if __name__ == '__main__' :
    main()
Exemplo n.º 12
0
                    default=50,
                    help="Maximum number of iterations in a round")

if __name__ == "__main__":
    args = parser.parse_args()
    #Settings
    min_max_iter = (args.min_iter, args.max_iter)

    #Load data
    dataname = args.data
    print(f"Truth inference: {args.truth_inference}")
    if args.crowdsourcing:
        print(f"Task assignment: {args.task_assignment}")

    print(f"Data: {dataname}")
    claims, claimsByObj, names, ground_truths, srcnames, h = loadData(dataname)

    #Run algorithm
    if args.truth_inference == "TDH":
        ti_model = TDH(claims, deepCopyClaimsByObj(claimsByObj), names, h,
                       min_max_iter)
    elif args.truth_inference == "Vote":
        ti_model = Vote(claims, deepCopyClaimsByObj(claimsByObj), names, h,
                        min_max_iter)
    ti_model.run(args.verbose)

    #evaluation
    evaluation = EvaluationModule(ground_truths, names, claimsByObj, h)
    accuracy, accuracy_gen, avg_distance = evaluation.eval(
        ti_model.getTruths())
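# A minimal sketch of a majority-vote baseline in the spirit of the Vote model
# used above, assuming claims are grouped per object as {object: [(source, value), ...]}.
# Both the data layout and the helper name majority_vote are assumptions made
# purely for illustration; the actual Vote implementation may differ.
from collections import Counter

def majority_vote(claims_by_obj):
    truths = {}
    for obj, source_value_pairs in claims_by_obj.items():
        values = [value for _, value in source_value_pairs]
        truths[obj] = Counter(values).most_common(1)[0][0]  # most frequently claimed value
    return truths

# Example: majority_vote({'o1': [('s1', 'A'), ('s2', 'A'), ('s3', 'B')]}) -> {'o1': 'A'}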
def train(config, load_resnet50=False, pre_trained=None, cur_epochs=0):

    batch_size = config['batch_size']
    lr = config['lr']
    epochs = config['epoch']

    train_dataloader = loadData('train', 16, 71, batch_size)
    val_loader = loadData('val', 16, 71, batch_size, shuffle=False)
    model = PolygonModel(load_predtrained_resnet50=load_resnet50,
                         predict_delta=False).cuda()
    # checkpoint
    if pre_trained is not None:
        model.load_state_dict(torch.load(pre_trained))
        print('loaded pretrained polygon net!')

    # Regularization; the original paper does not add a regularization term
    no_wd = []
    wd = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            # No optimization for frozen params
            continue
        if 'bn' in name or 'convLSTM' in name or 'bias' in name:
            no_wd.append(param)
        else:
            wd.append(param)

    optimizer = optim.Adam([{
        'params': no_wd,
        'weight_decay': 0.0
    }, {
        'params': wd
    }],
                           lr=lr,
                           weight_decay=config['weight_decay'],
                           amsgrad=False)

    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=config['lr_decay'][0],
                                          gamma=config['lr_decay'][1])

    print('Total Epochs:', epochs)
    for it in range(cur_epochs, epochs):
        accum = defaultdict(float)
        # accum['loss_total'] = 0.
        # accum['loss_lstm'] = 0.
        # accum['loss_delta'] = 0.
        for index, batch in enumerate(train_dataloader):
            img = torch.tensor(batch[0], dtype=torch.float).cuda()
            bs = img.shape[0]
            pre_v2 = torch.tensor(batch[2], dtype=torch.float).cuda()
            pre_v1 = torch.tensor(batch[3], dtype=torch.float).cuda()
            outdict = model(img, pre_v2, pre_v1,
                            mode='train_ce')  # (bs, seq_len, 28*28+1)

            out = outdict['logits']
            # An earlier training run accidentally included the following line:
            # out = torch.nn.functional.log_softmax(out, dim=-1)  # logits -> log_probs
            out = out.contiguous().view(-1,
                                        28 * 28 + 1)  # (bs*seq_len, 28*28+1)
            target = batch[4]

            # smooth target
            target = dt_targets_from_class(np.array(target, dtype=np.int64), 28,
                                           2)  # (bs, seq_len, 28*28+1)
            target = torch.from_numpy(target).cuda().contiguous().view(
                -1, 28 * 28 + 1)  # (bs, seq_len, 28*28+1)
            # Cross-entropy loss computation
            mask_final = batch[6]  # end-of-sequence mask, (bs, seq_len(70)), starting from the first point
            mask_final = torch.tensor(mask_final).cuda().view(-1)
            mask_delta = batch[7]
            mask_delta = torch.tensor(mask_delta).cuda().view(-1)  # (bs*70)
            loss_lstm = torch.sum(-target *
                                  torch.nn.functional.log_softmax(out, dim=1),
                                  dim=1)  # (bs*seq_len)
            loss_lstm = loss_lstm * mask_final.type_as(
                loss_lstm)  # truncate the loss computation at the end point
            loss_lstm = loss_lstm.view(bs, -1)  # (bs, seq_len)
            loss_lstm = torch.sum(loss_lstm, dim=1)  # sum over seq_len  (bs,)
            real_pointnum = torch.sum(mask_final.contiguous().view(bs, -1),
                                      dim=1)
            loss_lstm = loss_lstm / real_pointnum  # mean over seq_len
            loss_lstm = torch.mean(loss_lstm)  # mean over batch

            # loss = loss_lstm + loss_delta
            loss = loss_lstm
            # TODO: this loss is fine for train_ce, but train_rl can rewrite the loss based on conditional probabilities
            model.zero_grad()

            loss.backward()

            # Clip gradients after backward(), once gradients actually exist
            if 'grad_clip' in config:
                nn.utils.clip_grad_norm_(model.parameters(),
                                         config['grad_clip'])

            accum['loss_total'] += loss.item()
            optimizer.step()

            # Print the running loss
            if (index + 1) % 20 == 0:
                print('Epoch {} - Step {}, loss_total {}'.format(
                    it + 1, index, accum['loss_total'] / 20))
                accum = defaultdict(float)
            # Run validation once every 3000 steps (config['val_every'])
            if (index + 1) % config['val_every'] == 0:
                # validation
                model.eval()  # the original author only puts this in eval mode
                val_IoU = []
                less_than2 = 0
                with torch.no_grad():
                    for val_index, val_batch in enumerate(val_loader):
                        img = torch.tensor(val_batch[0],
                                           dtype=torch.float).cuda()
                        bs = img.shape[0]

                        WH = val_batch[-1]  # WH_dict
                        left_WH = WH['left_WH']
                        origion_WH = WH['origion_WH']
                        object_WH = WH['object_WH']

                        val_mask_final = val_batch[6]
                        val_mask_final = torch.tensor(
                            val_mask_final).cuda().contiguous().view(-1)
                        out_dict = model(
                            img, mode='test')  # (N, seq_len) # test_time
                        pred_polys = out_dict['pred_polys']  # (bs, seq_len)
                        tmp = pred_polys
                        pred_polys = pred_polys.contiguous().view(
                            -1)  # (bs*seq_len)
                        val_target = val_batch[4]  # (bs, seq_len)
                        # compute accuracy
                        val_target = torch.tensor(
                            val_target,
                            dtype=torch.long).cuda().contiguous().view(
                                -1)  # (bs*seq_len)
                        val_acc1 = torch.tensor(pred_polys == val_target,
                                                dtype=torch.float).cuda()
                        val_acc1 = (val_acc1 * val_mask_final).sum().item()
                        val_acc1 = val_acc1 * 1.0 / val_mask_final.sum().item()
                        # used for computing IoU
                        val_result_index = tmp.cpu().numpy()  # (bs, seq_len)
                        val_target = val_batch[4].numpy()  # (bs, seq_len)

                        # compute IoU
                        for ii in range(bs):
                            vertices1 = []
                            vertices2 = []
                            scaleW = 224.0 / object_WH[0][ii]
                            scaleH = 224.0 / object_WH[1][ii]
                            leftW = left_WH[0][ii]
                            leftH = left_WH[1][ii]
                            for label in val_result_index[ii]:
                                if label == 28 * 28:
                                    break
                                vertex = (
                                    ((label % 28) * 8.0 + 4) / scaleW + leftW,
                                    ((int(label / 28)) * 8.0 + 4) / scaleH +
                                    leftH)
                                vertices1.append(vertex)
                            for label in val_target[ii]:
                                if label == 28 * 28:
                                    break
                                vertex = (
                                    ((label % 28) * 8.0 + 4) / scaleW + leftW,
                                    ((int(label / 28)) * 8.0 + 4) / scaleH +
                                    leftH)
                                vertices2.append(vertex)
                            if len(vertices1) < 2:
                                less_than2 += 1
                                # IoU=0.
                                val_IoU.append(0.)
                                continue
                            _, nu_cur, de_cur = iou(
                                vertices1, vertices2, origion_WH[1][ii],
                                origion_WH[0][ii])  # (H, W)
                            iou_cur = nu_cur * 1.0 / de_cur if de_cur != 0 else 0
                            val_IoU.append(iou_cur)

                val_iou_data = np.mean(np.array(val_IoU))
                print('Validation After Epoch {} - step {}'.format(
                    str(it + 1), str(index + 1)))
                print('           IoU      on validation set: ', val_iou_data)
                print('less than 2: ', less_than2)
                if it > 4:  # i.e. from epoch index 5 onward
                    print('Saving training parameters after this epoch:')
                    torch.save(
                        model.state_dict(),
                        '/data/duye/pretrained_models/ResNext50_FPN_LSTM_Epoch{}-Step{}_ValIoU{}.pth'
                        .format(str(it + 1), str(index + 1),
                                str(val_iou_data)))
                # set to init
                model.train()  # important

        # Learning-rate decay
        scheduler.step()
        # Print the current lr
        print()
        print('Epoch {} Completed!'.format(str(it + 1)))
        print()
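# A self-contained sketch of the masked soft-target cross-entropy computed in the
# training loop above: per-step CE against the smoothed targets, masked after the
# end point, averaged over the valid steps of each sequence and then over the batch.
# masked_soft_ce is a hypothetical helper name; the loop above works on flattened
# (bs*seq_len, 28*28+1) tensors, while this sketch keeps the (bs, seq_len, C) shape.
import torch
import torch.nn.functional as F

def masked_soft_ce(logits, soft_targets, mask):
    # logits, soft_targets: (bs, seq_len, n_classes); mask: (bs, seq_len), 1 for valid steps
    log_probs = F.log_softmax(logits, dim=-1)
    step_loss = torch.sum(-soft_targets * log_probs, dim=-1)       # (bs, seq_len)
    step_loss = step_loss * mask.type_as(step_loss)                # zero out steps past the end point
    per_seq = step_loss.sum(dim=1) / mask.sum(dim=1).clamp(min=1)  # mean over valid steps
    return per_seq.mean()                                          # mean over the batch

# Example: masked_soft_ce(torch.randn(2, 70, 785), torch.rand(2, 70, 785), torch.ones(2, 70))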