Example #1
    def __init__(self, options, mode):

        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]

        self.validationdataset_ori = LipreadingDataset(
            options[mode]["data_root"], options[mode]["index_root"],
            options[mode]["padding"], False)
        self.validationdataset = LipreadingDataset_val(
            options[mode]["data_root"], options[mode]["index_root"],
            options[mode]["padding"], False)

        self.tot_data = len(self.validationdataset)
        self.validationdataloader = DataLoader(
            self.validationdataset,
            batch_size=options["input"]["batchsize"],
            shuffle=False,
            num_workers=options["input"]["numworkers"],
            drop_last=False)
        self.mode = mode

        self.video_dist_label = convert_task3_label_to_video_label()

        self.average_IOU = 0

        self.video_id_cnt = 0

        self.time0 = time.time()
Example #2
 def __init__(self, args):
     augment = False
     shuffle = False
     self.channel = args.channel
     self.batchsize = args.batch_size
     self.validationdataset = LipreadingDataset(args.dataset,
                                                "val",
                                                augment=augment,
                                                channel=self.channel)
     self.validationdataloader = DataLoader(self.validationdataset,
                                            batch_size=self.batchsize,
                                            shuffle=shuffle,
                                            num_workers=args.workers,
                                            drop_last=True)
     # self.usecudnn = options["general"]["usecudnn"]
     self.statsfrequency = args.statsfrequency
     # self.gpuid = options["general"]["gpuid"]
     self.log_file = args.logfile
     self.savedir = args.save_dir
     self.num_batches = int(len(self.validationdataset) / self.batchsize)
     self.num_samples = int(len(self.validationdataset))
     self.num_frames = args.num_frames
     self.modelname = args.modelname
     print_log('loaded validation dataset with %d data' %
               len(self.validationdataset),
               log=self.log_file)
Example #3
    def __init__(self, options):

        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]

        self.statsfrequency = options["training"]["statsfrequency"]

        self.learningrate = options["training"]["learningrate"]

        # NOTE: in the source this reads the learning-rate key; modelType most
        # likely should be read from its own configuration entry.
        self.modelType = options["training"]["learningrate"]

        self.weightdecay = options["training"]["weightdecay"]
        self.momentum = options["training"]["momentum"]

        self.save_prefix = options["training"]["save_prefix"]

        self.trainingdataset = LipreadingDataset(
            options["training"]["data_root"],
            options["training"]["index_root"], options["training"]["padding"],
            True)

        self.trainingdataloader = DataLoader(
            self.trainingdataset,
            batch_size=options["input"]["batchsize"],
            shuffle=options["input"]["shuffle"],
            num_workers=options["input"]["numworkers"],
            drop_last=True)
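
The constructors in Examples #1, #3 and #5 all index into the same nested options mapping. A minimal sketch of that structure, with the key names taken from the lookups above and every value purely illustrative:

# Illustrative only: key names mirror the reads above, values are made up.
options = {
    "general": {"usecudnn": True},
    "input": {"batchsize": 18, "shuffle": True, "numworkers": 4},
    "training": {
        "statsfrequency": 1000,
        "learningrate": 3e-4,
        "weightdecay": 1e-4,
        "momentum": 0.9,
        "save_prefix": "checkpoints/lipread",
        "data_root": "/path/to/frames",
        "index_root": "/path/to/index",
        "padding": 75,
    },
    "validation": {
        "data_root": "/path/to/frames",
        "index_root": "/path/to/index",
        "padding": 75,
    },
}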
Example #4
 def __init__(self, args):
     augment = True
     shuffle = True
     self.channel = args.channel
     self.batchsize = args.batch_size
     self.trainingdataset = LipreadingDataset(args.dataset,
                                              "train",
                                              augment=augment,
                                              channel=self.channel)
     self.trainingdataloader = DataLoader(self.trainingdataset,
                                          batch_size=self.batchsize,
                                          shuffle=shuffle,
                                          num_workers=args.workers,
                                          drop_last=True)
     # self.usecudnn = options["general"]["usecudnn"]
     self.statsfrequency = args.statsfrequency
     # self.gpuid = options["general"]["gpuid"]
     self.learningrate = args.lr
     self.weightdecay = args.weight_decay
     self.momentum = args.momentum
     self.log_file = args.logfile
     self.modelsavedir = args.save_dir
     # _, self.savename, _ = fileparts(self.modelsavedir)
     self.num_batches = int(len(self.trainingdataset) / self.batchsize)
     self.num_samples = int(len(self.trainingdataset))
     self.num_frames = args.num_frames
     self.modelname = args.modelname
     print_log('loaded training dataset with %d data' %
               len(self.trainingdataset),
               log=self.log_file)
     if augment: print_log('using augmentation', log=self.log_file)
     else: print_log('no data augmentation', log=self.log_file)
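
The args-driven variants (Examples #2 and #4) expect an argparse namespace carrying the attributes read above. A minimal parser sketch, assuming these flag spellings and defaults (only the attribute names are taken from the code):

import argparse

# Attribute names come from the reads above; flags and defaults are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--channel", type=int, default=1)
parser.add_argument("--batch_size", type=int, default=18)
parser.add_argument("--workers", type=int, default=4)
parser.add_argument("--statsfrequency", type=int, default=1000)
parser.add_argument("--logfile", type=str, default="train.log")
parser.add_argument("--save_dir", type=str, default="./checkpoints")
parser.add_argument("--num_frames", type=int, default=29)
parser.add_argument("--modelname", type=str, default="lipreading")
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--momentum", type=float, default=0.9)
args = parser.parse_args()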
Example #5
    def __init__(self, options, mode):

        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]
        self.validationdataset = LipreadingDataset(options[mode]["data_root"],
                                                   options[mode]["index_root"],
                                                   options[mode]["padding"],
                                                   False)

        self.tot_data = len(self.validationdataset)
        self.validationdataloader = DataLoader(
            self.validationdataset,
            batch_size=options["input"]["batchsize"],
            shuffle=options["input"]["shuffle"],
            num_workers=options["input"]["numworkers"],
            drop_last=False)
        self.mode = mode
Example #6
 def __init__(self, options):
     self.dataset = LipreadingDataset(options['validation']['data_path'],
                                 "val", False, options['model']['landmark'])
     self.dataloader = DataLoader(
                                 self.dataset,
                                 batch_size=options["input"]["batch_size"],
                                 shuffle=options["input"]["shuffle"],
                                 num_workers=options["input"]["num_worker"],
                                 drop_last=True
                             )
     self.batch_size = options["input"]["batch_size"]
Example #7
    def __init__(self, options, save_dir):

        self.validationdataset = LipreadingDataset(
            options["validation"]["dataset"],
            "val",
            augment=False,
            use_frames=options['training']['use_frames'])
        self.validationdataloader = DataLoader(
            self.validationdataset,
            batch_size=options["input"]["batchsize"],
            shuffle=options["input"]["shuffle"],
            num_workers=options["input"]["numworkers"],
            drop_last=True)
        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]

        self.save_dir = save_dir
Example #8
    def __init__(self, options):

        self.validationdataset = LipreadingDataset(
            "/udisk/pszts-ssd/AV-ASR-data/BBC_Oxford/lipread_mp4", "val",
            False)
        self.validationdataloader = DataLoader(
            self.validationdataset,
            batch_size=options["input"]["batchsize"],
            shuffle=options["input"]["shuffle"],
            num_workers=options["input"]["numworkers"],
            drop_last=True)
        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]

        self.statsfrequency = options["training"]["statsfrequency"]

        self.gpuid = options["general"]["gpuid"]
Example #9
 def __init__(self, options):
     self.batchsize = options["input"]["batchsize"]
     self.validationdataset = LipreadingDataset(
         options["general"]["dataset"], "val", False)
     self.validationdataloader = DataLoader(
         self.validationdataset,
         batch_size=self.batchsize,
         shuffle=False,
         num_workers=options["input"]["numworkers"],
         drop_last=True)
     self.usecudnn = options["general"]["usecudnn"]
     self.statsfrequency = options["training"]["statsfrequency"]
     self.gpuid = options["general"]["gpuid"]
     self.log_file = options["general"]["logfile"]
     self.savedir = options["general"]["modelsavedir"]
     self.num_batches = int(len(self.validationdataset) / self.batchsize)
     print_log('loaded validation dataset with %d data' %
               len(self.validationdataset),
               log=self.log_file)
Example #10
 def __init__(self, options):
     self.batchsize = options["input"]["batchsize"]
     self.trainingdataset = LipreadingDataset(options["general"]["dataset"],
                                              "train")
     self.trainingdataloader = DataLoader(
         self.trainingdataset,
         batch_size=self.batchsize,
         shuffle=options["training"]["shuffle"],
         num_workers=options["input"]["numworkers"],
         drop_last=True)
     self.usecudnn = options["general"]["usecudnn"]
     self.statsfrequency = options["training"]["statsfrequency"]
     self.gpuid = options["general"]["gpuid"]
     self.learningrate = options["training"]["learningrate"]
     # self.modelType = options["training"]["learningrate"]
     self.weightdecay = options["training"]["weightdecay"]
     self.momentum = options["training"]["momentum"]
     self.log_file = options["general"]["logfile"]
     self.modelsavedir = options["general"]["modelsavedir"]
     _, self.time_str, _ = fileparts(self.modelsavedir)
     print_log('loaded training dataset with %d data' %
               len(self.trainingdataset),
               log=options["general"]["logfile"])
Example #11
    def __init__(self, options):
        self.trainingdataset = LipreadingDataset(
            options["training"]["dataset"], "train",
            use_frames=options['training']['use_frames'])
        self.trainingdataloader = DataLoader(
                                    self.trainingdataset,
                                    batch_size=options["input"]["batchsize"],
                                    shuffle=options["input"]["shuffle"],
                                    num_workers=options["input"]["numworkers"],
                                    drop_last=True
                                )

        if len(self.trainingdataset) == 0:
            print("WARN: no data for training", file=sys.stderr)

        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]

        self.learningrate = options["training"]["learningrate"]

        # NOTE: in the source this reads the learning-rate key; modelType most
        # likely should be read from its own configuration entry.
        self.modelType = options["training"]["learningrate"]

        self.weightdecay = options["training"]["weightdecay"]
        self.momentum = options["training"]["momentum"]
Example #12
class Validator():
    def __init__(self, options, mode):

        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]
        self.validationdataset = LipreadingDataset(options[mode]["data_root"],
                                                   options[mode]["index_root"],
                                                   options[mode]["padding"],
                                                   False)

        self.tot_data = len(self.validationdataset)
        self.validationdataloader = DataLoader(
            self.validationdataset,
            batch_size=options["input"]["batchsize"],
            shuffle=options["input"]["shuffle"],
            num_workers=options["input"]["numworkers"],
            drop_last=False)
        self.mode = mode

    def __call__(self, model):
        with torch.no_grad():
            print("Starting {}...".format(self.mode))
            count = np.zeros((len(self.validationdataset.pinyins)))
            # print (len(self.validationdataset.pinyins))
            # assert (0)
            validator_function = model.validator_function()
            model.eval()
            # if(self.usecudnn):
            #     net = nn.DataParallel(model).cuda()
            net = model.cuda()
            num_samples = 0

            self.pinyins = self.validationdataset.get_pinyins()

            #print (self.pinyins)

            cnt = 0
            pinyin_lengh = []
            for pinyin in self.pinyins:
                cnt = cnt + 1
                #print (pinyin)
                pinyin_lengh.append(len(pinyin.split(" ")))
                #print (len(pinyin.split(" ")))
                # if cnt > 5:
                #     assert (0)
            print(max(pinyin_lengh))
            #assert (0)

            all_labels = []
            all_predictions = []

            for i_batch, sample_batched in enumerate(
                    self.validationdataloader):

                input = Variable(sample_batched['temporalvolume']).cuda()
                labels = Variable(sample_batched['label']).cuda()
                length = Variable(sample_batched['length']).cuda()

                model = model.cuda()
                #print(np.shape (input))
                outputs = net(input)
                #print (np.shape (outputs))
                #assert (0)
                (vector, top1) = validator_function(outputs, length, labels)

                _, maxindices = vector.cpu().max(1)

                all_labels.extend(labels.cpu().numpy()[:, 0])
                all_predictions.extend(maxindices.cpu().numpy())

                argmax = (-vector.cpu().numpy()).argsort()
                for i in range(input.size(0)):
                    p = list(argmax[i]).index(labels[i])
                    count[p:] += 1
                #print (count)
                num_samples += input.size(0)

                if i_batch % 50 == 0:
                    print(
                        'i_batch/tot_batch:{}/{},correct/tot:{}/{},current_acc:{}'
                        .format(i_batch, len(self.validationdataloader),
                                count[0], len(self.validationdataset),
                                1.0 * count[0] / num_samples))

                # print (len(all_labels))
                # if len (all_labels) > 100:
                #     break
                #break
        # all_labels = np.array(all_labels).flatten()
        # all_predictions = np.array(all_predictions).flatten()
        # print (all_labels)
        # print (all_predictions)

        all_length_labels = [pinyin_lengh[label] for label in all_labels]
        all_length_predictions = [
            pinyin_lengh[label] for label in all_predictions
        ]

        #print ()
        #all_length_labels.append
        cm = confusion_matrix(
            all_length_labels,
            all_length_predictions,
        )

        #np.save ("confusion_matrix.npy", cm)

        #print (self.pinyins[::-1])
        #assert (0)
        #cm = np.load ("confusion_matrix.npy")
        print(cm)
        print(cm.sum(axis=1))
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        #cm = cm[:100, :100]
        print(cm)
        pinyin_lengh_name = [1, 2, 3, 4, 5, 6, 7]
        #assert (0)
        trace = go.Heatmap(z=cm, x=pinyin_lengh_name, y=pinyin_lengh_name)
        #print (self.pinyins)
        data = [trace]
        py.iplot(data, filename='labelled-heatmap-length')

        plot_confusion_matrix(cm, pinyin_lengh_name,
                              "Confusion Matrix for Pinyins")
        plt.savefig('HAR_cm.png', format='png')
        plt.show()
        assert (0)

        return count / num_samples
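
A minimal driving sketch for the Validator above. The options mapping and the model object are assumptions; only the constructor signature and the __call__(model) interface come from the code, and the debugging "assert (0)" near the end of __call__ would have to be removed before the cumulative accuracies are actually returned:

# Hypothetical usage; "validation" is the mode whose data_root/index_root/padding are read above.
validator = Validator(options, "validation")
topk = validator(model)    # cumulative accuracy per rank: topk[0] is top-1, topk[4] is top-5
print("top-1 = %.4f, top-5 = %.4f" % (topk[0], topk[4]))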
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", type=str, help="configuration file")
    parser.add_argument("-s",
                        "--start",
                        type=int,
                        help="start epoch if reload",
                        default=1)
    parser.add_argument("-t",
                        "--test",
                        type=bool,
                        help="test mode",
                        default=False)
    parser.add_argument("-b",
                        "--best",
                        type=float,
                        help="best epoch val accuracy",
                        default=0.)
    parser.add_argument("-e",
                        "--bepoch",
                        type=int,
                        help="the number of bad epoch",
                        default=0)
    args = parser.parse_args()
    writer = SummaryWriter()

    print("Loading options...")
    with open(args.config, 'r') as optionsFile:
        options = yaml.load(optionsFile.read(), Loader=yaml.FullLoader)
    options['training']['start_epoch'] = args.start - 1
    if (options["general"]["usecudnnbenchmark"]
            and options["general"]["usecudnn"]):
        print("Running cudnn benchmark...")
        torch.backends.cudnn.benchmark = True
    writer.add_text('name', options['name'])
    #Create the model.
    model = LipRead(options).cuda()
    print('lr : ', options['training']['learning_rate'])
    print('weight_decay : ', options['training']['weight_decay'])
    print('landmark : ', options['input']['landmark'])
    print('landmark channel concat : ',
          not options['input']['landmark_seperate'])
    print('coord conv : ', options['model']['coord'])
    print('attention : ', options['model']['attention'])
    optimizer = optim.Adam(model.parameters(),
                           lr=options['training']['learning_rate'],
                           weight_decay=options['training']['weight_decay'])
    if options['training']['schedule'] == 'plateau':
        if args.start > 1 and args.best == 0. and not args.test:
            print("must have best accuracy")
            raise
        plat = True
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode='max',
            factor=options['training']['lr_decay'],
            patience=3,
            verbose=True,
            threshold=0.003,
            threshold_mode='abs',
        )
        scheduler.best = args.best
        scheduler.num_bad_epochs = args.bepoch
    else:
        plat = False
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=options['training']['schedule'],
            gamma=options['training']['lr_decay'])

    criterion = model.loss()

    if not os.path.isdir(
            os.path.join(options["general"]["save_path"], options['name'])):
        os.mkdir(os.path.join(options["general"]["save_path"],
                              options['name']))
    if not os.path.isdir(
            os.path.join(options["general"]["save_path"], options['name'],
                         'optimizers')):
        os.mkdir(
            os.path.join(options["general"]["save_path"], options['name'],
                         'optimizers'))
    if not os.path.isdir(
            os.path.join(options["general"]["save_path"], options['name'],
                         'models')):
        os.mkdir(
            os.path.join(options["general"]["save_path"], options['name'],
                         'models'))

    if (options["general"]['model_load']):
        path = glob(
            os.path.join(options["general"]["save_path"], options['name'],
                         'models', 'model{}.pth'.format(args.start - 1)))
        #path = sorted(glob(os.path.join(options["general"]["save_path"], options['name'], 'models', '*.pth')), key=lambda name : int(name.split('/')[-1].replace('.pth', '').replace('model', '')))
        if path:
            print('load {} model..'.format(path[-1]))
            model.load_state_dict(torch.load(path[-1]))
        #path = sorted(glob(os.path.join(options["general"]["save_path"], options['name'], 'optimizers', '*.pth')), key=lambda name : int(name.split('/')[-1].replace('.pth', '').replace('optimizer', '')))
        path = glob(
            os.path.join(options["general"]["save_path"], options['name'],
                         'optimizers',
                         'optimizer{}.pth'.format(args.start - 1)))
        if path:
            print('load {} optimizer..'.format(path[-1]))
            optimizer.load_state_dict(torch.load(path[-1]))

    train_dataset = LipreadingDataset(options["training"]["data_path"],
                                      "train", options['input']['aug'],
                                      options['input']['landmark'],
                                      options['input']['landmark_seperate'])
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=options["input"]["batch_size"],
                                  shuffle=options["input"]["shuffle"],
                                  num_workers=options["input"]["num_worker"],
                                  drop_last=True)
    val_dataset = LipreadingDataset(options['validation']['data_path'], "val",
                                    False, options['input']['landmark'],
                                    options['input']['landmark_seperate'])
    val_dataloader = DataLoader(val_dataset,
                                batch_size=options["input"]["batch_size"],
                                shuffle=options["input"]["shuffle"],
                                num_workers=options["input"]["num_worker"],
                                drop_last=True)
    train_size = len(train_dataset)

    batch_size = options["input"]["batch_size"]
    stats_frequency = options["training"]["stats_frequency"]
    if args.test:
        test_dataset = LipreadingDataset(options['validation']['data_path'],
                                         "test", False,
                                         options['input']['landmark'],
                                         options['input']['landmark_seperate'])
        test_dataloader = DataLoader(
            test_dataset,
            batch_size=options["input"]["batch_size"],
            shuffle=options["input"]["shuffle"],
            num_workers=options["input"]["num_worker"],
            drop_last=True)
        model.eval()
        with torch.no_grad():
            print("Starting testing...")
            count = 0
            validator_function = model.validator_function()

            for i_batch, sample_batched in enumerate(test_dataloader):
                input = sample_batched[0]
                labels = sample_batched[1]

                input = input.cuda()
                labels = labels.cuda()

                outputs = model(input)

                count += validator_function(outputs, labels)

            accuracy = count / len(test_dataset)
            print('#############test result################')
            print('correct count: {}, total count: {}, accu: {}'.format(
                count, len(test_dataset), accuracy))
            # with open(os.path.join('./', options['name']+'.txt'), "a") as outputfile:
            #     outputfile.write("\ncorrect count: {}, total count: {} accuracy: {}" .format(count, len(test_dataset), accuracy ))
            return

    for epoch in range(options["training"]["start_epoch"],
                       options["training"]["max_epoch"]):

        model.train()
        if (options["training"]["train"]):
            if options['training']['schedule'] and not plat:
                scheduler.step(epoch)
            running_loss = 0.0
            count = 0
            count_bs = 0
            startTime = datetime.now()
            print("Starting training...")
            for i_batch, sample_batched in enumerate(train_dataloader):
                optimizer.zero_grad()
                if options['input']['landmark_seperate']:
                    x = sample_batched[0].cuda()
                    labels = sample_batched[1].cuda()
                    dot_labels = sample_batched[2].float().cuda()
                else:
                    x = sample_batched[0].cuda()
                    labels = sample_batched[1].cuda()
                if not options['input']['landmark_seperate']:
                    outputs = model(x)
                else:
                    outputs = model(x, dot_labels)
                loss = criterion(outputs, labels)
                count += model.validator_function()(outputs, labels)
                count_bs += labels.shape[0]

                running_loss += loss.item()
                loss.backward()

                optimizer.step()
                if (i_batch % stats_frequency == 0 and i_batch != 0):

                    print('[%d, %5d] loss: %.8f, acc: %f' %
                          (epoch + 1, i_batch + 1,
                           running_loss / stats_frequency, count / count_bs))
                    writer.add_scalar(
                        'train/loss', running_loss / stats_frequency,
                        (epoch - 1) * train_size + i_batch * batch_size)
                    writer.add_scalar('train/accuracy', count / count_bs,
                                      (epoch - 1) * train_size +
                                      i_batch * batch_size)
                    writer.add_scalar('train/true', count,
                                      (epoch - 1) * train_size +
                                      i_batch * batch_size)
                    writer.add_scalar('train/false', count_bs - count,
                                      (epoch - 1) * train_size +
                                      i_batch * batch_size)
                    try:
                        print('lr {}, name {}'.format(
                            optimizer.param_groups[-1]['lr'], options['name']))
                        writer.add_scalar(
                            'train/lr', optimizer.param_groups[-1]['lr'],
                            (epoch - 1) * train_size + i_batch * batch_size)
                    except Exception as e:
                        print('lr {}, name {}'.format(
                            options['training']['learning_rate'],
                            options['name']))
                        writer.add_scalar(
                            'train/lr', options['training']['learning_rate'],
                            (epoch - 1) * train_size + i_batch * batch_size)
                    running_loss = 0.0
                    count = 0
                    count_bs = 0
                    currentTime = datetime.now()
                    output_iteration(i_batch * batch_size,
                                     currentTime - startTime, train_size)

        print("Epoch completed")
        if options['general']['model_save'] and options['training']['train']:
            print("saving state..")
            torch.save(
                model.state_dict(),
                os.path.join(options["general"]["save_path"], options['name'],
                             'models', 'model{}.pth'.format(epoch + 1)))
            torch.save(
                optimizer.state_dict(),
                os.path.join(options["general"]["save_path"], options['name'],
                             'optimizers',
                             'optimizer{}.pth'.format(epoch + 1)))

        model.eval()
        with torch.no_grad():
            if (options["validation"]["validate"]):
                print("Starting validation...")
                count = 0
                validator_function = model.validator_function()

                for i_batch, sample_batched in enumerate(val_dataloader):
                    if options['input']['landmark_seperate']:
                        x = sample_batched[0].cuda()
                        labels = sample_batched[1].cuda()
                        dot_labels = sample_batched[2].float().cuda()
                    else:
                        x = sample_batched[0].cuda()
                        labels = sample_batched[1].cuda()
                    if not options['input']['landmark_seperate']:
                        outputs = model(x)
                    else:
                        outputs = model(x, dot_labels)

                    count += validator_function(outputs, labels)

                accuracy = count / len(val_dataset)
                if options['training']['schedule'] and plat:
                    scheduler.step(accuracy)
                writer.add_scalar('val/accuracy', accuracy, epoch)
                writer.add_scalar('val/true', count, epoch)
                writer.add_scalar('val/false', len(val_dataset) - count, epoch)
                print('correct count: {}, total count: {}, accu: {}'.format(
                    count, len(val_dataset), accuracy))
                with open(os.path.join('./', options['name'] + '.txt'),
                          "a") as outputfile:
                    outputfile.write(
                        "\ncorrect count: {}, total count: {} accuracy: {}".
                        format(count, len(val_dataset), accuracy))
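
The YAML configuration consumed by the script above maps onto roughly this structure once loaded. The key names are taken from the lookups in the code; every value is only a placeholder:

# What yaml.load(...) is expected to return for the script above; values are illustrative.
options = {
    "name": "baseline-run",
    "general": {
        "usecudnn": True,
        "usecudnnbenchmark": True,
        "save_path": "/path/to/experiments",
        "model_load": False,
        "model_save": True,
    },
    "input": {
        "batch_size": 36,
        "shuffle": True,
        "num_worker": 8,
        "aug": True,
        "landmark": False,
        "landmark_seperate": False,   # spelling follows the code
    },
    "model": {"coord": False, "attention": False},
    "training": {
        "train": True,
        "data_path": "/path/to/lipread_mp4",
        "learning_rate": 3e-4,
        "weight_decay": 1e-4,
        "schedule": "plateau",        # or a list of milestone epochs for MultiStepLR
        "lr_decay": 0.5,
        "max_epoch": 30,
        "stats_frequency": 100,
    },
    "validation": {"data_path": "/path/to/lipread_mp4", "validate": True},
}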
Example #14
class Validator():
    def __init__(self, options, mode):

        self.usecudnn = options["general"]["usecudnn"]

        self.batchsize = options["input"]["batchsize"]

        self.validationdataset_ori = LipreadingDataset(
            options[mode]["data_root"], options[mode]["index_root"],
            options[mode]["padding"], False)
        self.validationdataset = LipreadingDataset_val(
            options[mode]["data_root"], options[mode]["index_root"],
            options[mode]["padding"], False)

        self.tot_data = len(self.validationdataset)
        self.validationdataloader = DataLoader(
            self.validationdataset,
            batch_size=options["input"]["batchsize"],
            shuffle=False,
            num_workers=options["input"]["numworkers"],
            drop_last=False)
        self.mode = mode

        self.video_dist_label = convert_task3_label_to_video_label()

        self.average_IOU = 0

        self.video_id_cnt = 0

        self.time0 = time.time()

    def __call__(self, model):
        with torch.no_grad():
            print("Starting {}...".format(self.mode))
            # count = np.zeros((len(self.validationdataset.pinyins)))
            # print (len(self.validationdataset.pinyins))
            self.pinyins = self.validationdataset_ori.get_pinyins()

            self.video_list = self.validationdataset.get_video_list()

            video_list_check_list = np.zeros(len(self.video_list))
            # assert (0)
            validator_function_pred = model.validator_function_pred()
            model.eval()
            # if(self.usecudnn):
            # net = nn.DataParallel(model).cuda()
            net = model.cuda()
            num_samples = 0

            all_labels = []
            all_predictions = []

            output_list = []
            # print (self.video_list)

            for i_batch, sample_batched in enumerate(
                    self.validationdataloader):

                input = Variable(sample_batched['temporalvolume']).cuda()
                # labels = Variable(sample_batched['label']).cuda()
                length = Variable(sample_batched['length']).cuda()

                op = sample_batched['op']

                ed = sample_batched['ed']

                path = sample_batched['path']

                video_length = sample_batched['video_length']

                # if path == "004499b75f5456aa7f866b7f5252b73c":
                #     continue
                # assert(0)

                # print (input[0])
                # print (np.shape (input))

                model = model.cuda()
                #print(np.shape (input))
                outputs = net(input)
                #print (np.shape (outputs))
                #assert (0)
                (values, predition,
                 averageEnergies) = validator_function_pred(outputs, length)

                pre_video_id = None

                pre_video_length = None

                # time0 = time.time()

                for i in range(0, len(predition)):
                    if video_list_check_list[self.video_list.index(
                            path[i])] == 1:
                        # print (path[i])
                        output_list.append(averageEnergies[i].cpu().numpy())
                        pre_video_id = path[i]
                        pre_video_length = video_length[i]

                    elif video_list_check_list[self.video_list.index(
                            path[i])] == 0:
                        if (len(output_list) != 0 and pre_video_id is not None
                                and pre_video_id in self.video_dist_label):

                            self.video_id_cnt = self.video_id_cnt + 1

                            pred_prob = np.max(output_list, axis=0)

                            np.save(
                                "checkpoint_prob_30/" + pre_video_id + ".npy",
                                np.array(pred_prob))

                            # pred_word = [self.pinyins[int(i)] for i in  np.where (pred_prob > -0.5 )[0]]

                            sorted_index = sorted(enumerate(pred_prob),
                                                  key=lambda x: x[1])

                            # print (sorted_index)

                            pred_word = [
                                self.pinyins[int(i[0])]
                                for i in sorted_index[-int(pre_video_length /
                                                           13):]
                            ]

                            # print (pred_word)

                            # print (pred_word)

                            # assert (0)

                            print(set(pred_word))

                            print(set(self.video_dist_label[pre_video_id]))

                            output = str(pre_video_id) + " " + str(pred_word)

                            print(output)

                            f = open('output_10.txt', 'a')

                            f.write(output + "\n")

                            f.close()

                            x = len(
                                set(pred_word)
                                & set(self.video_dist_label[pre_video_id]))

                            y = len(
                                set(pred_word)
                                | set(self.video_dist_label[pre_video_id]))

                            # print (x)

                            # print (y)

                            print("Num:", self.video_id_cnt, " this_term:",
                                  x / y)

                            self.average_IOU = self.average_IOU + (x / y)

                            print("average:",
                                  self.average_IOU / self.video_id_cnt,
                                  "time needed:", (time.time() - self.time0) /
                                  self.video_id_cnt)

                            # time0 = time.time()

                        # print (np.shape (averageEnergies[i].cpu().numpy()))
                        output_list = [averageEnergies[i].cpu().numpy()]
                        video_list_check_list[self.video_list.index(
                            path[i])] = 1
                        pre_video_id = path[i]
                        pre_video_length = video_length[i]

                continue
                # print (values)

                # predition = [ self.pinyins[label] for label in predition ]

                # for i in range(0, len(predition)):
                #     if values[i] > -5:
                #         output = str(path[i]) + " " + str(op[i].cpu().numpy()) + " " + str(ed[i].cpu().numpy()) + " " + str(predition[i]) + " " + str(values[i].cpu().numpy())
                #         f.write(output + "\n" )# print (output)

                # _, maxindices = vector.cpu().max(1)

                # all_labels.extend (labels.cpu().numpy()[:,0])
                # all_predictions.extend (maxindices.cpu().numpy())

                # argmax = (-vector.cpu().numpy()).argsort()
                # for i in range(input.size(0)):
                #     p = list(argmax[i]).index(labels[i])
                #     count[p:] += 1
                # #print (count)
                # num_samples += input.size(0)

                # if i_batch % 50 == 0:
                #     print('i_batch/tot_batch:{}/{},corret/tot:{}/{},current_acc:{}'.format(i_batch,len(self.validationdataloader),count[0],len(self.validationdataset),1.0*count[0]/num_samples))

                # print (len(all_labels))
                # if len (all_labels) > 100:
                #     break
                #break

        # all_labels = np.array(all_labels).flatten()
        # all_predictions = np.array(all_predictions).flatten()
        # print (all_labels)
        # print (all_predictions)
        assert (0)
        all_length_labels = [pinyin_lengh[label] for label in all_labels]
        all_length_predictions = [
            pinyin_lengh[label] for label in all_predictions
        ]

        #print ()
        #all_length_labels.append
        cm = confusion_matrix(
            all_length_labels,
            all_length_predictions,
        )

        #np.save ("confusion_matrix.npy", cm)

        #print (self.pinyins[::-1])
        #assert (0)
        #cm = np.load ("confusion_matrix.npy")
        print(cm)
        print(cm.sum(axis=1))
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        #cm = cm[:100, :100]
        print(cm)
        pinyin_lengh_name = [1, 2, 3, 4, 5, 6, 7]
        #assert (0)
        trace = go.Heatmap(z=cm, x=pinyin_lengh_name, y=pinyin_lengh_name)
        #print (self.pinyins)
        data = [trace]
        py.iplot(data, filename='labelled-heatmap-length')

        plot_confusion_matrix(cm, pinyin_lengh_name,
                              "Confusion Matrix for Pinyins")
        plt.savefig('HAR_cm.png', format='png')
        plt.show()
        assert (0)

        return count / num_samples
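
The per-video score accumulated into average_IOU above is an intersection-over-union between the predicted and reference pinyin sets. Pulled out as a standalone helper (hypothetical name), it is just:

def set_iou(predicted, reference):
    # Same quantity as x / y in the loop above: len(A & B) / len(A | B) over label sets.
    predicted, reference = set(predicted), set(reference)
    union = predicted | reference
    return len(predicted & reference) / len(union) if union else 0.0

# e.g. set_iou(["ni3", "hao3"], ["ni3", "hao3", "ma5"]) == 2 / 3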
Example #15
import math
import sys
import time

import numpy as np
import toml
import torch
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

# Project-local modules; the exact import paths below are assumed.
from data import LipreadingDataset
from models import LRGANModel

with open(sys.argv[1], 'r') as optFile:
    opt = toml.loads(optFile.read())

if (opt["general"]["usecudnnbenchmark"] and opt["general"]["usecudnn"]):
    print("Running cudnn benchmark...")
    torch.backends.cudnn.benchmark = True

writer = SummaryWriter(comment=opt["general"]["comment"])

train_dataset = LipreadingDataset(opt["training"]["dataset"], "train")
train_dataloader = DataLoader(train_dataset,
                              batch_size=opt["input"]["batchsize"],
                              shuffle=opt["input"]["shuffle"],
                              num_workers=opt["input"]["numworkers"],
                              drop_last=False)

val_dataset = LipreadingDataset(opt["validation"]["dataset"], "val")
val_dataloader = DataLoader(val_dataset,
                            batch_size=opt["input"]["batchsize"],
                            shuffle=opt["input"]["shuffle"],
                            num_workers=opt["input"]["numworkers"],
                            drop_last=False)

model = LRGANModel()
model.initialize(opt)
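
Example #15 reads its configuration from a TOML file passed as sys.argv[1]. A minimal file matching the lookups above, shown here as a parsed Python string; section and key names come from the code, values are placeholders:

import toml

OPTIONS = toml.loads("""
[general]
usecudnn = true
usecudnnbenchmark = true
comment = "lrgan-baseline"

[input]
batchsize = 36
shuffle = true
numworkers = 4

[training]
dataset = "/path/to/lipread_mp4"

[validation]
dataset = "/path/to/lipread_mp4"
""")

assert OPTIONS["input"]["batchsize"] == 36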
Example #16
from data import LipreadingDataset_LR as LipreadingDataset
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import torch.nn as nn
import os
import pdb
import math
import sys
import torch
import time
import toml
import numpy as np

# Project-local model class; the import path below is assumed.
from models import LRGANModel

with open(sys.argv[1], 'r') as optFile:
    opt = toml.loads(optFile.read())

val_dataset = LipreadingDataset(opt["validation"]["dataset"], "val")
val_dataloader = DataLoader(val_dataset,
                            batch_size=opt["input"]["batchsize"],
                            shuffle=opt["input"]["shuffle"],
                            num_workers=opt["input"]["numworkers"],
                            drop_last=False)

model = LRGANModel()
model.initialize(opt)
print('Start Testing...')

model.set_eval()
count = np.array([0, 0, 0, 0])
acc = np.array([0.0, 0.0, 0.0, 0.0])
val_loss = np.array([0.0, 0.0, 0.0, 0.0])
len_dataset = len(val_dataloader)