Code example #1
File: eval.py  Project: britney-f/SALMNet
def main():
    net = Baseline(num_classes=culane.num_classes, deep_base=args['deep_base']).cuda()

    print('load checkpoint \'%s.pth\' for evaluation' % args['checkpoint'])
    pretrained_dict = torch.load(os.path.join(ckpt_path, exp_name, args['checkpoint'] + '_checkpoint.pth'))
    # strip the 'module.' prefix that nn.DataParallel adds when saving
    pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items()}
    net.load_state_dict(pretrained_dict)

    net.eval()

    save_dir = os.path.join(ckpt_path, exp_name, 'vis_%s_test' % args['checkpoint'])
    check_mkdir(save_dir)
    log_path = os.path.join(save_dir, str(datetime.datetime.now()) + '.log')

    data_list = [l.strip('\n') for l in open(os.path.join(culane.root, culane.list, 'test_gt.txt'), 'r')]

    loss_record = AverageMeter()
    gt_all, prediction_all = [], []

    for idx in range(len(data_list)):
        print('evaluating %d / %d' % (idx + 1, len(data_list)))

        img = Image.open(culane.root + data_list[idx].split(' ')[0]).convert('RGB')
        gt = Image.open(culane.root + data_list[idx].split(' ')[1])

        img, gt = val_joint_transform(img, gt)

        with torch.no_grad():
            # torch.autograd.Variable is deprecated; tensors work directly under no_grad
            img_var = img_transform(img).unsqueeze(0).cuda()
            gt_var = mask_transform(gt).unsqueeze(0).cuda()

            prediction = net(img_var)[0]

            loss = criterion(prediction, gt_var)
            loss_record.update(loss.item(), 1)

            scoremap = F.softmax(prediction, dim=1).data.squeeze().cpu().numpy()

            prediction = prediction.data.max(1)[1].squeeze().cpu().numpy().astype(np.uint8)
            prediction_all.append(prediction)
            gt_all.append(np.array(gt))

        if args['save_results']:
            check_mkdir(save_dir + data_list[idx].split(' ')[0][:-10])
            # write one '.lines.txt' prediction file per image
            with open(os.path.join(save_dir, data_list[idx].split(' ')[0][1:-4] + '.lines.txt'), 'w') as out_file:
                prob2lines(scoremap, out_file)

    acc, acc_cls, mean_iu, fwavacc = evaluation(prediction_all, gt_all, culane.num_classes)
    log = 'val results: loss %.5f  acc %.5f  acc_cls %.5f  mean_iu %.5f  fwavacc %.5f' % \
              (loss_record.avg, acc, acc_cls, mean_iu, fwavacc)
    print(log)
    with open(log_path, 'w') as f:
        f.write(log + '\n')
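
The `k[7:]` slice in this example assumes every checkpoint key carries the 7-character `module.` prefix that `torch.nn.DataParallel` prepends when a wrapped model is saved. A slightly more defensive sketch of the same idea (not part of SALMNet):

def strip_dataparallel_prefix(state_dict):
    # drop a leading 'module.' from each key, only if present
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}
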
Code example #2
def test(args):
    device = torch.device('cuda' if args.cuda else 'cpu')

    pprint(args.__dict__)
    interface = FileInterface(**args.__dict__)
    piqa_model = Baseline(**args.__dict__).to(device)

    processor = SquadProcessor(args.char_vocab_size,
                               args.glove_vocab_size,
                               args.word_vocab_size,
                               elmo=args.elmo)

    bind_model(interface, processor, piqa_model)
    interface.load(args.iteration, session=args.load_dir)

    test_examples = load_squad(interface.test_path, draft=args.draft)
    test_dataset = tuple(
        processor.preprocess(example) for example in test_examples)

    test_sampler = SquadSampler(test_dataset, bucket=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             sampler=test_sampler,
                             collate_fn=processor.collate)

    print('Inferencing')
    with torch.no_grad():
        piqa_model.eval()
        pred = {}
        # zip() with a range caps evaluation at args.eval_steps batches
        for batch_idx, (test_batch, _) in enumerate(
                zip(test_loader, range(args.eval_steps))):
            test_batch = {
                key: val.to(device)
                for key, val in test_batch.items()
            }
            model_output = piqa_model(**test_batch)
            results = processor.postprocess_batch(test_dataset, test_batch,
                                                  model_output)
            if batch_idx % args.dump_period == 0:
                dump = get_dump(test_dataset, test_batch, model_output,
                                results)
                interface.dump(batch_idx, dump)
            for result in results:
                pred[result['id']] = result['pred']

            print('[%d/%d]' % (batch_idx + 1, len(test_loader)))
        interface.pred(pred)
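
The `zip(test_loader, range(args.eval_steps))` idiom above caps an otherwise unbounded loader at `eval_steps` batches, since `zip` stops at the shorter iterable. A minimal self-contained illustration, with `itertools.count` standing in for a loader:

from itertools import count

eval_steps = 3
seen = [batch for batch, _ in zip(count(), range(eval_steps))]
assert seen == [0, 1, 2]  # iteration stopped after eval_steps items
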
Code example #3
File: policy_grad.py  Project: Tzeusy/RL_car
def test():
    # Prepare env
    env = create_env()
    h, w, c = env.observation_space.shape

    # Load 5 best models
    device = torch.device("cpu")
    model_dir = "./policy_grad"
    model_fns = {}
    for fn in os.listdir(model_dir):
        if fn.endswith('.pth'):
            score = fn.split("_")[-1][:-4]
            model_fns[fn] = float(score)
    top_5 = heapq.nlargest(5, model_fns, key=model_fns.get)

    models = []
    for fn in top_5:
        path = os.path.join(model_dir, fn)
        model = Baseline(h, w).to(device)
        model.load_state_dict(torch.load(path, map_location='cpu'))
        model.eval()
        models.append(model)

    # Watch race car perform
    state = env.reset().transpose((2, 0, 1))
    state = torch.tensor([state], dtype=torch.float, device=device)
    total_reward = 0
    for t in count():
        # Select and perform an action
        votes = []
        for model in models:
            pi, _ = model(state)
            votes.append(pi.argmax().item())
        action_idx = Counter(votes).most_common(1)[0][0]
        action = index_to_action(action_idx)
        state, reward, done, _ = env.step(action)
        env.render()

        # Update
        state = state.transpose((2, 0, 1))
        state = torch.tensor([state], dtype=torch.float, device=device)
        total_reward += reward
        if done:
            break
    print("Total reward: {}".format(total_reward))
Code example #4
def embed(args):
    device = torch.device('cuda' if args.cuda else 'cpu')

    pprint(args.__dict__)
    interface = FileInterface(**args.__dict__)
    piqa_model = Baseline(**args.__dict__).to(device)

    processor = SquadProcessor(args.char_vocab_size,
                               args.glove_vocab_size,
                               args.word_vocab_size,
                               elmo=args.elmo)

    bind_model(interface, processor, piqa_model)
    interface.load(args.iteration, session=args.load_dir)

    test_examples = load_squad(interface.test_path, draft=args.draft)
    test_dataset = tuple(
        processor.preprocess(example) for example in test_examples)

    test_sampler = SquadSampler(test_dataset, bucket=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             sampler=test_sampler,
                             collate_fn=processor.collate)

    print('Saving embeddings')
    with torch.no_grad():
        piqa_model.eval()
        # as in test(), zip() with a range caps the loop at args.eval_steps batches
        for batch_idx, (test_batch, _) in enumerate(
                zip(test_loader, range(args.eval_steps))):
            test_batch = {
                key: val.to(device)
                for key, val in test_batch.items()
            }

            if args.mode in ('embed', 'embed_context'):

                context_output = piqa_model.get_context(**test_batch)
                context_results = processor.postprocess_context_batch(
                    test_dataset,
                    test_batch,
                    context_output,
                    emb_type=args.emb_type)

                for id_, phrases, matrix in context_results:
                    interface.context_emb(id_,
                                          phrases,
                                          matrix,
                                          emb_type=args.emb_type)

            if args.mode in ('embed', 'embed_question'):

                question_output = piqa_model.get_question(**test_batch)
                question_results = processor.postprocess_question_batch(
                    test_dataset,
                    test_batch,
                    question_output,
                    emb_type=args.emb_type)

                for id_, emb in question_results:
                    interface.question_emb(id_, emb, emb_type=args.emb_type)

            print('[%d/%d]' % (batch_idx + 1, len(test_loader)))
Code example #5
def train(args):
    start_time = time.time()
    device = torch.device('cuda' if args.cuda else 'cpu')

    pprint(args.__dict__)
    interface = FileInterface(**args.__dict__)
    piqa_model = Baseline(**args.__dict__).to(device)

    loss_model = Loss().to(device)
    optimizer = torch.optim.Adam(p for p in piqa_model.parameters()
                                 if p.requires_grad)

    batch_size = args.batch_size
    char_vocab_size = args.char_vocab_size
    glove_vocab_size = args.glove_vocab_size
    word_vocab_size = args.word_vocab_size
    glove_size = args.glove_size
    elmo = args.elmo
    draft = args.draft

    def preprocess(interface_):
        # get data
        print('Loading train and dev data')
        train_examples = load_squad(interface_.train_path, draft=draft)
        dev_examples = load_squad(interface_.test_path, draft=draft)

        # GloVe is needed only when constructing the processor
        print('Loading GloVe')
        glove_words, glove_emb_mat = load_glove(
            glove_size,
            vocab_size=args.glove_vocab_size - 2,
            glove_dir=interface_.glove_dir,
            draft=draft)

        print('Constructing processor')
        processor = SquadProcessor(char_vocab_size,
                                   glove_vocab_size,
                                   word_vocab_size,
                                   elmo=elmo)
        processor.construct(train_examples, glove_words)

        # data loader
        print('Preprocessing datasets')
        train_dataset = tuple(
            processor.preprocess(example) for example in train_examples)
        dev_dataset = tuple(
            processor.preprocess(example) for example in dev_examples)

        print('Creating data loaders')
        train_sampler = SquadSampler(train_dataset,
                                     max_context_size=256,
                                     max_question_size=32,
                                     bucket=True,
                                     shuffle=True)
        train_loader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  collate_fn=processor.collate,
                                  sampler=train_sampler)

        dev_sampler = SquadSampler(dev_dataset, bucket=True)
        dev_loader = DataLoader(dev_dataset,
                                batch_size=batch_size,
                                collate_fn=processor.collate,
                                sampler=dev_sampler)

        if args.preload:
            train_loader = tuple(train_loader)
            dev_loader = tuple(dev_loader)

        out = {
            'glove_emb_mat': glove_emb_mat,
            'processor': processor,
            'train_dataset': train_dataset,
            'dev_dataset': dev_dataset,
            'train_loader': train_loader,
            'dev_loader': dev_loader
        }

        return out

    out = interface.cache(
        preprocess,
        interface_=interface) if args.cache else preprocess(interface)
    glove_emb_mat = out['glove_emb_mat']
    processor = out['processor']
    train_dataset = out['train_dataset']
    dev_dataset = out['dev_dataset']
    train_loader = out['train_loader']
    dev_loader = out['dev_loader']

    print("Initializing model weights")
    piqa_model.load_glove(torch.tensor(glove_emb_mat))

    bind_model(interface, processor, piqa_model, optimizer=optimizer)

    step = 0
    best_report = None

    print('Training')
    piqa_model.train()
    for epoch_idx in range(args.epochs):
        for i, train_batch in enumerate(train_loader):
            train_batch = {
                key: val.to(device)
                for key, val in train_batch.items()
            }
            model_output = piqa_model(step=step, **train_batch)
            train_results = processor.postprocess_batch(
                train_dataset, train_batch, model_output)
            train_loss = loss_model(step=step, **model_output, **train_batch)
            train_f1 = float(
                np.mean([result['f1'] for result in train_results]))
            train_em = float(
                np.mean([result['em'] for result in train_results]))

            # optimize
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()
            step += 1

            # report & eval & save
            if step % args.report_period == 1:
                report = OrderedDict(step=step,
                                     train_loss=train_loss.item(),
                                     train_f1=train_f1,
                                     train_em=train_em,
                                     time=time.time() - start_time)
                interface.report(**report)
                print(', '.join('%s=%.5r' % (s, r) for s, r in report.items()))

            if step % args.eval_save_period == 1:
                with torch.no_grad():
                    piqa_model.eval()
                    loss_model.eval()
                    pred = {}
                    dev_losses, dev_results = [], []
                    for dev_batch, _ in zip(dev_loader,
                                            range(args.eval_steps)):
                        dev_batch = {
                            key: val.to(device)
                            for key, val in dev_batch.items()
                        }
                        model_output = piqa_model(**dev_batch)
                        results = processor.postprocess_batch(
                            dev_dataset, dev_batch, model_output)

                        dev_loss = loss_model(step=step,
                                              **dev_batch,
                                              **model_output)

                        for result in results:
                            pred[result['id']] = result['pred']
                        dev_results.extend(results)
                        dev_losses.append(dev_loss.item())

                    dev_loss = float(np.mean(dev_losses))
                    dev_f1 = float(
                        np.mean([result['f1'] for result in dev_results]))
                    dev_em = float(
                        np.mean([result['em'] for result in dev_results]))

                    report = OrderedDict(step=step,
                                         dev_loss=dev_loss,
                                         dev_f1=dev_f1,
                                         dev_em=dev_em,
                                         time=time.time() - start_time)
                    summary = False
                    if best_report is None or report['dev_f1'] > best_report[
                            'dev_f1']:
                        best_report = report
                        summary = True
                        interface.save(iteration=step)
                        interface.pred(pred)
                    interface.report(summary=summary, **report)
                    print(
                        ', '.join('%s=%.5r' % (s, r)
                                  for s, r in report.items()),
                        '(dev_f1_best=%.5r @%d)' %
                        (best_report['dev_f1'], best_report['step']))
                    piqa_model.train()
                    loss_model.train()

            if step == args.train_steps:
                break
        if step == args.train_steps:
            break
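
The duplicated `if step == args.train_steps: break` at both loop levels is the standard way to escape a nested epoch/batch loop. An equivalent pattern that needs only one check, sketched here with hypothetical names, wraps the nesting in a generator:

def bounded_batches(loader, epochs, max_steps):
    """Yield (step, batch) across epochs, stopping after max_steps."""
    step = 0
    for _ in range(epochs):
        for batch in loader:
            step += 1
            yield step, batch
            if step == max_steps:
                return
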
Code example #6
    def infer(test_image_data_path, test_meta_data_path):
        # DONOTCHANGE This Line
        test_meta_data = pd.read_csv(test_meta_data_path,
                                     delimiter=',',
                                     header=0)

        device = 0  # CUDA device index, i.e. cuda:0

        models = args.models.split(",")
        model_weights = [float(w) for w in args.model_weights.split(",")]
        nsml_sessionss = args.nsml_sessionss.split(",")
        nsml_checkpoints = args.nsml_checkpoints.split(",")
        loss_types = args.loss_types.split(",")

        transform_random_crop = args.transform_random_crop.split(",")
        transform_random_sized_crop = args.transform_random_sized_crop.split(
            ",")
        transform_norm = args.transform_norm.split(",")
        infer_transform_center_crop = args.infer_transform_center_crop.split(
            ",")

        total_output_probs = None
        for i, model_name in enumerate(models):
            batch_size = batch_size_map[model_name] // 2

            infer_transform_list = []

            if infer_transform_center_crop[i] == "True":
                infer_transform_list.append(transforms.Resize((248, 248)))
                infer_transform_list.append(
                    transforms.CenterCrop((args.input_size, args.input_size)))
                infer_transform_list.append(transforms.ToTensor())
                if transform_norm[i] == "True":
                    infer_transform_list.append(
                        transforms.Normalize(
                            [0.44097832, 0.44847423, 0.42528335],
                            [0.25748107, 0.26744914, 0.30532702]))
            else:
                if transform_random_crop[i] == "True":
                    infer_transform_list.append(transforms.Resize((256, 256)))
                    infer_transform_list.append(
                        transforms.CenterCrop(
                            (args.input_size, args.input_size)))
                elif transform_random_sized_crop[i] == "True":
                    infer_transform_list.append(transforms.Resize((256, 256)))
                    infer_transform_list.append(
                        transforms.CenterCrop(
                            (args.input_size, args.input_size)))
                else:
                    infer_transform_list.append(
                        transforms.Resize((args.input_size, args.input_size)))
                infer_transform_list.append(transforms.ToTensor())
                if transform_norm[i] == "True":
                    infer_transform_list.append(
                        transforms.Normalize(
                            [0.44097832, 0.44847423, 0.42528335],
                            [0.25748107, 0.26744914, 0.30532702]))

            print("transform", infer_transform_list)

            dataloader = DataLoader(
                AIRushDataset(
                    test_image_data_path,
                    test_meta_data,
                    label_path=None,
                    transform=transforms.Compose(infer_transform_list)
                ),  #[transforms.Resize((args.input_size, args.input_size)), transforms.ToTensor()])),
                batch_size=batch_size,
                shuffle=False,
                num_workers=0,
                pin_memory=True)

            if model_name == "Resnet18":
                model = Resnet18(args.output_size)
            elif model_name == "Resnet152":
                model = Resnet152(args.output_size)
            elif model_name == "baseline":
                model = Baseline(args.hidden_size, args.output_size)
            elif model_name.split("-")[0] == "efficientnet":
                model = EfficientNet.from_pretrained(args.model,
                                                     args.output_size)
            else:
                raise Exception("model type is invalid : " + args.model)

            model.to(device)

            def load_fn(dir_name):
                save_state_path = os.path.join(dir_name, 'state_dict.pkl')
                state = torch.load(save_state_path)
                model.load_state_dict(state['model'])
                print("model loaded", dir_name)

            model.eval()

            nsml.load(checkpoint=nsml_checkpoints[i],
                      load_fn=load_fn,
                      session="team_13/airush1/" + nsml_sessionss[i])

            output_probs = None
            for batch_idx, image in enumerate(dataloader):
                image = image.to(device)
                output = model(image).double()

                if loss_types[i] == "cross_entropy":
                    output_prob = F.softmax(output, dim=1)
                else:
                    output_prob = torch.sigmoid(output)

                if output_probs is None:
                    output_probs = to_np(output_prob)
                else:
                    output_probs = np.concatenate(
                        [output_probs, to_np(output_prob)], axis=0)
            if total_output_probs is None:
                total_output_probs = output_probs * model_weights[i]
            else:
                total_output_probs += (output_probs * model_weights[i])

        predict = np.argmax(total_output_probs, axis=1)

        return predict  # this return type should be a numpy array which has shape of (138343)
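
The ensemble above reduces to a weighted sum of per-model class probabilities followed by an argmax. The same arithmetic in isolation, with random stand-in probabilities and hypothetical shapes:

import numpy as np

probs_per_model = [np.random.rand(4, 3) for _ in range(2)]  # (N, num_classes) each
weights = [0.7, 0.3]

total = sum(w * p for w, p in zip(weights, probs_per_model))
predict = np.argmax(total, axis=1)  # one label per sample
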
Code example #7
def main_test():
    print('Running test...')
    torch.multiprocessing.set_sharing_strategy('file_system')
    model = Baseline()
    if args.use_swa:
        model = torch.optim.swa_utils.AveragedModel(model)
    model = torch.nn.DataParallel(model).cuda()
    # ckpt structure {epoch, state_dict, optimizer, best_corr}
    if args.resume and os.path.isfile(args.resume):
        print('Load checkpoint:', args.resume)
        ckpt = torch.load(args.resume)
        args.start_epoch = ckpt['epoch']
        best_corr = ckpt['best_corr']
        model.load_state_dict(ckpt['state_dict'])
        print('Loaded ckpt at epoch:', args.start_epoch)
    else:
        print('No model given. Abort!')
        exit(1)

    test_loader = torch.utils.data.DataLoader(
        dataset=EEV_Dataset(
            csv_path=None,
            vidmap_path=args.test_vidmap,
            image_feat_path=args.image_features,
            audio_feat_path=args.audio_features,
            mode='test',
            test_freq=args.test_freq
        ),
        batch_size=None, shuffle=False,
        num_workers=args.workers, pin_memory=False
    )

    model.eval()
    batch_time = AverageMeter()

    t_start = time.time()

    outputs = []
    with torch.no_grad():
        for i, (img_feat, au_feat, frame_count, vid) in enumerate(test_loader):
            img_feat = torch.stack(img_feat).cuda()
            au_feat = torch.stack(au_feat).cuda()
            assert len(au_feat.size()) == 3, 'bad auf %s' % (vid)
            output = model(img_feat, au_feat) # [Clip S 15]
            # rearrange and remove extra padding in the end
            output = rearrange(output, 'Clip S C -> (Clip S) C')
            output = torch.cat([output, output[-1:]])  # repeat the last frame to avoid a missing final entry
            if args.train_freq < args.test_freq:
                # print('interpolating:', output.size()[0], frame_count)
                output = interpolate_output(output, args.train_freq, 6)
            # print('Interpolated:', output.size()[0], frame_count)
            # truncate extra frames
            assert output.size(0) >= frame_count, '{}/{}'.format(output.size(0), frame_count)
            output = output[:frame_count]
            outputs.append((vid, frame_count, output.cpu().detach().numpy()))

            # update statistics
            batch_time.update(time.time() - t_start)
            t_start = time.time()

            if i % args.print_freq == 0:
                output = ('Test: [{0}/{1}]\t'
                          'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                    i, len(test_loader), batch_time=batch_time))
                print(output)
    
    time_stamps = [0, 166666, 333333, 500000, 666666, 833333]  # six offsets per second, 1/6 s apart
    time_step = 1000000  # one second in the same units; time starts at 0
    header = 'Video ID,Timestamp (milliseconds),amusement,anger,awe,concentration,confusion,contempt,contentment,disappointment,doubt,elation,interest,pain,sadness,surprise,triumph\n'
   
    final_res = {}
    for vid, frame_count, out in outputs:  # videos
        video_time = frame_count // 6 + 1
        # print('video', vid, video_time)
        entry_count = 0
        for t in range(video_time): # seconds
            for i in range(6): # frames
                timestamp = time_step * t + time_stamps[i]
                fcc = t * 6 + i
                if fcc >= frame_count:
                    continue
                # print('Frame count', frame_count)
                frame_output = out[fcc]
                frame_output = [str(x) for x in frame_output]
                temp = '{vid},{timestamp},'.format(vid=vid,timestamp=timestamp) + ','.join(frame_output) + '\n'
                # file.write(temp)
                final_res.setdefault(vid, []).append(temp)
                entry_count += 1
        assert entry_count == frame_count
    # videos with missing features (fixed list for now): emit all-zero rows for them
    missing = [('WKXrnB7alT8', 2919), ('o0ooW14pIa4', 3733), ('GufMoL_MuNE', 2038),
               ('Uee0Tv1rTz8', 1316), ('ScvvOWtb04Q', 152), ('R9kJlLungmo', 3609),
               ('QMW3GuohzzE', 822), ('fjJYTW2n6rk', 4108), ('rbTIMt0VcLw', 1084),
               ('L9cdaj74kLo', 3678), ('l-ka23gU4NA', 1759)]
    for vid, length in missing:
        video_time = length // 6 + 1
        # print('video', vid, video_time)
        for t in range(video_time): # seconds
            for i in range(6): # frames
                timestamp = time_step * t + time_stamps[i]
                fcc = t * 6 + i
                if fcc >= length:
                    continue
                frame_output = ',0'*15
                temp = '{vid},{timestamp}'.format(vid=vid, timestamp=timestamp) + frame_output + '\n'
                # file.write(temp)
                final_res.setdefault(vid, []).append(temp)
    print('Write test outputs...')
    with open('test_output.csv', 'w') as file:
        file.write(header)
        temp_vidmap = [x.strip().split(' ') for x in open(args.test_vidmap)]
        temp_vidmap = [x[0] for x in temp_vidmap]
        for vid in tqdm(temp_vidmap):
            for entry in final_res[vid]:
                file.write(entry)
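
The nested `t`/`i` loops above pair each flat frame index `fcc = t * 6 + i` with a timestamp `time_step * t + time_stamps[i]`. The same mapping computed directly from a frame index, as a sketch with the constants used above:

TIME_STAMPS = [0, 166666, 333333, 500000, 666666, 833333]
TIME_STEP = 1000000

def frame_timestamp(fcc):
    # six evenly spaced frames per second: fcc = t * 6 + i
    t, i = divmod(fcc, 6)
    return TIME_STEP * t + TIME_STAMPS[i]

assert frame_timestamp(7) == 1166666  # second 1, frame 1
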
Code example #8
                output_prob = F.softmax(output, dim=1)
                predict_vector = np.argmax(to_numpy(output_prob), axis=1)
                label_vector = to_numpy(tags)
                bool_vector = predict_vector == label_vector
                accuracy = bool_vector.sum() / len(bool_vector)

                if batch_idx % args.log_interval == 0:
                    print('Batch {} / {}: Batch Loss {:.4f} / Batch Acc {:.2f}'
                          .format(batch_idx, len(train_loader), loss.item(),
                                  accuracy))
                train_loss += loss.item() / len(train_loader)
                train_total_correct += bool_vector.sum()

            model.eval()
            valid_loss = 0
            valid_total_correct = 0

            with torch.no_grad():
                for batch_idx, (image, tags) in enumerate(valid_loader):
                    if use_gpu:
                        image = image.to(device)
                        tags = tags.to(device)

                    output = model(image)
                    loss = criterion(output, tags)

                    output_prob = F.softmax(output, dim=1)
                    predict_vector = np.argmax(to_numpy(output_prob), axis=1)
                    label_vector = to_numpy(tags)
Code example #9
File: submit.py  Project: Qidian213/Emotion_challenge
    '6_5', '6_6', '6_7', '7_1', '7_2', '7_3', '7_4', '7_5', '7_6', '7_7'
]

transform_test = T.Compose([
    T.Resize([224, 224]),
    T.ToTensor(),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

model_name = 'AlexNet'
model_path = './models/1_AlexNet_0.18444_14.pth'

model = Baseline(model='test', model_name=model_name)
model.load_param(model_path)
model = model.cuda()
model = model.eval()

with open('./faces_224/anns/val_ld.txt') as f:
    records = f.read().strip().split('\n')

result_file = open("predictions.txt", 'w')
with torch.no_grad():
    for rec in records:
        rec = rec.strip('\n').split()
        img_path = rec[0]

        landmark = rec[1:]
        landmark = np.array(list(map(float, landmark)), dtype=np.float32)
        landmark = torch.tensor(landmark, dtype=torch.float32).unsqueeze(0)

        img = Image.open(img_path).convert('RGB')
        img = transform_test(img).unsqueeze(0)