# Example 1
def validate_model():
    """Evaluate the checkpointed UCF model on the test split.

    Rebuilds the test-mode config from the CLI, restores the
    ``ucf_model`` checkpoint, and returns the mean batch accuracy.
    The network is switched back into training mode before returning.
    """
    # Build the test-mode configuration from command-line arguments.
    cli_args = parse_args()
    cfg = merge_configs(parse_config(cli_args.config), 'test', vars(cli_args))

    reader = KineticsReader(cli_args.model_name.upper(), 'test',
                            cfg).create_reader()

    net = ECO.GoogLeNet(cfg['MODEL']['num_classes'],
                        cfg['MODEL']['seg_num'],
                        cfg['MODEL']['seglen'], 'RGB')

    # Restore the checkpoint saved by the training loop.
    state, _ = fluid.dygraph.load_dygraph(cli_args.save_dir + '/ucf_model')
    net.load_dict(state)
    net.eval()

    accuracies = []
    for batch in reader():
        images = np.array([sample[0] for sample in batch]).astype('float32')
        labels = np.array([[sample[1]] for sample in batch]).astype('int64')

        img_var = fluid.dygraph.to_variable(images)
        label_var = fluid.dygraph.to_variable(labels)
        label_var.stop_gradient = True

        logits, batch_acc = net(img_var, label_var)
        if logits is not None:
            accuracies.append(batch_acc.numpy()[0])

    # Put the network back into training mode for the caller.
    net.train()
    return np.mean(accuracies)
# Example 2
def validate_model():
    """Evaluate the checkpointed hapi model on the test split.

    Rebuilds the test-mode config from the CLI, restores the
    ``ucf_model_hapi`` checkpoint, and returns the mean batch accuracy.
    The network is switched back into training mode before returning.
    """
    # Assemble the test configuration from command-line options.
    cli_args = parse_args()
    cfg = merge_configs(parse_config(cli_args.config), 'test', vars(cli_args))

    dataset = ECO_Dataset(cli_args.model_name.upper(), cfg, mode='test')

    loader = paddle.io.DataLoader(dataset,
                                  places=paddle.CUDAPlace(0),
                                  batch_size=None,
                                  batch_sampler=None)

    net = ECO.GoogLeNet(cfg['MODEL']['num_classes'],
                        cfg['MODEL']['seg_num'],
                        cfg['MODEL']['seglen'], 'RGB', 0.00002)

    # Restore the checkpoint written during training.
    state = paddle.load(cli_args.save_dir + '/ucf_model_hapi')
    net.set_state_dict(state)
    net.eval()

    accuracies = []
    for batch in loader():
        images, labels = batch[0], batch[1]

        logits, batch_acc = net(images, labels)
        if logits is not None:
            accuracies.append(batch_acc.numpy()[0])

    # Restore training mode for the caller.
    net.train()
    return np.mean(accuracies)
# Example 3
def eval(args):
    """Evaluate a trained ECO GoogLeNet model on the test split.

    Loads the state dict from ``args.weights``, runs the test DataLoader
    once, and prints the mean batch accuracy.

    Args:
        args: parsed CLI namespace; must provide ``config``,
            ``model_name`` and ``weights`` (path to a saved state dict).
    """
    # parse config
    config = parse_config(args.config)
    val_config = merge_configs(config, 'test', vars(args))

    val_model = ECO.GoogLeNet(val_config['MODEL']['num_classes'],
                              val_config['MODEL']['seg_num'],
                              val_config['MODEL']['seglen'], 'RGB')

    # NOTE: the original code also loaded 'label_dir.npy' into a
    # label dictionary here, but the result was never used — the dead
    # load (and its hard dependency on the file existing) was removed.

    val_dataset = ECO_Dataset(args.model_name.upper(), val_config, mode='test')

    val_loader = paddle.io.DataLoader(val_dataset,
                                      places=paddle.CUDAPlace(0),
                                      batch_size=None,
                                      batch_sampler=None)

    # A weights path is mandatory; bail out early with a clear message.
    if not args.weights:
        print("model path must be specified")
        exit()
    weights = args.weights

    para_state_dict = paddle.load(weights)
    val_model.set_state_dict(para_state_dict)
    val_model.eval()

    acc_list = []
    for batch_id, data in enumerate(val_loader()):
        img = data[0]
        label = data[1]

        out, acc = val_model(img, label)
        acc_list.append(acc.numpy()[0])

    print("测试集准确率为:{}".format(np.mean(acc_list)))
# Example 4
def test(args):
    """Evaluate a trained fluid-dygraph ECO model on the test split.

    Loads the state dict from ``args.weights`` (mandatory), iterates the
    test reader once, and prints the mean batch accuracy.
    """
    config = parse_config(args.config)
    test_config = merge_configs(config, 'test', vars(args))
    with fluid.dygraph.guard():
        net = ECO.GoogLeNet(test_config['MODEL']['num_classes'],
                            test_config['MODEL']['seg_num'],
                            test_config['MODEL']['seglen'], 'RGB')

        # Reader over the test split.
        reader = KineticsReader(args.model_name.upper(), 'test',
                                test_config).create_reader()

        # A weights path is mandatory; bail out early otherwise.
        if not args.weights:
            print("model path must be specified")
            exit()
        weights = args.weights

        state, _ = fluid.load_dygraph(weights)
        net.load_dict(state)
        net.eval()

        accuracies = []
        for batch in reader():
            images = np.array([item[0] for item in batch]).astype('float32')
            labels = np.array([[item[1]] for item in batch]).astype('int64')

            img_var = fluid.dygraph.to_variable(images)
            label_var = fluid.dygraph.to_variable(labels)
            label_var.stop_gradient = True

            _, batch_acc = net(img_var, label_var)
            accuracies.append(batch_acc.numpy()[0])

        print("The accuracy for test dataset is:{}".format(np.mean(accuracies)))
# Example 5
def train(args):
    """Train the ECO GoogLeNet model (fluid dygraph version).

    Runs the training loop, checkpoints every 200 batches, validates at
    each checkpoint, and keeps the best model by validation accuracy.
    """
    all_train_rewards = []  # per-epoch training accuracy history
    all_test_rewards = []   # validation accuracy after each checkpoint
    prev_result = 0         # best validation accuracy seen so far
    # parse config
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        config = parse_config(args.config)
        train_config = merge_configs(config, 'train', vars(args))
        print_configs(train_config, 'Train')

        train_model = ECO.GoogLeNet(train_config['MODEL']['num_classes'],
                                    train_config['MODEL']['seg_num'],
                                    train_config['MODEL']['seglen'], 'RGB')
        # SGD with momentum 0.9, Nesterov update, and L2 weight decay.
        opt = fluid.optimizer.Momentum(
            0.001,
            0.9,
            parameter_list=train_model.parameters(),
            use_nesterov=True,
            regularization=fluid.regularizer.L2Decay(
                regularization_coeff=0.0005))

        if args.pretrain:
            # Warm-start from the best model of a previous run.
            model, _ = fluid.dygraph.load_dygraph('trained_model/best_model')
            train_model.load_dict(model)

        # build model
        if not os.path.exists(args.save_dir):
            os.makedirs(args.save_dir)

        # get reader
        train_reader = KineticsReader(args.model_name.upper(), 'train',
                                      train_config).create_reader()

        # CLI epoch count wins; otherwise fall back to the model's default.
        epochs = args.epoch or train_model.epoch_num()

        train_model.train()

        for i in range(epochs):
            for batch_id, data in enumerate(train_reader()):
                # Reader yields (image, label) pairs; stack into batch arrays.
                dy_x_data = np.array([x[0] for x in data]).astype('float32')
                y_data = np.array([[x[1]] for x in data]).astype('int64')

                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)
                label.stop_gradient = True

                out, acc = train_model(img, label)

                # NOTE(review): the model apparently returns None output for
                # some batches (skipped here) — confirm against ECO.GoogLeNet.
                if out is not None:

                    loss = fluid.layers.cross_entropy(out, label)
                    avg_loss = fluid.layers.mean(loss)

                    avg_loss.backward()

                    opt.minimize(avg_loss)
                    train_model.clear_gradients()

                    # Every 200 batches: log, checkpoint, and validate.
                    if batch_id % 200 == 0:
                        print("Loss at epoch {} step {}: {}, acc: {}".format(
                            i, batch_id, avg_loss.numpy(), acc.numpy()))
                        fluid.dygraph.save_dygraph(
                            train_model.state_dict(),
                            args.save_dir + '/ucf_model')
                        result = validate_model()

                        all_test_rewards.append(result)
                        if result > prev_result:
                            # New best validation accuracy: save model + curves.
                            prev_result = result
                            print('The best result is ' + str(result))
                            fluid.save_dygraph(train_model.state_dict(),
                                               'trained_model/best_model')
                            np.savez('result_data/ucf_data.npz',
                                     all_train_rewards=all_train_rewards,
                                     all_test_rewards=all_test_rewards)

            # NOTE(review): `acc` (and `avg_loss` below) are the values from
            # the last batch; they would be unbound if the reader yielded
            # nothing — assumed non-empty here.
            all_train_rewards.append(acc.numpy())

        logger.info("Final loss: {}".format(avg_loss.numpy()))
        print("Final loss: {}".format(avg_loss.numpy()))

        # Persist the full accuracy curves for later analysis.
        np.savez('result_data/ucf_data.npz',
                 all_train_rewards=all_train_rewards,
                 all_test_rewards=all_test_rewards)
# Example 6
def train(args):
    """Train the ECO GoogLeNet model (paddle 2.x / hapi version).

    Runs the training loop, checkpoints every 200 batches, validates
    once per epoch, and keeps the best model by validation accuracy.
    """
    all_train_rewards = []  # per-epoch training accuracy history
    all_test_rewards = []   # per-epoch validation accuracy
    prev_result = 0         # best validation accuracy seen so far

    config = parse_config(args.config)
    train_config = merge_configs(config, 'train', vars(args))
    print_configs(train_config, 'Train')

    train_model = ECO.GoogLeNet(train_config['MODEL']['num_classes'],
                                train_config['MODEL']['seg_num'],
                                train_config['MODEL']['seglen'], 'RGB',
                                0.00002)
    # SGD with momentum 0.9; learning rate 0.001.
    opt = paddle.optimizer.Momentum(0.001,
                                    0.9,
                                    parameters=train_model.parameters())

    if args.pretrain:
        # load the pretrained model
        model_dict = paddle.load('best_model/best_model_seg12')

        train_model.set_state_dict(model_dict)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    train_dataset = ECO_Dataset(args.model_name.upper(),
                                train_config,
                                mode='train')

    train_loader = paddle.io.DataLoader(train_dataset,
                                        places=paddle.CUDAPlace(0),
                                        batch_size=None,
                                        batch_sampler=None)

    # CLI epoch count wins; otherwise fall back to the model's default.
    epochs = args.epoch or train_model.epoch_num()

    train_model.train()

    for i in range(epochs):

        for batch_id, data in enumerate(train_loader()):

            # Loader yields already-batched tensors: (images, labels).
            img = data[0]
            label = data[1]

            out, acc = train_model(img, label)

            # NOTE(review): the model apparently returns None output for
            # some batches (skipped here) — confirm against ECO.GoogLeNet.
            if out is not None:

                loss = paddle.nn.functional.cross_entropy(out, label)
                avg_loss = paddle.mean(loss)

                avg_loss.backward()

                # NOTE(review): uses the legacy minimize/clear_gradients
                # pair rather than opt.step()/opt.clear_grad().
                opt.minimize(avg_loss)
                train_model.clear_gradients()

                # Every 200 batches: log progress and checkpoint.
                if batch_id % 200 == 0:
                    print("Loss at epoch {} step {}: {}, acc: {}".format(
                        i, batch_id, avg_loss.numpy(), acc.numpy()))
                    paddle.save(train_model.state_dict(),
                                args.save_dir + '/ucf_model_hapi')
        # `acc` is the last batch's accuracy; assumes a non-empty loader.
        all_train_rewards.append(acc.numpy())

        # Validate once per epoch against the latest checkpoint.
        result = validate_model()

        all_test_rewards.append(result)
        if result > prev_result:
            prev_result = result
            print('The best result is ' + str(result))
            paddle.save(train_model.state_dict(),
                        'best_model/final_best_model_hapi')  # save the model
    logger.info("Final loss: {}".format(avg_loss.numpy()))
    print("Final loss: {}".format(avg_loss.numpy()))

    # Persist the full accuracy curves for later analysis.
    np.savez('result/final_ucf_data_hapi.npz',
             all_train_rewards=all_train_rewards,
             all_test_rewards=all_test_rewards)