Example #1
def run_nag(dataset, loss, alpha=1, regularizer=None, verbose=False, eta=0.01):
    # Normalized gradient descent, following the Langford paper.
    print("dataset %s using model linear, normalized gradient algorithm with %s-regularized %s loss." % (dataset, regularizer, loss))

    from models.linear_model import LinearModel
    m = LinearModel(2)

    if loss == "squared_loss":
        from losses.squared_loss import SquaredLoss
        ls = SquaredLoss(m)
    elif loss == "abs_loss":
        from losses.abs_loss import AbsLoss
        ls = AbsLoss(m)
    else:
        raise ValueError("no valid loss specified")

    if regularizer == "l2":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l2 import L2
        l = RegularizedLoss(m, ls, L2(), alpha)
    elif regularizer == "l1":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l1 import L1
        l = RegularizedLoss(m, ls, L1(), alpha)
    elif regularizer is None:
        l = ls
    else:
        raise ValueError("invalid regularization specified")

    from algos.nag import NAG
    alg = NAG(m, l, eta, verbose=verbose)

    run_data("datasets/%s" % dataset, alg)
    print("The parameter vector after training is")
    print(m.get_param_vector())
    del m, ls, l, alg
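A typical call might look like the following; the dataset name is only a placeholder (run_data() prefixes it with datasets/), not a file known to ship with the project:

# Hypothetical invocation; "toy_2d.csv" is an assumed dataset name.
run_nag("toy_2d.csv", "squared_loss", regularizer="l2", alpha=0.1, eta=0.01, verbose=True)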
Example #2
    def test_hl_comparison(self):
        np.random.seed(4509)
        n = 100
        x_ = np.random.normal(size=(n, 4))
        y_ = x_[:, 0] + 0.3 * x_[:, 3] + 0.5 * np.random.normal(size=n)

        rtol = 1e-4
        for scale in [True, False]:
            for loss in LOSS_TYPES:
                np.testing.assert_allclose(
                    LinearModel(loss_type=loss, quantile=0.3,
                                scale=scale).fit(x_, y_)._weights,
                    fit_linear_lbfgs(x_, y_, loss_type=loss, scale=scale,
                                     quantile=0.3)[0],
                    rtol=rtol)

        np.testing.assert_allclose(
            LinearModel(loss_type='quantile',
                        quantile=0.3).fit(x_, y_)._weights,
            fit_linear_lbfgs(x_, y_, loss_type='quantile', quantile=0.3)[0],
            rtol=rtol)
        np.testing.assert_allclose(
            LinearModel(loss_type='quantile',
                        quantile=0.7).fit(x_, y_)._weights,
            fit_linear_lbfgs(x_, y_, loss_type='quantile', quantile=0.7)[0],
            rtol=rtol)
        np.testing.assert_allclose(
            LinearModel(loss_type='l2', scale=False).fit(x_, y_)._weights,
            fit_linear_lbfgs(x_, y_, loss_type='l2')[0], rtol=rtol)

        np.testing.assert_allclose(
            LinearModel(loss_type='l2', scale=False).fit(x_, y_)._bias,
            fit_linear_lbfgs(x_, y_, loss_type='l2')[2], rtol=rtol)
Example #3
def run_nag(dataset,loss,alpha=1,regularizer=None,verbose=False,eta=0.01):
    # Normalized gradient descent, following the Langford paper.
    print("dataset %s using model linear, normalized gradient algorithm with %s-regularized %s loss."%(dataset,regularizer,loss))

    from models.linear_model import LinearModel
    m=LinearModel(2)

    if loss=="squared_loss":
        from losses.squared_loss import SquaredLoss
        ls=SquaredLoss(m)
    elif loss=="abs_loss":
        from losses.abs_loss import AbsLoss
        ls=AbsLoss(m)
    else:
        raise ValueError("no valid loss specified")

    if regularizer=="l2":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l2 import L2
        l=RegularizedLoss(m,ls,L2(),alpha)
    elif regularizer=="l1":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l1 import L1
        l=RegularizedLoss(m,ls,L1(),alpha)
    elif regularizer is None:
        l=ls
    else:
        raise ValueError("invalid regularization specified")

    from algos.nag import NAG
    alg=NAG(m,l,eta,verbose=verbose)

    run_data("datasets/%s"%dataset,alg)
    print("The parameter vector after training is")
    print(m.get_param_vector())
    del m, ls, l, alg
Example #4
def run_ssgd(dataset,
             loss,
             model="linear",
             alpha=1,
             regularizer=None,
             verbose=False,
             scaler="scale_mean0",
             replay=False,
             period1=1,
             period2=1):
    #regularized sgd
    from algos.ssgd import SSGD
    print(
        "dataset %s using model %s, %s-regularized sgd %s loss and scaling %s. eta=0.01 and replay=%s"
        % (dataset, model, regularizer, loss, scaler, replay))

    if model == "linear":
        from models.linear_model import LinearModel
        m = LinearModel(2)
    elif model == "affine":
        from models.affine_model import AffineModel
        m = AffineModel(2)
    else:
        raise ValueError("no valid model specified")

    if loss == "squared_loss":
        from losses.squared_loss import SquaredLoss
        ls = SquaredLoss(m)
    elif loss == "abs_loss":
        from losses.abs_loss import AbsLoss
        ls = AbsLoss(m)
    else:
        raise ValueError("no valid loss specified")

    if regularizer == "l2":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l2 import L2
        l = RegularizedLoss(m, ls, L2(), alpha)
    elif regularizer == "l1":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l1 import L1
        l = RegularizedLoss(m, ls, L1(), alpha)
    elif regularizer is None:
        l = ls
    else:
        raise ValueError("invalid regularization specified")

    alg = SSGD(m, l, 1, verbose=verbose, scaler=scaler)

    run_data("datasets/%s" % dataset, alg, replay=replay)
    print("The parameter vector after training is")
    print(m.get_param_vector())
    del m, ls, l, alg
Example #5
def get_model(opt, input_size):
    '''
    Set up the model according to the options.
    :param opt: the options dict, which contains the info needed to build the network
    :param input_size: the size of the inputs the model will expect
    :return: a fully constructed model
    '''
    if opt['model'] == 'Linear':
        input_shape = 1
        for l in input_size:
            input_shape *= l
        layers = [(input_shape, opt['hidden_units'], True), ['relu'],
                  (opt['hidden_units'], opt['hidden_units'], True),
                  ['softmax'], (opt['hidden_units'], 2, True), ['softmax']]
        linear = LinearModel(opt, layers)
        return linear
    elif opt['model'] == 'Recurrent':
        rec = RecurrentModel(opt, input_size)
        return rec
    elif opt['model'] == 'AttentionRecurrent':
        raise NotImplementedError(
            'AttentionalRecurrent model is not finished yet.')
        arec = AttentionalRecurrentModel(opt, input_size)
        return arec
    elif opt['model'] == 'Convolutional':
        convo = ConvolutionalModel(opt, input_size)
        return convo
    elif opt['model'] == 'BiConvolutional':
        biconvo = BiConvolutionalModel(opt, input_size)
        return biconvo
    elif opt['model'] == 'Sequential':
        return None
    else:
        raise NotImplementedError('This model has not been yet implemented.')
Example #6
def run_sgd(dataset,loss,model="linear",alpha=1,regularizer=None,verbose=False):
    #regularized sgd
    from algos.sgd import SGD
    print("dataset %s using model %s, %s-regularized sgd %s loss. eta=0.01"%(dataset,model,regularizer,loss))

    if model=="linear":
        from models.linear_model import LinearModel
        m=LinearModel(2)
    elif model=="affine":
        from models.affine_model import AffineModel
        m=AffineModel(2)
    else:
        raise ValueError("no valid model specified")

    if loss=="squared_loss":
        from losses.squared_loss import SquaredLoss
        ls=SquaredLoss(m)
    elif loss=="abs_loss":
        from losses.abs_loss import AbsLoss
        ls=AbsLoss(m)
    else:
        raise ValueError("no valid loss specified")

    if regularizer=="l2":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l2 import L2
        l=RegularizedLoss(m,ls,L2(),alpha)
    elif regularizer=="l1":
        from losses.regularized_loss import RegularizedLoss
        from losses.regularizations.l1 import L1
        l=RegularizedLoss(m,ls,L1(),alpha)
    elif regularizer is None:
        l=ls
    else:
        raise ValueError("invalid regularization specified")

    alg=SGD(m,l,1,verbose=verbose)

    run_data("datasets/%s"%dataset,alg)
    print("The parameter vector after training is")
    print(m.get_param_vector())
    del m, ls, l, alg
Example #7
    def build_dual_from_max_primal(self) -> LinearModel:
        # Building FO
        variables = []
        fo_coefficients = [i[0] for i in self.primal.b]
        fo_coefficients_variables = []
        for index, c in enumerate(self.primal.constraints):
            var = None
            if c.equality_operator == '<=':
                var = Variable(name='y{0}'.format(index + 1),
                               initial_index=index)
            elif c.equality_operator == '>=':
                var = Variable(name='y{0}'.format(index + 1),
                               initial_index=index,
                               constraint=VariableConstraint.Negative)
            elif c.equality_operator == '=':
                var = Variable(name='y{0}'.format(index + 1),
                               initial_index=index,
                               constraint=VariableConstraint.Unrestricted)
            variables.append(var)
            fo_coefficients_variables.append((fo_coefficients[index], var))

        fo = ObjectiveFunction('min', fo_coefficients_variables)

        # Building Constraints
        constraints_inequalities = []
        for v in self.primal.fo.variables:
            if v.non_positive:
                constraints_inequalities.append('<=')
            elif v.free:
                constraints_inequalities.append('=')
            else:
                constraints_inequalities.append('>=')

        constraints = []
        At = self.primal.A.transpose()
        right_side = self.primal.fo.coefficients
        _i = 0
        for row in At:
            const_coefficients_variables = []
            for index, v in enumerate(variables):
                const_coefficients_variables.append((row[index], v))
            constraint = Constraint(
                name='R{0}'.format(_i + 1),
                coefficients_variables=const_coefficients_variables,
                equality_operator=constraints_inequalities[_i],
                right_side=right_side[_i])
            constraints.append(constraint)
            _i = _i + 1

        return LinearModel(objective_function=fo,
                           constraints_list=constraints,
                           name=self.primal.name + '- Dual')
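For reference, the branches above implement textbook LP duality for a maximization primal: max c^T x subject to Ax {<=, =, >=} b maps to min b^T y subject to A^T y {>=, =, <=} c. A '<=' primal constraint yields a dual variable y_i >= 0, a '>=' constraint yields y_i <= 0, and an '=' constraint yields a free y_i; symmetrically, a non-negative primal variable yields a '>=' dual constraint, a non-positive one a '<=', and a free one an '='. That is exactly what the two loops do: the first builds the dual variables from self.primal.constraints, the second builds the dual constraints from the transposed coefficient matrix and the primal objective coefficients.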
Example #8
    def test_hl_large(self):
        np.random.seed(4509)
        n = 1_000_000
        x_ = np.random.normal(size=(n, 100))
        y_ = x_[:, 0] + 0.3 * x_[:, 3] + 0.5 * np.random.normal(size=n)
        y_ += x_[:, 5] * (0.3 + x_[:, 6]) * 0.3
        y_ += 0.03 * x_[:, 10]

        t0 = time.time()
        linear_model = LinearModel(loss_type='l2', l1_w=0.01, scale=True).fit(x_, y_)
        print('time:', time.time() - t0)
        print(linear_model._weights.ravel())

        t0 = time.time()
        lm = Lasso(alpha=0.1).fit(x_, y_)
        print('time:', time.time() - t0)
        print(lm.coef_)
Example #9
def create_net(task_config, projector_config):

    model = task_config['model']
    output_size = task_config['output_size']
    context_size = projector_config.get('context_size', 0)  # default to 0 so the hyper-network check below never compares None > 0
    block_in = projector_config.get('block_in', None)
    block_out = projector_config.get('block_out', None)

    print("Creating", model)

    if context_size > 0:
        hyper = True
        hyperlayers = ['conv2']
    else:
        hyper = False
        hyperlayers = []

    if model == 'deepbind':
        num_filters = task_config.get('num_filters', 16)
        hidden_dim = task_config.get('hidden_dim', 32)
        net = DeepBind(context_size,
                       block_in,
                       block_out, {'context_size': 100},
                       hyper,
                       filters=num_filters,
                       hidden_units=hidden_dim)

    elif model == 'linear_model':
        input_size = task_config.get('input_dim', 20)
        net = LinearModel(context_size,
                          block_in,
                          block_out,
                          input_dim=input_size,
                          output_dim=output_size,
                          hyper=hyper)

    elif model == 'lstm_language_model':
        layer_size = task_config.get('layer_size', 32)
        net = LSTMLanguageModel(context_size,
                                block_in,
                                block_out,
                                ninp=layer_size,
                                nhid=layer_size,
                                hyper=hyper)

    elif model == 'wide_resnet':
        N = task_config.get('N', 6)
        k = task_config.get('k', 1)
        num_classes = output_size
        net = WideResnet(context_size, block_in, block_out, N, k, num_classes,
                         hyper)

    elif model == 'lenet':
        if context_size > 0:
            hyperlayers = ['conv2', 'fc1', 'fc2']
        net = LeNet(context_size, block_in, block_out, hyperlayers)

    else:
        print("Please select a valid model kind")
        sys.exit(0)

    return net
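A hedged usage sketch for create_net; the dictionary keys mirror the lookups above, but the concrete values and the hand-built dicts are illustrative assumptions only:

# Illustrative configs; real runs presumably load these from experiment files.
task_config = {'model': 'linear_model', 'output_size': 10, 'input_dim': 20}
projector_config = {'context_size': 0, 'block_in': 8, 'block_out': 8}  # context_size 0 => no hyper-network
net = create_net(task_config, projector_config)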
Example #10
from models.linear_model import LinearModel

print('Part 2: Initializing training with regularization.\n')

# Ignore overflows from learning rates with exploding gradient.
np.seterr(all='ignore')

# Training - Part 2, adjusting regularization parameter to investigate effect on the model.

lambdas = sorted([10**x for x in range(-3, 3)])
rate = 1e-05
for lam in lambdas:
    model = LinearModel(train='data/PA1_train.pkl',
                        validation='data/PA1_dev.pkl',
                        test='data/PA1_test.pkl',
                        target='price',
                        rate=rate,
                        lam=lam,
                        eps=2.5,
                        normalize=True)

    learned_model = model.train_model(max_iter=10000)

    if (learned_model['exploding'] is False
            and learned_model['convergence'] is True):
        print('Training complete.')

    # Save output for learned model to .json file.
    filename = 'rate_' + str('{:.0E}'.format(rate)) + '_lam_' + str(
        '{:.0E}'.format(lam))
    my_path = Path('..', 'model_output', 'part_2', filename + '_train.json')
    train_path = Path(__file__).parent.resolve().joinpath(my_path)
Example #11
    def branch_and_bound(self):
        # Relaxed Solution
        relaxed_solution = self.solve_two_phase(self.model)
        relaxed_branch = Branch(relaxed_solution)
        if relaxed_branch.has_only_integers or not relaxed_branch.needs_branching or len(
                relaxed_branch.needs_branching) <= 0:
            self.solution = relaxed_solution
            self.best_solution = relaxed_solution
            print(
                '[WARNING]: Branch and Bound relaxed solution only contained integers.'
            )
            return

        # Global variables
        possible_solutions = []
        global_fo = 0
        best_solution = None
        base_constraints = self.model.constraints
        iteration = 0
        has_finished = False

        # Branch and Bound
        v = relaxed_branch.variable_to_branch
        needs_further_branching = []
        loop_constraints = base_constraints
        parent_branch = relaxed_branch
        __i__ = 1
        while not has_finished:
            coefficients_variables = [
                (0, _v[1])
                if _v[1].id != parent_branch.variable_to_branch[1].id else
                (1, _v[1]) for _v in parent_branch.solution.decision_variables
            ]
            lower_bound = floor(v[0])
            upper_bound = ceil(v[0])
            c_down = Constraint(coefficients_variables, '<=', lower_bound)
            c_up = Constraint(coefficients_variables, '>=', upper_bound)
            const_down = deepcopy(loop_constraints)
            const_up = deepcopy(loop_constraints)
            const_down.append(c_down)
            const_up.append(c_up)
            l_down = LinearModel(objective_function=self.model.fo,
                                 constraints_list=const_down,
                                 name='Branch {0}'.format(iteration))
            iteration = iteration + 1
            l_up = LinearModel(objective_function=self.model.fo,
                               constraints_list=const_up,
                               name='Branch {0}'.format(iteration))
            iteration = iteration + 1
            s_up = self.solve_two_phase(l_up)
            s_down = self.solve_two_phase(l_down)
            b_down = Branch(solution=s_down)
            b_down.constraints = const_down
            parent_branch.children.append(b_down)
            b_up = Branch(solution=s_up)
            b_up.constraints = const_up
            parent_branch.children.append(b_up)

            if b_down.feasible:
                if b_down.has_only_integers:
                    possible_solutions.append(b_down)
                    if b_down.fo_value > global_fo:
                        global_fo = b_down.fo_value
                        best_solution = b_down
                else:
                    needs_further_branching.append(b_down)

            if b_up.feasible:
                if b_up.has_only_integers:
                    possible_solutions.append(b_up)
                    if b_up.fo_value > global_fo:
                        global_fo = b_up.fo_value
                        best_solution = b_up
                else:
                    needs_further_branching.append(b_up)

            if needs_further_branching and len(needs_further_branching) > 0:
                needs_further_branching = sorted(needs_further_branching,
                                                 key=lambda _b: _b.fo_value,
                                                 reverse=True)
                possible_next_branch = needs_further_branching[0]
                if possible_next_branch.fo_value > global_fo:
                    v = possible_next_branch.variable_to_branch
                    loop_constraints = possible_next_branch.constraints
                    needs_further_branching.pop(0)
                    parent_branch = possible_next_branch
                    __i__ += 1
                else:
                    has_finished = True
                    self.branch_tree = BranchTree(root_branch=relaxed_branch)
                    self.best_solution = best_solution
                    self.all_solutions = possible_solutions
            else:
                has_finished = True
                self.branch_tree = BranchTree(root_branch=relaxed_branch)
                self.best_solution = best_solution
                self.all_solutions = possible_solutions
Example #12
TEST_DATASET_SIZE = 100

data = np.genfromtxt('data.csv', delimiter=',')
weights = data[1:, 2:3]
heights = data[1:, 1:2]
r = np.corrcoef(weights.flatten(), heights.flatten())[0, 1]
print("Weights - Heights correlation coefficient: ", r)

max_r_2 = 0.
best_handler = None

rrh = ReadyRegressionHandler(TEST_DATASET_SIZE)
rrh.run(weights, heights)

linear_model = LinearModel()
crh = CustomRegressionHandler(linear_model, TEST_DATASET_SIZE)
crh.run(weights, heights)
max_r_2, best_handler = crh.compare(max_r_2, best_handler)

parabolic_model = ParabolicModel()
crh = CustomRegressionHandler(parabolic_model, TEST_DATASET_SIZE)
crh.run(weights, heights)
max_r_2, best_handler = crh.compare(max_r_2, best_handler)

exponential_model = ExponentialModel()
crh = CustomRegressionHandler(exponential_model, TEST_DATASET_SIZE)
crh.run(weights, heights)
max_r_2, best_handler = crh.compare(max_r_2, best_handler)

power_model = PowerModel()
Example #13
train_data = np.load("./datasets/linear_train.npy")
test_x = np.load("./datasets/linear_test_x.npy")

# Convert to the shape TF expects
x_data = np.expand_dims(train_data[:, 0], axis=1)
y_data = train_data[:, 1]

# Visualize the training data
plt.scatter(x_data,
            y_data,
            c=np.random.random(len(y_data)),
            cmap=plt.cm.rainbow)
plt.show()

# Create the model
model = LinearModel(num_units=1)

# Bind the optimizer and loss function to the model
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
              loss=tf.keras.losses.MSE,
              metrics=[tf.keras.metrics.MeanSquaredError()])

# Train the model
model.fit(x=x_data, y=y_data, epochs=10, batch_size=32)

# Run the model on the test inputs
prediction = model.predict(x=test_x, batch_size=None)

# Visualize the results
plt.scatter(x_data,
            y_data,
Example #14
def main(args):
    print('==> Using settings {}'.format(args))

    print('==> Loading dataset...')
    dataset_path = path.join('data', 'data_3d_' + args.dataset + '.npz')
    if args.dataset == 'h36m':
        from common.h36m_dataset import Human36mDataset, TRAIN_SUBJECTS, TEST_SUBJECTS
        dataset = Human36mDataset(dataset_path)
        subjects_train = TRAIN_SUBJECTS
        subjects_test = TEST_SUBJECTS
    else:
        raise KeyError('Invalid dataset')

    print('==> Preparing data...')
    dataset = read_3d_data(dataset)

    print('==> Loading 2D detections...')
    keypoints = create_2d_data(path.join('data', 'data_2d_' + args.dataset + '_' + args.keypoints + '.npz'), dataset)

    action_filter = None if args.actions == '*' else args.actions.split(',')
    if action_filter is not None:
        # Use a list (not a lazy map object) so len() and printing below work as intended.
        action_filter = [dataset.define_actions(x)[0] for x in action_filter]
        print('==> Selected actions: {}'.format(action_filter))

    stride = args.downsample
    cudnn.benchmark = True
    device = torch.device("cpu")  # TODO: switch back to "cuda" when a GPU is available

    # Create model
    print("==> Creating model...")
    num_joints = dataset.skeleton().num_joints()
    model_pos = LinearModel(num_joints * 2, (num_joints - 1) * 3).to(device)
    model_pos.apply(init_weights)
    print("==> Total parameters: {:.2f}M".format(sum(p.numel() for p in model_pos.parameters()) / 1000000.0))


    criterion = nn.MSELoss(reduction='mean').to(device)
    optimizer = torch.optim.Adam(model_pos.parameters(), lr=args.lr)

    # Optionally resume from a checkpoint
    if args.resume or args.evaluate:
        ckpt_path = (args.resume if args.resume else args.evaluate)

        if path.isfile(ckpt_path):
            print("==> Loading checkpoint '{}'".format(ckpt_path))
            ckpt = torch.load(ckpt_path, map_location='cpu')
            start_epoch = ckpt['epoch']
            error_best = ckpt['error']
            glob_step = ckpt['step']
            lr_now = ckpt['lr']
            model_pos.load_state_dict(ckpt['state_dict'])
            optimizer.load_state_dict(ckpt['optimizer'])
            print("==> Loaded checkpoint (Epoch: {} | Error: {})".format(start_epoch, error_best))

            if args.resume:
                ckpt_dir_path = path.dirname(ckpt_path)
                logger = Logger(path.join(ckpt_dir_path, 'log.txt'), resume=True)
        else:
            raise RuntimeError("==> No checkpoint found at '{}'".format(ckpt_path))
    else:
        start_epoch = 0
        error_best = None
        glob_step = 0
        lr_now = args.lr
        ckpt_dir_path = path.join(args.checkpoint, datetime.datetime.now().isoformat())

        if not path.exists(ckpt_dir_path):
            os.makedirs(ckpt_dir_path)
            print('==> Making checkpoint dir: {}'.format(ckpt_dir_path))

        logger = Logger(os.path.join(ckpt_dir_path, 'log.txt'))
        logger.set_names(['epoch', 'lr', 'loss_train', 'error_eval_p1', 'error_eval_p2'])

    if args.evaluate:
        print('==> Evaluating...')

        if action_filter is None:
            action_filter = dataset.define_actions()

        errors_p1 = np.zeros(len(action_filter))
        errors_p2 = np.zeros(len(action_filter))

        for i, action in enumerate(action_filter):
            poses_valid, poses_valid_2d, actions_valid = fetch(subjects_test, dataset, keypoints, [action], stride)
            valid_loader = DataLoader(PoseGenerator(poses_valid, poses_valid_2d, actions_valid),
                                      batch_size=args.batch_size, shuffle=False,
                                      num_workers=args.num_workers, pin_memory=True)
            errors_p1[i], errors_p2[i] = evaluate(valid_loader, model_pos, device)

        print('Protocol #1   (MPJPE) action-wise average: {:.2f} (mm)'.format(np.mean(errors_p1).item()))
        print('Protocol #2 (P-MPJPE) action-wise average: {:.2f} (mm)'.format(np.mean(errors_p2).item()))
        exit(0)

    poses_train, poses_train_2d, actions_train = fetch(subjects_train, dataset, keypoints, action_filter, stride)
    train_loader = DataLoader(PoseGenerator(poses_train, poses_train_2d, actions_train), batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_workers, pin_memory=True)

    poses_valid, poses_valid_2d, actions_valid = fetch(subjects_test, dataset, keypoints, action_filter, stride)
    valid_loader = DataLoader(PoseGenerator(poses_valid, poses_valid_2d, actions_valid), batch_size=args.batch_size,
                              shuffle=False, num_workers=args.num_workers, pin_memory=True)

    for epoch in range(start_epoch, args.epochs):
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr_now))

        # Train for one epoch
        epoch_loss, lr_now, glob_step = train(train_loader, model_pos, criterion, optimizer, device, args.lr, lr_now,
                                              glob_step, args.lr_decay, args.lr_gamma, max_norm=args.max_norm)

        # Evaluate
        error_eval_p1, error_eval_p2 = evaluate(valid_loader, model_pos, device)

        # Update log file
        logger.append([epoch + 1, lr_now, epoch_loss, error_eval_p1, error_eval_p2])

        # Save checkpoint
        if error_best is None or error_best > error_eval_p1:
            error_best = error_eval_p1
            save_ckpt({'epoch': epoch + 1, 'lr': lr_now, 'step': glob_step, 'state_dict': model_pos.state_dict(),
                       'optimizer': optimizer.state_dict(), 'error': error_eval_p1}, ckpt_dir_path, suffix='best')

        if (epoch + 1) % args.snapshot == 0:
            save_ckpt({'epoch': epoch + 1, 'lr': lr_now, 'step': glob_step, 'state_dict': model_pos.state_dict(),
                       'optimizer': optimizer.state_dict(), 'error': error_eval_p1}, ckpt_dir_path)

    logger.close()
    logger.plot(['loss_train', 'error_eval_p1'])
    savefig(path.join(ckpt_dir_path, 'log.eps'))

    return
Example #15
def main(args):
    print('==> Using settings {}'.format(args))

    convm = torch.zeros(3, 17, 17, dtype=torch.float)

    print('==> Loading dataset...')
    dataset_path = path.join('data', 'data_3d_' + args.dataset + '.npz')
    if args.dataset == 'h36m':
        from common.h36m_dataset import Human36mDataset
        dataset = Human36mDataset(dataset_path)
    else:
        raise KeyError('Invalid dataset')

    print('==> Preparing data...')
    dataset = read_3d_data(dataset)

    print('==> Loading 2D detections...')
    keypoints = create_2d_data(
        path.join('data',
                  'data_2d_' + args.dataset + '_' + args.keypoints + '.npz'),
        dataset)

    cudnn.benchmark = True
    device = torch.device("cuda")

    # Create model
    print("==> Creating model...")

    if args.architecture == 'linear':
        from models.linear_model import LinearModel, init_weights
        num_joints = dataset.skeleton().num_joints()
        model_pos = LinearModel(num_joints * 2,
                                (num_joints - 1) * 3).to(device)
        model_pos.apply(init_weights)
    elif args.architecture == 'gcn':
        from models.sem_gcn import SemGCN
        from common.graph_utils import adj_mx_from_skeleton
        p_dropout = (None if args.dropout == 0.0 else args.dropout)
        adj = adj_mx_from_skeleton(dataset.skeleton())
        model_pos = SemGCN(convm,
                           adj,
                           args.hid_dim,
                           num_layers=args.num_layers,
                           p_dropout=p_dropout,
                           nodes_group=dataset.skeleton().joints_group()
                           if args.non_local else None).to(device)
    else:
        raise KeyError('Invalid model architecture')

    print("==> Total parameters: {:.2f}M".format(
        sum(p.numel() for p in model_pos.parameters()) / 1000000.0))

    # Resume from a checkpoint
    ckpt_path = args.evaluate

    if path.isfile(ckpt_path):
        print("==> Loading checkpoint '{}'".format(ckpt_path))
        ckpt = torch.load(ckpt_path)
        start_epoch = ckpt['epoch']
        error_best = ckpt['error']
        model_pos.load_state_dict(ckpt['state_dict'])
        print("==> Loaded checkpoint (Epoch: {} | Error: {})".format(
            start_epoch, error_best))
    else:
        raise RuntimeError("==> No checkpoint found at '{}'".format(ckpt_path))

    print('==> Rendering...')

    poses_2d = keypoints[args.viz_subject][args.viz_action]
    out_poses_2d = poses_2d[args.viz_camera]
    out_actions = [args.viz_camera] * out_poses_2d.shape[0]

    poses_3d = dataset[args.viz_subject][args.viz_action]['positions_3d']
    assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
    out_poses_3d = poses_3d[args.viz_camera]

    ground_truth = dataset[args.viz_subject][args.viz_action]['positions_3d'][
        args.viz_camera].copy()

    input_keypoints = out_poses_2d.copy()
    render_loader = DataLoader(PoseGenerator([out_poses_3d], [out_poses_2d],
                                             [out_actions]),
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=args.num_workers,
                               pin_memory=True)

    prediction = evaluate(render_loader, model_pos, device,
                          args.architecture)[0]

    # Invert camera transformation
    cam = dataset.cameras()[args.viz_subject][args.viz_camera]
    prediction = camera_to_world(prediction, R=cam['orientation'], t=0)
    prediction[:, :, 2] -= np.min(prediction[:, :, 2])
    ground_truth = camera_to_world(ground_truth, R=cam['orientation'], t=0)
    ground_truth[:, :, 2] -= np.min(ground_truth[:, :, 2])

    anim_output = {'Regression': prediction, 'Ground truth': ground_truth}
    input_keypoints = image_coordinates(input_keypoints[..., :2],
                                        w=cam['res_w'],
                                        h=cam['res_h'])
    render_animation(input_keypoints,
                     anim_output,
                     dataset.skeleton(),
                     dataset.fps(),
                     args.viz_bitrate,
                     cam['azimuth'],
                     args.viz_output,
                     limit=args.viz_limit,
                     downsample=args.viz_downsample,
                     size=args.viz_size,
                     input_video_path=args.viz_video,
                     viewport=(cam['res_w'], cam['res_h']),
                     input_video_skip=args.viz_skip)
Example #16
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from models.linear_model import LinearModel

# Load the data
train_data = np.load(".\\datasets\\linear_train.npy")
# test_x = np.load(".\\datasets\\linear_test_x.npy")

# Convert to the shape TF expects
x_data = np.expand_dims(train_data[:, 0], axis=1)  # keep only the x column of train_data
y_data = train_data[:, 1]  # keep only the y column of train_data

# Create the model
model = LinearModel(num_units=1)

# Bind the optimizer and loss function to the model
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
              loss=tf.keras.losses.MSE,
              metrics=[tf.keras.metrics.MeanSquaredError()])

# SGD (Stochastic Gradient Descent): input samples are picked stochastically.
# loss: MSE (mean squared error cost function)
# MeanSquaredError: the error between the true values and the predictions

# Print a summary of the model
model.summary()
# Note: np.load on a .npy file returns a plain ndarray, so there is nothing to close here.
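LinearModel itself is not shown in this listing; a minimal sketch of what it plausibly looks like, assuming it subclasses tf.keras.Model and wraps a single Dense layer with no activation (an assumption, not the project's confirmed implementation):

import tensorflow as tf

class LinearModel(tf.keras.Model):
    """Illustrative sketch: a single dense layer, i.e. y = Wx + b."""

    def __init__(self, num_units=1):
        super().__init__()
        self.dense = tf.keras.layers.Dense(units=num_units)

    def call(self, inputs):
        return self.dense(inputs)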
Example #17
    Used to test instance of LinearModel() class.
"""

import pathlib
import pickle
import pprint

from models.linear_model import LinearModel

pp = pprint.PrettyPrinter()

model = LinearModel(train='data/PA1_train.pkl',
                    validation='data/PA1_dev.pkl',
                    test='data/PA1_test.pkl',
                    target='price',
                    rate=1e-05,
                    lam=0.001,
                    eps=0.5,
                    normalize=True)

names = model.weight_labels
learned_model = model.train_model(50000)
val_predictions = model.predict_validation(
    learned_model['weights'])['predictions']
test_predictions = model.predict_test(
    (learned_model['weights']))['predictions']

prediction_output = pathlib.Path('model_output/predictions.pkl')
prediction_file = pathlib.Path('model_output/predictions.txt')

pred_output_path = pathlib.Path(__file__).parent.resolve().joinpath(
Example #18
        variable_leave_B = solver.B_variables[variable_leave_B_index]
        variable_join_N = solver.N_variables[variable_join_N_index]

        solver.B_variables[variable_leave_B_index] = variable_join_N
        solver.N_variables[variable_join_N_index] = variable_leave_B
        self.current_iteration = self.current_iteration + 1

        self.__solve_lp__(__solver__=solver)


if __name__ == '__main__':
    x1 = Variable(name='x1')
    x2 = Variable(name='x2')
    x3 = Variable(name='x3')
    fo = ObjectiveFunction('min', [(1, x1), (-1, x2), (2, x3)])
    c1 = Constraint([(1, x1), (1, x2), (1, x3)], '=', 3)
    c2 = Constraint([(2, x1), (-1, x2), (3, x3)], '<=', 4)
    model = LinearModel(objective_function=fo, constraints_list=[c1, c2])
    p1 = Phase1(linear_model=model)
    initial_base = p1.find_base()
    p2 = Phase2(linear_model=model, base_indexes=p1.base_variables)
    p2.solve()

    print('Phase1 unit test passed')

Example #19
from models.linear_model import LinearModel


# Load the data
train_data = np.load(".\\datasets\\linear_train.npy")
# test_x = np.load(".\\datasets\\linear_test_x.npy")


# Convert to the shape TF expects
x_data = np.expand_dims(train_data[:, 0], axis=1)  # keep only the x column of train_data
y_data = train_data[:, 1]  # keep only the y column of train_data


# Create the model
model = LinearModel(num_units=1)

# Bind the optimizer and loss function to the model
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
              loss=tf.keras.losses.MSE,
              metrics=[tf.keras.metrics.MeanSquaredError()])

# SGD (Stochastic Gradient Descent): input samples are picked stochastically.
# loss: MSE (mean squared error cost function)
# MeanSquaredError: the error between the true values and the predictions

# Train the model
model.fit(x=x_data,
          y=y_data,
          epochs=10,  # number of passes over the entire training set