Example #1
def get_model(args):
    loss_dict = {
        'softmax': SoftmaxCrossEntropyLoss('softmax'),
        'euclidean': EuclideanLoss('euclidean'),
    }
    
    config = {
        'learning_rate': args.learning_rate,
        'weight_decay': args.weight_decay,
        'momentum': args.momentum,
        'batch_size': args.batch_size,
        'max_epoch': args.max_epoch,
        'disp_freq': args.disp_freq,
        'test_epoch': args.test_epoch
    }
    loss = loss_dict[args.loss]

    model = Network()
    layer = args.hidden_layer
    if layer == 1:
        model.add(Linear('fc1', 784, args.hidden_size, 0.01))
        model.add(get_activation(args.activation, 0))
        model.add(Linear('fc2', args.hidden_size, 10, 0.01))
        model.add(get_activation(args.activation, 1))
    else:
        model.add(Linear('fc1', 784, args.hidden_size, 0.01))
        model.add(get_activation(args.activation, 0))
        model.add(Linear('fc2', args.hidden_size, args.hidden_size//2, 0.01))
        model.add(get_activation(args.activation, 1))
        model.add(Linear('fc3', args.hidden_size//2, 10, 0.01))
        model.add(get_activation(args.activation, 2))
    return model, config, loss
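
get_model reads every hyperparameter off an argparse namespace. A minimal sketch of a parser that supplies each attribute the function accesses (the flag names mirror the attribute accesses above; the defaults are illustrative assumptions, not values from the source):

import argparse

# Hypothetical parser covering every attribute get_model() reads.
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.01)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--max_epoch', type=int, default=100)
parser.add_argument('--disp_freq', type=int, default=50)
parser.add_argument('--test_epoch', type=int, default=1)
parser.add_argument('--loss', choices=['softmax', 'euclidean'], default='softmax')
parser.add_argument('--hidden_layer', type=int, default=1)
parser.add_argument('--hidden_size', type=int, default=256)
parser.add_argument('--activation', default='relu')

model, config, loss = get_model(parser.parse_args())
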
def build_model(config):
    model = Network()
    layer_num = 0
    for layer in config['use_layer']:
        if layer['type'] == "Linear":
            in_num = layer['in_num']
            out_num = layer['out_num']
            if "init_std" in layer.keys():
                model.add(
                    Linear(layer['type'] + str(layer_num),
                           in_num,
                           out_num,
                           init_std=layer['init_std']))
            else:
                model.add(
                    Linear(layer['type'] + str(layer_num), in_num, out_num))
            layer_num += 1
        elif layer['type'] == 'Relu':
            model.add(Relu(layer['type'] + str(layer_num)))
            layer_num += 1
        elif layer['type'] == 'Sigmoid':
            model.add(Sigmoid(layer['type'] + str(layer_num)))
            layer_num += 1
        else:
            assert 0, 'unsupported layer type: ' + layer['type']
    loss_name = config['use_loss']
    if loss_name == 'EuclideanLoss':
        loss = EuclideanLoss(loss_name)
    elif loss_name == 'SoftmaxCrossEntropyLoss':
        loss = SoftmaxCrossEntropyLoss(loss_name)
    else:
        assert 0, 'unsupported loss: ' + loss_name
    return model, loss
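
build_model drives the same layer classes from a dict instead of argparse: 'use_layer' is an ordered list of layer specs and 'use_loss' names the loss class. A minimal config it would accept (the sizes here are illustrative):

config = {
    'use_layer': [
        {'type': 'Linear', 'in_num': 784, 'out_num': 256, 'init_std': 0.01},
        {'type': 'Relu'},
        {'type': 'Linear', 'in_num': 256, 'out_num': 10},  # no 'init_std': Linear's default is used
    ],
    'use_loss': 'SoftmaxCrossEntropyLoss',
}
model, loss = build_model(config)
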
Example #3
def GCN_check(name, adj, weights, layer_config):
    num_layer = len(layer_config)

    model = Network()
    for i in range(num_layer - 2):
        model.add(Aggregate('A{}'.format(i), adj))
        model.add(
            Linear('W{}'.format(i), layer_config[i], layer_config[i + 1],
                   'xavier').set_W(weights[i]))
        model.add(Tanh('Tanh{}'.format(i)))

    model.add(Aggregate('A{}'.format(num_layer - 2), adj))
    model.add(
        Linear('W{}'.format(num_layer - 2), layer_config[-2], layer_config[-1],
               'xavier').set_W(weights[-1]))

    loss = SoftmaxCrossEntropyLoss(name='loss')
    # loss = EuclideanLoss(name='loss')

    print("Model " + name)
    for layer in model.layer_list:
        print(":\t" + repr(layer))
    print(':\t' + repr(loss))

    print('Forward Computation: ', model.str_forward('X'))
    print('Backward Computation:', model.str_backward('Z-Y'))
    print()
    model.str_update()
    print()

    return model, loss
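
Note that model.add(Linear(...).set_W(weights[i])) only works if set_W returns the layer itself. The Linear class is not shown in this listing; a hypothetical sketch of the chaining pattern it must follow:

import numpy as np

class Linear:  # sketch only; the real class also carries forward/backward logic
    def __init__(self, name, in_num, out_num, init):
        self.name = name
        self.W = np.random.randn(in_num, out_num) * 0.01  # placeholder init
    def set_W(self, W):
        self.W = np.array(W)
        return self  # returning self is what makes model.add(Linear(...).set_W(w)) legal
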
Example #4
File: train.py Project: uclasystem/dorylus
def MLP(name, weights, layer_config):  # note: 'weights' is accepted but never used below
    num_layer = len(layer_config)

    model = Network()
    for i in range(num_layer - 2):
        model.add(Linear('W{}'.format(i),
                         layer_config[i], layer_config[i + 1], 'kaiming'))
        model.add(Relu('Relu{}'.format(i)))

    model.add(Linear('W{}'.format(num_layer - 2),
                     layer_config[-2], layer_config[-1], 'kaiming'))

    loss = SoftmaxCrossEntropyLoss(name='loss')

    print("Model "+name)
    for layer in model.layer_list:
        print(":\t" + repr(layer))
    print(':\t' + repr(loss))
    print()

    print('Forward Computation: ', model.str_forward('X'))
    print('Backward Computation:', model.str_backward('Z-Y'))
    print()
    model.str_update()
    print()

    return model, loss
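
layer_config lists the layer widths from input to output, so len(layer_config) - 1 Linear layers are created, with a Relu after every layer except the last. A hedged usage sketch (the dimensions are illustrative):

model, loss = MLP('mlp', None, [784, 128, 10])  # 784 -> 128 -> 10, one Relu in between
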
Example #5
File: train.py Project: uclasystem/dorylus
def example_GCN(name, adj, weights, layer_config):
    model = Network()
    model.add(Aggregate('A1', adj))
    model.add(Linear('W1', layer_config[0], layer_config[1], 'kaiming'))
    model.add(Relu('Relu1'))
    model.add(Aggregate('A2', adj))
    model.add(Linear('W2', layer_config[1], layer_config[1], 'kaiming'))
    model.add(Relu('Relu2'))
    model.add(Aggregate('A3', adj))
    model.add(Linear('W3', layer_config[1], layer_config[2], 'kaiming'))

    loss = SoftmaxCrossEntropyLoss(name='loss')

    print("Model "+name)
    for layer in model.layer_list:
        print(":\t" + repr(layer))
    print(':\t' + repr(loss))

    print('Forward Computation: ', model.str_forward('X'))
    print('Backward Computation:', model.str_backward('Z-Y'))
    print()
    model.str_update()
    print()

    return model, loss
def Model_Linear_Gelu_1_SoftmaxCrossEntropyLoss():
    name = '1_Gelu_SoftmaxCrossEntropyLoss'
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.01))
    model.add(Gelu('a1'))
    model.add(Linear('fc2', 256, 10, 0.01))
    loss = SoftmaxCrossEntropyLoss(name='loss')
    return name, model, loss
def Model_Linear_Gelu_2_SoftmaxCrossEntropyLoss():
    name = '2_Gelu_SoftmaxCrossEntropyLoss'
    model = Network()
    model.add(Linear('fc1', 784, 441, 0.01))
    model.add(Gelu('a1'))
    model.add(Linear('fc2', 441, 196, 0.01))
    model.add(Gelu('a2'))
    model.add(Linear('fc3', 196, 10, 0.01))
    loss = SoftmaxCrossEntropyLoss(name='loss')
    return name, model, loss
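
Both models rely on a Gelu layer that is not defined anywhere in this listing. For reference, the tanh approximation of GELU that such layers typically compute (an assumption about this implementation, not taken from the source):

import numpy as np

def gelu(x):
    # tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))
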
Example #8
def evaluate(model, data):
    x_data = data['x']
    y_data = data['y']

    batch_size = 100
    size = len(x_data)
    correct = 0
    loss_value = 0
    loss = SoftmaxCrossEntropyLoss('loss')
    for start_idx in range(0, size, batch_size):
        end_idx = min(start_idx + batch_size, size)
        x = np.array(x_data[start_idx:end_idx])
        y = y_data[start_idx:end_idx]

        ans = model.forward(x)
        output = softmax(ans)

        loss_value += len(y) * loss.forward(ans, onehot_encoding(y, 5))
        correct += len(y) * calculate_acc(output, y)

    return loss_value / size, correct / size
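
evaluate depends on three helpers that are not shown: softmax, onehot_encoding, and calculate_acc. Minimal sketches consistent with how they are called above (signatures inferred from usage, so treat them as assumptions):

import numpy as np

def softmax(x):
    e = np.exp(x - x.max(axis=1, keepdims=True))  # shift by the row max for numerical stability
    return e / e.sum(axis=1, keepdims=True)

def onehot_encoding(labels, num_classes):
    onehot = np.zeros((len(labels), num_classes))
    onehot[np.arange(len(labels)), labels] = 1
    return onehot

def calculate_acc(output, labels):
    return float(np.mean(output.argmax(axis=1) == np.asarray(labels)))
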
Example #9
def basicConv2Layer():
    model = Network()
    model.add(Conv2D('conv1', 1, 4, 3, 1, 1))
    model.add(Relu('relu1'))
    model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
    model.add(Conv2D('conv2', 4, 4, 3, 1, 1))
    model.add(Relu('relu2'))
    model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 4 x 7 x 7
    model.add(Reshape('flatten', (-1, 196)))
    model.add(Linear('fc3', 196, 10, 0.1))

    loss = SoftmaxCrossEntropyLoss(name='loss')
    return model, loss
Example #10
def LeNet():
    model = Network()
    model.add(Conv2D('conv1', 1, 6, 5, 2, 1))
    model.add(Relu('relu1'))
    model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 6 x 14 x 14
    model.add(Conv2D('conv2', 6, 16, 5, 0, 1))
    model.add(Relu('relu2'))
    model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 16 x 5 x 5
    model.add(Reshape('flatten', (-1, 400)))
    model.add(Linear('fc1', 400, 120, 0.1))
    model.add(Relu('relu3'))
    model.add(Linear('fc2', 120, 84, 0.1))
    model.add(Relu('relu4'))
    model.add(Linear('fc3', 84, 10, 0.1))

    loss = SoftmaxCrossEntropyLoss(name='loss')
    return model, loss
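
The shape comments can be checked with the usual formula out = (in + 2*pad - kernel) // stride + 1, reading the Conv2D arguments as (name, in_channels, out_channels, kernel_size, pad, init_std) with stride 1 — an ordering inferred from these examples, not confirmed by the source:

def conv_out(size, kernel, pad, stride=1):
    return (size + 2 * pad - kernel) // stride + 1

s = conv_out(28, 5, 2)    # conv1: 28 -> 28
s //= 2                   # pool1: 28 -> 14
s = conv_out(s, 5, 0)     # conv2: 14 -> 10
s //= 2                   # pool2: 10 -> 5
assert 16 * s * s == 400  # matches Reshape('flatten', (-1, 400)) and Linear('fc1', 400, ...)
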
Example #11
from solve_net import show4category
train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Conv2D('conv1', 1, 4, 3, 1, 0.01))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
model.add(Conv2D('conv2', 4, 8, 3, 1, 0.01))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 8 x 7 x 7
model.add(Reshape('flatten', (-1, 392)))
model.add(Linear('fc3', 392, 10, 0.01))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means one forward-backward pass over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between progress displays within an epoch.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0,
    'momentum': 0.7,
    'batch_size': 100,
    'max_epoch': 300,
    'disp_freq': 5,
    'test_epoch': 2
}
Example #12
model4 = Network(name='model4')
model4.add(Linear('m4_fc1', 784, 512, 0.01))
model4.add(Relu('m4_fc2'))
model4.add(Linear('m4_fc3', 512, 128, 0.01))
model4.add(Relu('m4_fc4'))
model4.add(Linear('m4_fc5', 128, 10, 0.01))

model5 = Network(name='model5')
model5.add(Linear('m5_fc1', 784, 392, 0.01))
model5.add(Relu('m5_fc2'))
model5.add(Linear('m5_fc3', 392, 196, 0.01))
model5.add(Relu('m5_fc4'))
model5.add(Linear('m5_fc5', 196, 10, 0.01))

loss1 = EuclideanLoss(name='Euclidean')
loss2 = SoftmaxCrossEntropyLoss(name='XEntropy')

#models = [model1, model2, model3, model4, model5]
#losses = [loss1, loss2]
model = model4
loss = loss2

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means one forward-backward pass over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between progress displays within an epoch.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0.0,
Example #13
    t = np.array(time_list)
    return [final_acc, end_time - start_time, x, ya, yl, t]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_one_layer", default=False)
    parser.add_argument("--train_two_layer", default=False)
    parser.add_argument("--modified_gd", default=False)
    parser.add_argument("--stop_time", default=0, type=int)

    args = parser.parse_args()

    train_data, test_data, train_label, test_label = load_mnist_2d('data')
    loss1 = EuclideanLoss(name="euclidean loss")
    loss2 = SoftmaxCrossEntropyLoss(name="softmax cross entropy loss")

    config = {
        'learning_rate': 0.01,
        'weight_decay': 0.001,
        'momentum': 0.8,
        'batch_size': 64,
        'max_epoch': 50,
        'disp_freq': 1000,
        'test_epoch': 2,
        'stop_time': args.stop_time
    }

    if Type(args.train_one_layer):  # Type: presumably a str-to-bool helper defined elsewhere in this file
        config['max_epoch'] = 50
Example #14
File: run_cnn.py Project: Funaizhang/THUAC
train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Your model definition here
# You should explore different model architectures
model = Network('CNN_test')
model.add(Conv2D('conv1', 1, 4, 3, 1, 0.1))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
model.add(Conv2D('conv2', 4, 4, 3, 1, 0.1))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 4 x 7 x 7
model.add(Reshape('flatten', (-1, 196)))
model.add(Linear('fc3', 196, 10, 0.1))

loss = SoftmaxCrossEntropyLoss(name='SoftmaxCrossEntropy')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means one forward-backward pass over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between progress displays within an epoch.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0.0,
    'momentum': 0.9,
    'batch_size': 100,
    'max_epoch': 2,
    'disp_freq': 100,
    'layer_vis': 'relu1'
Example #15
elif args.layers == 1:
    model.add(Linear('fc1', 784, 256, args.std))
    model.add(activation('act'))
    model.add(Linear('fc2', 256, 10, args.std))
else:
    model.add(Linear('fc1', 784, 256, args.std))
    model.add(activation('act'))
    model.add(Linear('fc2', 256, 128, args.std))
    model.add(activation('act'))
    model.add(Linear('fc3', 128, 10, args.std))

if args.loss == 'mse':
    model.add(Sigmoid('sigmoid'))
    loss = EuclideanLoss('loss')
else:
    loss = SoftmaxCrossEntropyLoss('loss')
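
Presumably the extra Sigmoid is there because the Euclidean (MSE) loss compares the network's raw outputs against one-hot targets, so squashing the logits into (0, 1) keeps those targets reachable; the cross-entropy branch needs no such layer, since the softmax normalization happens inside the loss.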

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means one forward-backward pass over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between progress displays within an epoch.

config = {
    'learning_rate': args.lr,
    'weight_decay': args.weight_decay,
    'momentum': args.momentum,
    'batch_size': args.batch_size,
    'max_epoch': args.max_epoch,
    'disp_freq': 50,
    'test_epoch': 1
Example #16
        val_data = pickle.load(f)

    with open('data.pkl', 'rb') as f:
        test_data = pickle.load(f)

    x_data = train_data['x']
    y_data = train_data['y']

    model = build_model()
    batch_size = 128
    size = len(x_data)
    global_step = 0

    epoch = 2

    loss = SoftmaxCrossEntropyLoss('loss')

    logs = []
    for i in range(epoch):

        for start_idx in range(0, size, batch_size):
            end_idx = min(start_idx + batch_size, size)
            x = np.array(x_data[start_idx:end_idx])
            label = y_data[start_idx:end_idx]
            y = onehot_encoding(label, 5)

            val_loss, val_acc = 0, 0
            val_loss, val_acc = evaluate(model, val_data)