Example #1
from climin import Adam, GradientDescent, RmsProp


def get_optimizer(wrt,
                  fprime,
                  optimize_method,
                  step_rate,
                  momentum=0.0,
                  decay=0.9,
                  **kwargs):
    '''Get an optimizer.'''
    if optimize_method == 'rmsprop':
        optimizer = RmsProp(wrt=wrt,
                            fprime=fprime,
                            step_rate=step_rate,
                            decay=decay,
                            momentum=momentum)
    elif optimize_method == 'adam':
        optimizer = Adam(wrt=wrt, fprime=fprime, step_rate=step_rate)
    elif optimize_method == 'gd':
        optimizer = GradientDescent(wrt=wrt,
                                    fprime=fprime,
                                    step_rate=step_rate,
                                    momentum=momentum)
    else:
        raise ValueError('Can not load predefined optimization method %s' %
                         optimize_method)
    return optimizer
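
A minimal usage sketch (not part of the original example) showing how the optimizer returned by get_optimizer might be driven; the toy quadratic gradient and the parameter vector below are made-up placeholders:

import numpy as np

wrt = np.zeros(3)                    # parameter vector, updated in place by climin
fprime = lambda w: 2.0 * (w - 1.0)   # gradient of the toy loss ||w - 1||^2
opt = get_optimizer(wrt, fprime, 'gd', step_rate=0.1, momentum=0.9)
for info in opt:                     # climin optimizers are infinite iterators
    if info['n_iter'] >= 100:        # stop after a fixed number of steps
        break
print(wrt)                           # should end up close to [1. 1. 1.]
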
Example #2
import numpy as np
from scipy.optimize import minimize


def train(bm, theta_list, method, max_iter=1000, step_rate=0.1):
    '''
    Train a Born machine.

    Args:
        bm (QCBM): the quantum circuit Born machine to train.
        theta_list (1darray): initial parameters.
        method ('Adam'|'L-BFGS-B'):
            * L-BFGS-B: efficient, but not noise tolerant.
            * Adam: noise tolerant.
        max_iter (int): maximum allowed number of iterations.
        step_rate (float): learning rate for the Adam optimizer.

    Returns:
        (float, 1darray): final loss and parameters.
    '''
    theta_list = np.array(theta_list)
    if method == 'Adam':
        from climin import Adam
        optimizer = Adam(wrt=theta_list,
                         fprime=bm.gradient,
                         step_rate=step_rate)
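        # climin's Adam holds a reference to theta_list (wrt) and updates it
        # in place on every step, so the loop below sees the latest parameters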
        for info in optimizer:
            step = info['n_iter']
            loss = bm.mmd_loss(theta_list)
            print('step = %d, loss = %s' % (step, loss))
            if step == max_iter:
                break
        return bm.mmd_loss(theta_list), theta_list
    else:
        res = minimize(
            bm.mmd_loss,
            x0=theta_list,
            method=method,
            jac=bm.gradient,
            tol=1e-12,
            options={
                'maxiter': max_iter,
                'disp': 2,
                'gtol': 1e-12,
                'ftol': 0
            },
        )
        return res.fun, res.x
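
A hedged end-to-end sketch of calling train(); FakeBM below is a made-up stand-in that only provides the two methods the function relies on (mmd_loss and gradient), so the call sequence can be run without a real QCBM:

import numpy as np


class FakeBM(object):
    '''Hypothetical stand-in for a QCBM with a simple quadratic "loss".'''

    def mmd_loss(self, theta):
        return float(np.sum((theta - 0.5) ** 2))

    def gradient(self, theta):
        return 2.0 * (theta - 0.5)


loss, theta = train(FakeBM(), np.zeros(4), 'L-BFGS-B', max_iter=50)
print('final loss = %s' % loss)
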
Example #3
             first_layer=True),
    Linear(doutput, 10),
    Softmax()
]
'''
# baseline purposes
model = [
    Dropout(dropout),
    LSTM(1, states, sigma=sigma, fbias=1.5, last_state_only=True),
    Linear(states, 10),
    Softmax()
]
'''
W = extract_weights(model)

optimizer = Adam(W, dW, learning_rate, momentum=momentum)

print 'Approx. Parameters: ', W.size

for i in optimizer:
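    # each climin step yields an info dict with counters such as 'n_iter',
    # which is used here to stop after a fixed number of iterations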
    if i['n_iter'] > niterations:
        break

    print str(data.epoch) + '\t' + str(i['n_iter']), '\t',
    print logs['loss'][-1], '\t',
    print logs['gradient_norm'][-1]

    if data.epoch_complete:
        inputs, labels = data.fetch_val()
        nsamples = inputs.shape[2]
        inputs = np.split(inputs, nsamples / batch_size, axis=2)
Example #4
    Linear(output, doutputs),
    Softmax()
]
'''
model = [
    Dropout(dropout),
    LSTM(dinput, states, fbias=0.0, sigma=0.1),
    Dropout(dropout),
    LSTM(states, states, fbias=0.0, sigma=0.1),
    Linear(states, doutputs),
    Softmax()
]

W = extract_weights(model)

optimizer = Adam(W, dW, learning_rate, momentum=momentum)

config = ('experiment_name = ' + str(experiment_name) + '\n' +
          'periods = ' + str(periods) + '\n' +
          'vocabulary_size = ' + str(vocabulary_size) + '\n' +
          'states = ' + str(states) + '\n' +
          'output = ' + str(output) + '\n' +
          'dinput = ' + str(dinput) + '\n' +
          'doutputs = ' + str(doutputs) + '\n' +
          'sequence_length = ' + str(sequence_length) + '\n' +
          'batch_size = ' + str(batch_size) + '\n' +
          'learning_rate = ' + str(learning_rate) + '\n' +
          'niterations = ' + str(niterations) + '\n' +
          'momentum = ' + str(momentum) + '\n' +
          'forget_every = ' + str(forget_every) + '\n' +
          'gradient_clip = ' + str(gradient_clip) + '\n' +
          'sample_every = ' + str(sample_every) + '\n' +
          'save_every = ' + str(save_every) + '\n' +
          'plot_every = ' + str(plot_every) + '\n' +
          'full_recurrence = ' + str(full_recurrence) + '\n' +
          'learn_state = ' + str(learn_state) + '\n' +
          'anneal = ' + str(anneal) + '\n' +
          'dynamic_forgetting = ' + str(dynamic_forgetting) + '\n' +
          'text = ' + str(text_file) + '\n')
config += 'optimizer = ' + str(optimizer.__class__.__name__) + '\n' 
config += 'dropout = ' + str(dropout) + '\n' 

f = open(path + 'config.txt', 'w')
f.write(config)
f.close()

print config, 'Approx. Parameters: ', W.size

for i in optimizer:
    if i['n_iter'] > niterations:
        break