Example #1
# Assumed import: these optimizer classes and keyword arguments match the climin API (cf. Example #2).
from climin import Adam, GradientDescent, RmsProp


def get_optimizer(wrt,
                  fprime,
                  optimize_method,
                  step_rate,
                  momentum=0.0,
                  decay=0.9,
                  **kwargs):
    '''Get an optimizer.'''
    if optimize_method == 'rmsprop':
        optimizer = RmsProp(wrt=wrt,
                            fprime=fprime,
                            step_rate=step_rate,
                            decay=decay,
                            momentum=momentum)
    elif optimize_method == 'adam':
        optimizer = Adam(wrt=wrt, fprime=fprime, step_rate=step_rate)
    elif optimize_method == 'gd':
        optimizer = GradientDescent(wrt=wrt,
                                    fprime=fprime,
                                    step_rate=step_rate,
                                    momentum=momentum)
    else:
        raise ValueError('Can not load predefined optimization method %s' %
                         optimize_method)
    return optimizer
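
A minimal usage sketch, assuming get_optimizer above and its climin optimizer classes are importable; the quadratic objective, the initial point and the 100-step cap are illustrative choices rather than part of the original example.

import numpy as np


def fprime(w):
    return 2 * w              # gradient of the toy objective f(w) = ||w||^2


wrt = np.ones(5)              # parameter vector; climin updates it in place
optimizer = get_optimizer(wrt, fprime, 'gd', step_rate=0.1)
for info in optimizer:        # climin optimizers are iterated to take update steps
    if info['n_iter'] >= 100:
        break
print(wrt)                    # should be close to the minimiser at zero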
Example #2
import numpy as np
from scipy.optimize import minimize  # used by the non-Adam branch below

def train(bm, theta_list, method, max_iter=1000, step_rate=0.1):
    '''
    Train a Born machine.

    Args:
        bm (QCBM): the quantum circuit Born machine to train.
        theta_list (1darray): initial parameters.
        method ('Adam'|'L-BFGS-B'):
            * L-BFGS-B: efficient, but not noise tolerant.
            * Adam: noise tolerant.
        max_iter (int): maximum allowed number of iterations.
        step_rate (float): learning rate for the Adam optimizer (unused by L-BFGS-B).

    Returns:
        (float, 1darray): final loss and parameters.
    '''
    theta_list = np.array(theta_list)
    if method == 'Adam':
        from climin import Adam
        optimizer = Adam(wrt=theta_list,
                         fprime=bm.gradient,
                         step_rate=step_rate)
        for info in optimizer:
            step = info['n_iter']
            loss = bm.mmd_loss(theta_list)
            print('step = %d, loss = %s' % (step, loss))
            if step == max_iter:
                break
        return bm.mmd_loss(theta_list), theta_list
    else:
        res = minimize(
            bm.mmd_loss,
            x0=theta_list,
            method=method,
            jac=bm.gradient,
            tol=1e-12,
            options={
                'maxiter': max_iter,
                'disp': 2,
                'gtol': 1e-12,
                'ftol': 0
            },
        )
        return res.fun, res.x
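
A minimal sketch of driving train(), assuming it is in scope: QuadraticBM below is a hypothetical stand-in that only mimics the mmd_loss/gradient interface train() expects from a QCBM, using a quadratic loss minimised at a fixed target.

import numpy as np

class QuadraticBM:
    '''Toy stand-in for QCBM: quadratic loss, minimised at `target`.'''
    def __init__(self, target):
        self.target = np.asarray(target, dtype=float)

    def mmd_loss(self, theta_list):
        return float(np.sum((theta_list - self.target) ** 2))

    def gradient(self, theta_list):
        return 2 * (theta_list - self.target)

bm = QuadraticBM(target=[0.3, -0.7, 1.2])
loss, theta = train(bm, np.zeros(3), method='L-BFGS-B', max_iter=200)
print(loss, theta)            # loss should be ~0, theta close to the target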
Example #3
             first_layer=True),
    Linear(doutput, 10),
    Softmax()
]
'''
# baseline purposes
model = [
    Dropout(dropout),
    LSTM(1, states, sigma=sigma, fbias=1.5, last_state_only=True),
    Linear(states, 10),
    Softmax()
]
'''
W = extract_weights(model)

# climin's Adam updates W in place as the optimizer below is iterated
optimizer = Adam(W, dW, learning_rate, momentum=momentum)

print('Approx. Parameters:', W.size)

for i in optimizer:
    if i['n_iter'] > niterations:
        break

    # epoch, iteration, loss and gradient norm, tab-separated
    print(data.epoch, i['n_iter'], logs['loss'][-1], logs['gradient_norm'][-1], sep='\t')

    if data.epoch_complete:
        inputs, labels = data.fetch_val()
        nsamples = inputs.shape[2]
        inputs = np.split(inputs, nsamples // batch_size, axis=2)