Example #1
import argparse

import cv2
import matplotlib.pyplot as plt

# NeuralNet, add_padding, and remove_padding are project-local (not shown).


def main():
    parser = argparse.ArgumentParser(description='Deblur image')
    parser.add_argument('-d', '--data', help='Input data image')
    parser.add_argument('-o', '--output', help='Output image')

    # Stage 0: load the image
    args = parser.parse_args()
    img = cv2.imread(args.data)
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 2, 1)
    ax1.imshow(img)  # note: cv2 loads BGR, so matplotlib shows swapped channels

    # Stage 1: add padding
    img, y, x = add_padding(img)

    # Stage 2: OpenCV optimization

    # Stage 3: machine-learning deblur
    model = NeuralNet()
    img = model.predict(img)

    # Stage 4: more OpenCV

    # End: strip the padding, display, and write the result
    ax2 = fig.add_subplot(2, 2, 2)
    img = remove_padding(img, x, y)
    ax2.imshow(img)
    cv2.imwrite(args.output, img)  # the parsed --output argument was never used
    plt.show()
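The add_padding and remove_padding helpers above are not shown in the source. A minimal sketch of what they might look like, assuming the model needs dimensions padded to a multiple of some block size (the block size of 32 and the reflection border are guesses, not from the original):

import cv2

def add_padding(img, block=32):
    """Pad height and width up to multiples of `block` (assumed contract);
    returns the padded image plus the original dimensions."""
    y, x = img.shape[:2]
    img = cv2.copyMakeBorder(img, 0, (-y) % block, 0, (-x) % block,
                             cv2.BORDER_REFLECT)
    return img, y, x

def remove_padding(img, x, y):
    """Crop back to the original size recorded by add_padding."""
    return img[:y, :x]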
Example #2
    def __init__(self, layers, up_weights=None, down_weights=None):
        '''Initializes a BN from the layers given'''
        self.numlayers = len(layers)

        down_layers = [layer.__class__.from_layer(layer) for layer in layers[::-1]]  # copy so that upnet and downnet layers are distinct objects
        self.upnet = NeuralNet(layers, up_weights)
        self.downnet = NeuralNet(down_layers, down_weights)
Example #3
 def __init__(self, numvis, numhid, vislayer=None, hidlayer=None, vishid=None):
     '''Initialize an RBM with numvis visible units and numhid hidden units. The weights are randomly
     initialized unless explicitly passed in as a parameter.'''
     self.numvis = numvis
     self.numhid = numhid
     weights = [vishid] if vishid is not None else None
     NeuralNet.__init__(self, [vislayer or BinaryStochasticLayer(numvis), hidlayer or BinaryStochasticLayer(numhid)], weights)
Example #4
 def __init__(self,
              numvis,
              numhid,
              vislayer=None,
              hidlayer=None,
              vishid=None):
     '''Initialize an RBM with numvis visible units and numhid hidden units. The weights are randomly
     initialized unless explicitly passed in as a parameter.'''
     self.numvis = numvis
     self.numhid = numhid
     weights = [vishid] if vishid is not None else None
     NeuralNet.__init__(self,
                        [vislayer or BinaryStochasticLayer(numvis),
                         hidlayer or BinaryStochasticLayer(numhid)],
                        weights)
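A minimal usage sketch (assumed, not from the source; the unit counts are arbitrary):

# Weights are randomly initialized because no vishid matrix is passed.
rbm = RBM(784, 256)

# Supplying an explicit weight matrix instead; `my_weights` is a
# hypothetical (numvis x numhid) array.
rbm = RBM(784, 256, vishid=my_weights)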
Example #5
import random

import network


def stochasticGradientDescent(net: network.NeuralNet, inputs, targets,
                              epochs: int):
    assert len(inputs) == len(targets)

    indices = list(range(len(inputs)))
    for epoch in range(epochs):
        print("Epoch %d" % epoch)

        # Visit the training examples in a fresh random order each epoch.
        random.shuffle(indices)

        for idx in indices:  # renamed from `i`, which shadowed the epoch counter
            grads = net.gradient(inputs[idx], targets[idx])

            # Scale the gradients in place by a fixed learning rate of 0.1.
            for grad in grads:
                if grad is not None:
                    grad *= 0.1

            net.adjustWeights(grads)
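One subtlety in the loop above: `grad *= 0.1` only acts as a learning-rate step if the gradients are mutable arrays (e.g. NumPy), because the in-place multiply mutates the very object that adjustWeights later consumes. A standalone illustration of that aliasing behaviour:

import numpy as np

grad = np.ones((2, 2))
alias = grad   # a second reference to the same array
grad *= 0.1    # in-place multiply mutates the shared buffer
print(alias)   # [[0.1 0.1]
               #  [0.1 0.1]] -- the scaling is visible through every reference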
Example #6
 def get_net(self):
     net = NeuralNet(self.layer_spec, self.activation_function,
                     self.regularisation_coefficient)
     if self.persistence_id:
         persist = Persist(self.persistence_id)
         if persist.exists():
             persist.load(net)
             print('Loaded network from', persist.get_filename())
     return net
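For context, a sketch (assumed, not from the source) of the owner attributes get_net reads; the class name NetFactory is hypothetical:

class NetFactory(object):
    def __init__(self, layer_spec, activation_function,
                 regularisation_coefficient, persistence_id=None):
        self.layer_spec = layer_spec
        self.activation_function = activation_function
        self.regularisation_coefficient = regularisation_coefficient
        self.persistence_id = persistence_id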
Example #7
from random import shuffle

def makeNN(filename, outputfile, hidden, pca, layers):
    fgen = FeatureGenerator(PhonemeDataFile(filename))
    # features_vector(pca) is assumed to yield exactly two items: the
    # feature list and the PCA data.
    features, pcas = list(fgen.features_vector(pca))
    shuffle(features)
    split = int(len(features) * 0.8)
    train = features[:split]  # the larger 80% slice, used for training
    test = features[split:]

    num_input = len(train[0][0])
    num_output = len(train[0][1])
    inputVars = tuple([num_input] + [hidden] * layers + [num_output])
    print("Making NN with: %s" % str(inputVars))
    print("len(train)=%d, len(test)=%d" % (len(train), len(test)))

    network = NeuralNet(inputVars)
    network.train(train, test, debug=True)
    if network.save(pcas, list(fgen.phones), outputfile):
        print("Saved nn successfully")
    else:
        print("Error while saving nn")
Example #8
def trainOnFeatures(filename):
    train, test = genFeatures(filename)
    nin, nout = len(train[0][0]), len(train[0][1])
    net = NeuralNet((nin, 20, nout))
    num_epochs = net.train(train, test)
    return net, test
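In both functions above, the tuple handed to NeuralNet reads as consecutive fully connected layer sizes. A standalone sanity check of the weight-matrix shapes such a tuple implies (assuming a plain dense net, which the (nin, 20, nout) spelling suggests):

def weight_shapes(layer_sizes):
    """Dense weight-matrix shapes implied by a layer-size tuple."""
    return [(a, b) for a, b in zip(layer_sizes, layer_sizes[1:])]

print(weight_shapes((64, 20, 2)))  # [(64, 20), (20, 2)]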
Example #9
            from_type3 = type3[0:n_inputs]
            from_type3_targets = type3_targets[0:n_inputs]

            from_type4 = type4[0:n_inputs]
            from_type4_targets = type4_targets[0:n_inputs]

            inputs = torch.cat((from_type1, from_type2, from_type3, from_type4), 0)
            targets = torch.cat((from_type1_targets, from_type2_targets,
                                 from_type3_targets, from_type4_targets), 0)

            args = dict()
            args['n_inputs'] = n_inputs
            args['n_neurons'] = n_neurons

            # Model
            net = NeuralNet(hidden_neurons=args['n_neurons'])

            criterion = nn.MSELoss(reduction="mean")
            optimizer = optim.SGD(net.parameters(), lr=learning_rate)
            hold_loss = []

            EPOCHS = math.ceil(K / (n_inputs * 4))
            # prog_bar = Bar('Training...', suffix='%(percent).1f%% - %(eta)ds - %(index)d / %(max)d', max=EPOCHS)
            # Train loop
            for epoch in range(EPOCHS):
                running_loss = 0.0

                # Batch gradient descent
                optimizer.zero_grad()
                output = net(inputs)
                loss = criterion(output, targets)
                # The excerpt ended here; the standard update step implied by
                # the optimizer setup above would be:
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                hold_loss.append(running_loss)
Example #10
import os
import time

import numpy as np
from comet_ml import Experiment

# Game, NeuralNet, ReplayBuffer, execute_episode, train_network, and
# evaluate_network are project-local (not shown).


def train_alphazero(lr, dropout, num_channels, epochs, batch_size,
                    replay_buffer_size, temp_decrease_moves, mcts_rollouts,
                    n_episodes_per_iteration, eval, model, test):
    board = Game(player_turn=1)
    network = NeuralNet(board, num_channels, lr, dropout, epochs, batch_size)
    if model is not None:
        print("Loading {}".format(model))
        network.load(model)
    if test:
        while True:
            while board.turn_player() == -1:
                move = np.argmax(network(board, board.valid_moves())[0][0])
                print("Board {}, move {}".format(board.board(), move))
                board.move(move)
            print("{}".format(board.board()))
            import pdb
            pdb.set_trace()
    # set up the experiment
    experiment = Experiment(
        api_key=os.environ.get('ALPHAZERO_API_KEY'),
        project_name=os.environ.get('ALPHAZERO_PROJECT_NAME'),
        workspace=os.environ.get('ALPHAZERO_WORKSPACE'))
    experiment.log_multiple_params({
        'lr': lr,
        'dropout': dropout,
        'num_channels': num_channels,
        'epochs': epochs,
        'batch_size': batch_size,
        'replay_buffer_size': replay_buffer_size,
        'temp_decrease_moves': temp_decrease_moves,
        'mcts_rollouts': mcts_rollouts,
        'n_episodes_per_iteration': n_episodes_per_iteration
    })
    buf = ReplayBuffer(replay_buffer_size, batch_size)

    epoch = 0
    while True:
        epoch += 1
        print("Epoch {}, {}".format(epoch, time.clock()))
        for i in range(n_episodes_per_iteration):
            winner = execute_episode(network, buf, experiment)
            print("Finished episode {}, winner {}, time {}".format(
                i, winner, time.clock()))
        network.clone()
        loss, entropy = train_network(network, buf, experiment)

        print("Training loss: {}, entropy: {}".format(loss, entropy))
        won_counter = evaluate_network(network, board, 10)
        if won_counter >= 5:
            print("Performance improved, {} games won".format(won_counter))
            network.save()
        else:
            print("Performance decreased, {} games won".format(won_counter))
            network.revert_network()
Example #11
def main():
    # PATTERNS and TESTS are project-local training and test sets (not shown).
    nn = NeuralNet(64, 20, 2)
    nn.train(PATTERNS, iterations=1000)
    print(nn.show(TESTS))
Example #12
class BN(object):
    '''A BN is a bidirectional NeuralNet. It is equivalent to two feed-forward nets running in opposite directions.'''
    def __init__(self, layers, up_weights=None, down_weights=None):
        '''Initializes a BN from the layers given'''
        self.numlayers = len(layers)

        down_layers = [layer.__class__.from_layer(layer) for layer in layers[::-1]]  # copy so that upnet and downnet layers are distinct objects
        self.upnet = NeuralNet(layers, up_weights)
        self.downnet = NeuralNet(down_layers, down_weights)

    @classmethod
    def from_rbms(cls, rbms):
        '''Initializes a BN from a list of RBMs. NOTE: the down weights and up weights are tied:
        modifying one modifies the other. To untie them, call the __untie_weights__ method.'''
        layers = []
        # First layer of dbn is the visible layer of the bottom rbm
        layers.append(rbms[0].get_vislayer())
        # Keep all hidden layers
        for rbm in rbms:
            layers.append(rbm.get_hidlayer())

        up_weights = [rbm.get_vishid() for rbm in rbms]
        down_weights = [rbm.get_vishid().transpose() for rbm in rbms[::-1]]

        return cls(layers, up_weights, down_weights)

    def bottom_up(self, data):
        '''Expects data to be probabilities'''
        self.upnet.layers[0].probs = data
        self.upnet.layers[0].activities = sample_binary_stochastic(data)
        return self.upnet.forward_pass(data, 1)

    def top_down(self, data):
        '''Expects data to be binary'''
        self.downnet.layers[0].activities = data
        return self.downnet.forward_pass(data, 1)

    def top_down_prob(self, data):
        '''Expects data to be binary'''
        self.downnet.layers[0].activities = data
        last = len(self.downnet.weights)
        data = dot(data, self.downnet.weights[0])

        for i in range(1, self.downnet.numlayers):
            self.downnet.layers[i].process(data)
            data = self.downnet.layers[i].probs
            if i < last:
                data = dot(data, self.downnet.weights[i])
        return [layer.activities for layer in self.downnet.layers]


    def __untie_weights__(self):
        '''This is an ugly step, and is only necessary when the BN is initialized from RBMs.
        It unties the recognition weights from the generative ones.'''
        numweights = self.numlayers - 1
        for i in range(numweights):
            self.upnet.weights[i] = self.upnet.weights[i].copy()

    def wake_phase(self, data):
        '''The first step of wake-sleep and contrastive wake-sleep. Returns wake_deltas, a list of matrices by which
        the weights of the down net should be adjusted. Also returns wake_bias_deltas, wake_visbias_delta, and the
        hidden states of the top layer. Assumes DBN layers are binary stochastic layers.'''
        #Get the states and probabilities of every layer after doing a bottom-up pass
        hid_states = self.bottom_up(data)
        hid_probs = []
        for layer in self.upnet.layers:
            hid_probs.append(layer.probs)

        wake_deltas = []
        wake_bias_deltas = []
        #Bias deltas for the generative visible units
        wake_visbias_delta = (data - self.upnet.layers[0].probs).sum(0)/data.shape[0]
        #Iterate over each layer excluding bottom layer
        for i in range(self.upnet.numlayers - 1):
            upper_state = hid_states[i+1]
            upper_activity = hid_probs[i+1]
            lower_state = hid_states[i]
            lower_activity = hid_probs[i]

            delta = dot(upper_state.transpose(), (lower_state - lower_activity))/data.shape[0]
            #Get bias deltas as well to update hidden biases in downnet
            delta_bias = array([(upper_state - upper_activity).sum(0)/data.shape[0]])
            wake_deltas.insert(0, delta)
            wake_bias_deltas.insert(0, delta_bias)

        return wake_deltas, wake_bias_deltas, wake_visbias_delta, hid_states[-1]

    def sleep_phase(self, data):
        '''The last step of wake-sleep and contrastive wake-sleep. Returns sleep_deltas, a list of matrices by which
        the weights of the up net should be adjusted. Also returns sleep_bias_deltas. Assumes DBN layers are binary stochastic layers.'''
        #Get the states and probabilities of every layer after doing a top-down pass
        hid_states = self.top_down(data)
        hid_probs = []
        for layer in self.downnet.layers:
            hid_probs.append(layer.probs)

        sleep_deltas = []
        sleep_bias_deltas = []
        #Iterate over each layer excluding top layer
        for i in range(self.downnet.numlayers -1):
            lower_state = hid_states[i+1]
            upper_state = hid_states[i]
            upper_activity = hid_probs[i]

            delta = dot(lower_state.transpose(), (upper_state - upper_activity))/data.shape[0]
            #Get bias deltas to update hidden biases in upnet
            delta_bias = array([(upper_state - upper_activity).sum(0)/data.shape[0]])
            sleep_deltas.insert(0, delta)
            sleep_bias_deltas.insert(0, delta_bias)

        return sleep_deltas, sleep_bias_deltas

    def wake_sleep(self, data, learning_rate):
        '''Combines wake and sleep phases'''

        downnet_deltas, downnet_hidbias_deltas, downnet_visbias_delta, top_state = self.wake_phase(data)
        upnet_deltas, upnet_bias_deltas = self.sleep_phase(top_state) #The top state is the input for the top-down pass
        recons_error = square(data - self.upnet.layers[0].probs).sum()
        print('BN Reconstruction Error', recons_error)

        self.downnet.layers[-1].bias += learning_rate*downnet_visbias_delta
        for i in range(len(downnet_deltas)):
            self.downnet.weights[i] += learning_rate*downnet_deltas[i]
            self.downnet.layers[i+1].bias += learning_rate*downnet_hidbias_deltas[i]

        for i in range(len(upnet_deltas)):
            self.upnet.weights[i] += learning_rate*upnet_deltas[i]
            self.upnet.layers[i+1].bias += learning_rate*upnet_bias_deltas[i]
        return recons_error
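An end-to-end usage sketch (assumed, not from the source): it presumes the RBM class from Examples #3/#4, pretrained elsewhere, and arbitrary sizes and hyperparameters.

import numpy as np

# Two stacked RBMs: 784 visible -> 256 hidden -> 64 hidden. Their individual
# pretraining (e.g. contrastive divergence) is elided.
rbms = [RBM(784, 256), RBM(256, 64)]

bn = BN.from_rbms(rbms)
bn.__untie_weights__()  # from_rbms ties up and down weights; untie before training

batch = np.random.rand(32, 784)  # placeholder data: rows of probabilities in [0, 1]
for epoch in range(10):
    err = bn.wake_sleep(batch, learning_rate=0.05)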