Example #1
def train():
    # Build the wrapper and get the underlying Keras model back.
    app = Net()
    app = app.create_model()

    # Load training and validation sets from TFRecord files.
    images, labels = read_tfrecord()
    images_val, labels_val = read_tfrecord(type_='val')

    # Positional arguments follow Keras Model.fit: batch_size=128, epochs=100.
    app.fit(images, labels, 128, 100, validation_data=(images_val, labels_val))

    app.save('../model/net.h5')
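
`Net` and `read_tfrecord` are project code the listing does not show. As a rough sketch of what such a reader could look like with the TensorFlow 2 `tf.io` parsing API (the feature keys, the JPEG encoding, and the `../data/{type_}.tfrecord` layout are all assumptions):

import numpy as np
import tensorflow as tf

def read_tfrecord(type_='train'):
    # Assumed record schema: one encoded JPEG and one int64 label per example.
    feature_spec = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    def parse(record):
        parsed = tf.io.parse_single_example(record, feature_spec)
        image = tf.io.decode_jpeg(parsed['image'], channels=3)
        return tf.image.convert_image_dtype(image, tf.float32), parsed['label']

    dataset = tf.data.TFRecordDataset(f'../data/{type_}.tfrecord').map(parse)
    pairs = list(dataset.as_numpy_iterator())
    images = np.stack([img for img, _ in pairs])  # assumes a fixed image size
    labels = np.array([lbl for _, lbl in pairs])
    return images, labels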
Example #2
def run_model(data, test_data, args):
    ### INITIALIZE SETTINGS ###
    G = Global()
    G.set_debug_true()
    if data is None:
        return 0
    ### DEFINE MODEL ###
    n, m = data.shape
    out_dim = args['output-dimension']
    hidden_model = DeepNet
    hidden_sizes = args['hidden-dims']
    # `device` is assumed to be defined at module scope, e.g. torch.device('cuda').
    index = Net(hidden_model, m, out_dim, hidden_sizes).to(device)
    ### TRAIN MODEL ###
    index = index.fit(data, args)
    ### TEST MODEL ###
    results = index.test(data, test_data)
    if args['to_plot']:
        index.create_plot(data, filename='train')
        ## Plot test data ##
        index.create_plot(test_data, name='Test Data', filename='test')
    ## Save plots ##
    name = args['filename_prefix']
    G.save_figs(name)
    ## Release Space ##
    del G, index
    ## Return neighbors ##
    return results
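
The `args` dictionary drives the whole pipeline. A minimal invocation sketch, using only the keys the function actually reads (the values and the `train_data`/`test_data` arrays are placeholders):

args = {
    'output-dimension': 2,      # out_dim passed to Net
    'hidden-dims': [128, 64],   # layer widths for the hidden model
    'to_plot': True,            # emit the train/test plots
    'filename_prefix': 'demo',  # prefix for the saved figures
}
results = run_model(train_data, test_data, args)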
Example #3
    def generate_rollout(self,
                         iteration_number,
                         iter_train,
                         iter_dev,
                         verbose=False):
        self.log_probs = []
        self.actions = []
        self.entropies = []
        self.reward = None

        state = torch.zeros(self.hidden_size)
        terminated = False
        self.reward = 0

        while not terminated:
            log_prob, state, terminated = self.step(state)
            self.log_probs.append(log_prob)

        if verbose:
            print('\nGenerated network:')
            print(self.actions)

        net = Net(self.actions)
        accuracy = net.fit(iter_train, iter_dev)

        self.reward += accuracy

        return self.reward, net
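
    # The condensed variant above delegates per-action sampling to self.step,
    # which the listing does not show. A plausible sketch of that helper,
    # mirroring the bookkeeping of the expanded variant below (the forward-pass
    # signature is an assumption):
    def step(self, state):
        # One controller decision: forward pass, sample an action, record it,
        # and report whether the rollout should stop.
        logits, state = self(state)
        dist = torch.distributions.Categorical(logits=logits)
        idx = dist.sample()
        action = self.index_to_action[int(idx)]
        self.actions.append(action)
        self.entropies.append(dist.entropy())
        terminated = (action == 'EOS') or (len(self.actions) == self.max_depth)
        return dist.log_prob(idx), state, terminated
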
    def generate_rollout(self, iter_train, iter_dev, verbose=False):
        self.log_probs = []
        self.actions = []
        self.entropies = []
        self.reward = None
        self.adv = 0

        state = None
        input = torch.zeros(self.hidden_size)
        is_embed = True
        terminate = False
        self.reward = 0

        while not terminate:
            logits, state = self(input, state, is_embed)

            idx = torch.distributions.Categorical(logits=logits).sample().detach()
            probs = F.softmax(logits, dim=-1)
            log_probs = torch.log(probs)
            self.log_probs.append(log_probs[idx])

            action = self.index_to_action[int(idx)]
            self.actions.append(action)

            entropy = -(log_probs * probs).sum(dim=-1)
            self.entropies.append(entropy)

            terminate = (action == 'EOS') or (len(self.actions) == self.max_depth)
            is_embed = False
            # torch.autograd.Variable is deprecated; a plain tensor behaves the same.
            input = torch.tensor([int(idx)], dtype=torch.long)

        if verbose:
            print('\nGenerated network:')
            print(self.actions)

        net = Net(self.actions)
        accuracy = net.fit(iter_train, iter_dev)
        self.reward += accuracy

        # moving average baseline
        if self.baseline is None:
            self.baseline = self.reward
        else:
            self.baseline = self.decay * self.baseline + (1 - self.decay) * self.reward
        self.adv = self.reward - self.baseline

        return self.reward, self.adv
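
The rollout returns the reward and the baseline-corrected advantage, which is exactly what a REINFORCE-style policy-gradient update consumes. A minimal sketch of such an update (the `controller`/`optimizer` names and the entropy weight are assumptions, not part of the source):

import torch

def reinforce_update(controller, optimizer, entropy_weight=1e-4):
    # Gradient ascent on advantage-weighted log-probs; the entropy bonus
    # discourages the controller from collapsing to one architecture too early.
    log_probs = torch.stack(controller.log_probs)
    entropies = torch.stack(controller.entropies)
    loss = -(controller.adv * log_probs).sum() - entropy_weight * entropies.sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()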