Example #1
 def __init__(self, layers, alpha=0.3, C=0.0, gradcheck=1e-4):
     self.alpha = alpha
     self._lambda = C
     self.layers = layers
     self.weights = []
     self.biases = []
     self._x = T.dmatrix('x')
     self._y = T.dmatrix('y')
     for i, n in enumerate(layers):
         if i != len(layers) - 1:
             w = shared(get_weights((layers[i + 1], n)),
                        name="w{}".format(i))
             b = shared(get_weights((layers[i + 1], 1)),
                        name="b{}".format(i))
             self.weights.append(w)
             self.biases.append(b)
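
The snippet in Example #1 wraps the return value of get_weights in a Theano shared variable itself, so the helper only needs to produce a NumPy array of the requested shape. A minimal sketch, assuming a plain scaled-uniform initialization (the actual initializer is not part of the snippet):

import numpy as np

def get_weights(shape, scale=0.1):
    # Hypothetical helper: small random values of the given shape;
    # the caller wraps the array in theano.shared() and names it.
    return np.random.uniform(-scale, scale, size=shape)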
Example #2
    def __init__(self,
                 input_dim,
                 output_dim,
                 activation='sigmoid',
                 batch_normalization=False,
                 name='fully_connected'):
        """Initialize weights and bias."""
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.batch_normalization = batch_normalization

        # Set the activation function for this layer
        if activation == 'sigmoid':
            self.activation = T.nnet.sigmoid
        elif activation == 'tanh':
            self.activation = T.tanh
        elif activation == 'relu':
            self.activation = T.nnet.relu
        elif activation == 'softmax':
            self.activation = T.nnet.softmax
        elif activation == 'linear':
            self.activation = 'linear'
        else:
            raise NotImplementedError("Unknown activation")

        # Initialize weights & biases for this layer
        self.weights = get_weights(shape=(input_dim, output_dim),
                                   name=name + '__weights')
        self.bias = get_bias(output_dim, name=name + '__bias')

        self.params = [self.weights, self.bias]
Example #3
    def __init__(self,
                 input_dim,
                 output_dim,
                 activation='sigmoid',
                 name='fully_connected'):
        """Initialize weights and biases."""
        # Set input and output dimensions
        self.input_dim = input_dim
        self.output_dim = output_dim

        # Set the activation function for this layer
        if activation == 'sigmoid':
            self.activation = T.nnet.sigmoid
        elif activation == 'tanh':
            self.activation = T.tanh
        elif activation == 'relu':
            self.activation = T.nnet.relu
        elif activation == 'linear':
            self.activation = None
        else:
            raise NotImplementedError("Unknown activation")

        self.weights = get_relu_weights((input_dim, output_dim),
                                        name=name + '__weights')
        self.gating_weights = get_weights(shape=(input_dim, output_dim),
                                          name=name + '__gating_weights')  # Transform gate

        self.bias = get_bias(output_dim, name=name + '__bias')
        self.gating_bias = get_highway_bias(output_dim,
                                            name=name + '__gating_bias')
        self.params = [
            self.weights, self.gating_weights, self.gating_bias, self.bias
        ]
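
Examples #2 and #3 depend on get_weights, get_relu_weights, get_bias, and get_highway_bias helpers that are not included in the snippets. A hedged sketch of what such Theano helpers commonly look like follows; the exact initializers are an assumption (Glorot uniform for sigmoid/tanh weights, He initialization for ReLU weights, and a negative initial gating bias so the highway transform gate starts mostly closed):

import numpy as np
import theano

def _shared(value, name):
    return theano.shared(value=value.astype(theano.config.floatX), name=name)

def get_weights(shape, name):
    # Glorot-style uniform initialization
    bound = np.sqrt(6.0 / (shape[0] + shape[1]))
    return _shared(np.random.uniform(-bound, bound, size=shape), name)

def get_relu_weights(shape, name):
    # He-style initialization, better suited to ReLU units
    return _shared(np.random.randn(*shape) * np.sqrt(2.0 / shape[0]), name)

def get_bias(dim, name):
    return _shared(np.zeros(dim), name)

def get_highway_bias(dim, name, value=-2.0):
    # Negative bias keeps the transform gate near zero early in training
    return _shared(np.full(dim, float(value)), name)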
Example #4
    def generate_knn_network(self, network, k):
        #k = 21
        print("knn red")
        network_size = network.vcount()
        edgesList = network.get_edgelist()
        weight_list = network.es['weight']
        dict_weights = get_weights(edgesList, weight_list)

        new_network = Graph()
        new_network.add_vertices(network_size)

        k_edges = []
        for i in range(network_size):
            edges_to_analize = dict()
            vertex = i
            vecinos = network.neighbors(vertex)
            #print vertex , vecinos
            for j in vecinos:
                if vertex < j:
                    key = str(vertex) + '-' + str(j)
                else:
                    key = str(j) + '-' + str(vertex)

                weight = dict_weights[key]
                edges_to_analize[key] = weight
            edges_to_analize_sorted = sorted(
                edges_to_analize.items(), key=operator.itemgetter(1), reverse=True)
            #edges_to_analize_sorted = sorted(edges_to_analize.items(), key=operator.itemgetter(1))
            number_vecinos = len(vecinos)
            index_remove = number_vecinos - k
            #print number_vecinos, k, index_remove
            k_best = edges_to_analize_sorted[0:k]
            removed = edges_to_analize_sorted[k:]
            #print k_best


            for j in k_best:
                key = j[0]
                aresta = key.split('-')
                aresta_i = int(aresta[0])
                aresta_f = int(aresta[1])
                edge_pair = (aresta_i, aresta_f)
                k_edges.append(edge_pair)

        #print k_edges
        new_network.add_edges(k_edges)


        #print len(edgesList) , len(new_network.get_edgelist())
        new_edge_list =  new_network.get_edgelist()
        k_weights = []

        for i in new_edge_list:
            key = str(i[0]) + '-' + str(i[1])
            weight = dict_weights[key]
            k_weights.append(weight)


        new_network.es['weight'] = k_weights
        #draw_graph(new_network)
        return new_network
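
The get_weights helper used at the top of Example #4 evidently builds a lookup from an undirected edge to its weight, keyed by the same 'smaller-larger' string format the method uses later. A minimal sketch consistent with that usage (the original helper is not shown):

def get_weights(edge_list, weight_list):
    # Map each undirected edge (i, j) to its weight under the key "i-j",
    # with the smaller vertex id first, matching the lookups above.
    dict_weights = {}
    for (i, j), w in zip(edge_list, weight_list):
        if i < j:
            key = str(i) + '-' + str(j)
        else:
            key = str(j) + '-' + str(i)
        dict_weights[key] = w
    return dict_weights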
Example #5
def gen_weights_all(ddir):
    params = import_params(ddir)

    try:
        nRows = int(params['nRowsIn'])
        nCols = int(params['nColsIn'])
        nOutputs = int(params['nOutputs'])
    except (KeyError, TypeError) as err:
        print('necessary parameter not found: ' + str(err))
        sys.exit(-1)

    nInputs = nRows * nCols

    Wx, Wy = get_weights(ddir, nInputs, nOutputs)
    if Wx is None or Wy is None:
        print("failed to read weights")
        return

    # take most recent values, strip time and transpose so we can write column wise (like eigen)
    Wx, Wy = Wx[-1,1:,:].T, Wy[-1,1:,:].T
    # use final value and reshape to row vector
    Wx, Wy = np.squeeze(Wx.reshape(-1,1)), np.squeeze(Wy.reshape(-1,1))
    # prepend dummy time stamp
    Wx, Wy = np.concatenate(([0.0],Wx)), np.concatenate(([0.0],Wy))
    save_weights_all(ddir, [Wx], [Wy])
Example #6
 def __init__(self):
     super().__init__()
     self.xyz_test, self.e_test = utils.read_data(self.test_set,
                                                  E_columns=4)
     self.weights_test = utils.get_weights(self.e_test[:, 0], self.delta_e,
                                           self.e_min)
     self.y_test_ref = self.e_test[:, 1]
Example #7
def get_mlp_model(n_in, n_out, n_layers=2, n_hidden=50):
    assert n_layers >= 2, '`n_layers` should be greater than 1 (otherwise it is just an mlp)'

    # initialize weights
    weights = [utils.get_weights('w_1', n_in, n_hidden)]
    weights += [utils.get_weights('w_%d' % i, n_hidden, n_hidden) for i in range(2, n_layers)]
    weights += [utils.get_weights('w_%d' % n_layers, n_hidden, n_out)]

    # initialize biases
    biases = [utils.get_weights('b_%d' % i, n_hidden) for i in range(1, n_layers)]
    biases += [utils.get_weights('b_%d' % n_layers, n_out)]

    # binarized versions
    deterministic_binary_weights = [utils.binarize(w, mode='deterministic') for w in weights]
    stochastic_binary_weights = [utils.binarize(w, mode='stochastic') for w in weights]

    # variables
    lr = T.scalar(name='learning_rate')
    X = T.matrix(name='X', dtype=theano.config.floatX)
    y = T.matrix(name='y', dtype=theano.config.floatX)

    # generate outputs of mlps
    d_outs = [utils.hard_sigmoid(T.dot(X, deterministic_binary_weights[0]) + biases[0])]
    for w, b in zip(deterministic_binary_weights[1:], biases[1:]):
        d_outs.append(utils.hard_sigmoid(T.dot(d_outs[-1], w) + b))
    s_outs = [utils.hard_sigmoid(T.dot(X, stochastic_binary_weights[0]) + biases[0])]
    for w, b in zip(stochastic_binary_weights[1:], biases[1:]):
        s_outs.append(utils.hard_sigmoid(T.dot(s_outs[-1], w) + b))

    # cost function (see utils)
    cost = utils.get_cost((s_outs[-1]+1.)/2., (y+1.)/2., mode='mse')

    # get the update functions
    params = weights + biases
    grads = [T.grad(cost, p) for p in stochastic_binary_weights + biases]
    updates = [(p, T.clip(p - lr * g, -1, 1)) for p, g in zip(params, grads)]

    # generate training and testing functions
    train_func = theano.function([X, y, lr], [cost], updates=updates)
    test_func = theano.function([X], [d_outs[-1]])
    grads_func = theano.function([X, y], grads)
    int_output_func = theano.function([X], s_outs + d_outs)

    return train_func, test_func, grads_func, weights + biases, int_output_func
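
Example #7 is a BinaryConnect-style network, and its utils.hard_sigmoid and utils.binarize helpers are not part of the snippet. A hedged sketch of the standard formulation (a piecewise-linear sigmoid, deterministic sign binarization, and stochastic binarization that samples +1 with probability given by the hard sigmoid):

import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

_srng = MRG_RandomStreams(seed=1234)

def hard_sigmoid(x):
    # Piecewise-linear approximation of the sigmoid, clipped to [0, 1]
    return T.clip((x + 1.) / 2., 0., 1.)

def binarize(w, mode='deterministic'):
    if mode == 'deterministic':
        # weights >= 0 map to +1, the rest to -1
        wb = T.round(hard_sigmoid(w))
    elif mode == 'stochastic':
        # sample +1 with probability hard_sigmoid(w)
        wb = T.cast(_srng.binomial(n=1, p=hard_sigmoid(w), size=w.shape),
                    theano.config.floatX)
    else:
        raise ValueError('unknown binarization mode: ' + mode)
    return 2. * wb - 1.  # map {0, 1} onto {-1, +1}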
Example #8
    def train(self):
        self.model.train()

        current_weights = get_weights(self.model)

        for idx, _ in enumerate(self.dataloaders_dict['train']):
            gradients = [
                worker.compute_gradients.remote(current_weights)
                for worker in self.workers
            ]
            current_weights = self.apply_gradients(*ray.get(gradients))
            print(f'Computed batch {idx}')
Example #9
    def apply_gradients(self, *gradients):
        summed_gradients = [
            np.stack(gradient_zip).sum(axis=0)
            for gradient_zip in zip(*gradients)
        ]
        self.optimizer.zero_grad()

        set_gradients(self.model, summed_gradients)

        self.optimizer.step()

        return get_weights(self.model)
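
Examples #8 and #9 follow the Ray parameter-server pattern for PyTorch; the get_weights and set_gradients helpers they call are not shown. A minimal sketch of what such helpers conventionally do (ship the state dict between processes, and write the summed NumPy gradients back into each parameter's .grad before the optimizer step):

import torch

def get_weights(model):
    # Copy the parameters to CPU so they can be serialized by Ray
    return {name: param.cpu() for name, param in model.state_dict().items()}

def set_gradients(model, gradients):
    # gradients: one NumPy array per parameter, in model.parameters() order
    for g, p in zip(gradients, model.parameters()):
        if g is not None:
            p.grad = torch.from_numpy(g).to(p.device)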
Example #10
def make_kernel_odd(weights):

    kernels = utils.get_kernels(weights)

    new_kernels = np.ndarray(shape=(kernels.shape[0], kernels.shape[1],
                                    kernels.shape[2] + 1))

    for i in range(kernels.shape[0]):
        for j in range(kernels.shape[1]):

            new_kernels[i][j] = np.append(kernels[i][j], 0)

    return utils.get_weights(new_kernels)
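
Examples #10 and #22 convert back and forth between Keras Conv1D weight arrays, stored as (kernel_size, channels, filters), and a "kernels" view with the filter taps on the last axis. A sketch of transpose-based helpers consistent with that indexing, assuming this axis convention holds for the original utils module:

import numpy as np

def get_kernels(weights):
    # (kernel_size, channels, filters) -> (filters, channels, kernel_size)
    return np.transpose(weights, (2, 1, 0))

def get_weights(kernels):
    # Inverse of get_kernels
    return np.transpose(kernels, (2, 1, 0))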
Example #11
 def __init__(self, hidden_dim, q_dim, layer, config):
     super(BasicBlock, self).__init__()
     self.config = config
     self.layer = layer
     self.gnn_type = config.gnn.split(':')[0]
     if config.tok2ent == 'mean_max':
         input_dim = hidden_dim * 2
     else:
         input_dim = hidden_dim
     self.tok2ent = tok_to_ent(config.tok2ent)()
     self.query_weight = get_weights((q_dim, input_dim))
     self.temp = np.sqrt(q_dim * input_dim)
     self.gat = AttentionLayer(input_dim,
                               hidden_dim,
                               config.n_heads,
                               config,
                               layer_id=layer)
     self.int_layer = InteractionLayer(hidden_dim * 2, hidden_dim, config)
Example #12
    def fit(self, successes, trials, n_samples=1000, baseline=0.0, values=None, smoothing=1.0):
        '''
        Generate the weights for each arm based on bandit history.

        Parameters:
            successes (array): A 1 x n array with total successes for each arm
               trials (array): A 1 x n array with total trials for each arm
              n_samples (int): The number of samples to pull from each arm's distribution
                               for Thompson Sampling.
             baseline (float): The minimum weight to give each arm
               values (array): A 1 x n array with the reward value for each arm, or None
            smoothing (float): The constant factor by which to divide all trials and successes

        Updates:
            self.weights (array): A 1 x n array with normalized weights for each arm
        '''

        self.values = utils.set_values(values, len(trials))
        self.samples = utils.get_samples(trials, successes, n_samples, smoothing, self.values)
        self._raw_weights = utils.get_weights(self.samples)
        self.weights = utils.normalize_weights(self._raw_weights, baseline)
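
The helpers doing the actual Thompson Sampling in Example #12 are not part of the snippet. Under one common convention (each arm's raw weight is the fraction of Monte Carlo draws in which that arm had the highest sampled value, then floored at the baseline and renormalized), a hedged sketch could look like this; the sample layout is an assumption:

import numpy as np

def get_weights(samples):
    # samples: shape (n_samples, n_arms), one posterior draw per arm per row
    best_arm = np.argmax(samples, axis=1)
    counts = np.bincount(best_arm, minlength=samples.shape[1])
    return counts / counts.sum()

def normalize_weights(weights, baseline=0.0):
    # Give every arm at least `baseline`, then renormalize to sum to 1
    floored = np.maximum(np.asarray(weights, dtype=float), baseline)
    return floored / floored.sum()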
Example #13
    def loadData(self):

        trainset = SimulationDataset(
            "train",
            transforms=transforms.Compose([
                utils.RandomCoose(['centre', 'left', 'right']),
                utils.Preprocess(self.input_shape),
                utils.RandomTranslate(100, 10),
                utils.RandomBrightness(),
                utils.RandomContrast(),
                utils.RandomHue(),
                utils.RandomHorizontalFlip(),
                utils.ToTensor(),
                utils.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]))

        weights = utils.get_weights(trainset)

        sampler = torch.utils.data.sampler.WeightedRandomSampler(
            weights, len(weights), replacement=True)

        # self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.cfg.batch_size, sampler=sampler, num_workers=4)

        self.trainloader = torch.utils.data.DataLoader(
            trainset, batch_size=self.cfg.batch_size, num_workers=4)

        testset = SimulationDataset("test",
                                    transforms=transforms.Compose([
                                        utils.RandomCoose(['center']),
                                        utils.Preprocess(self.input_shape),
                                        utils.ToTensor(),
                                        utils.Normalize([0.485, 0.456, 0.406],
                                                        [0.229, 0.224, 0.225])
                                    ]))

        self.testloader = torch.utils.data.DataLoader(
            testset,
            batch_size=self.cfg.batch_size,
            shuffle=False,
            num_workers=4)
Example #14

    def run_generator(self, save=False):

        for df_train, df_test in tqdm(self.generator):
            if self.feature_subset is not None:
                df_train = df_train[self.feature_subset+self.cat_features+self.drop_columns+['target']]
                df_test = df_test[self.feature_subset+self.cat_features+self.drop_columns+['target']]
            if self.sample_weights_type is not None:
                self.sample_weights = get_weights(df_train, type=self.sample_weights_type)

            if self.cluster is not None:
                for c in self.cluster:
                    self.model = self.model.create(df_train[df_train.cluster == c], df_test[df_test.cluster == c],
                                     categorical_features=self.cat_features,
                                     drop_columns=self.drop_columns, isScope=self.isScope,
                                     sample_weights=None, evaluation=self.evaluation, name=self.name)

                    if not self.evaluation:
                        self.predictions = pd.concat([self.predictions, self.model.run()])

            else:
                self.model = self.model.create(df_train, df_test, categorical_features=self.cat_features,
                                 drop_columns=self.drop_columns, isScope=self.isScope,
                                 sample_weights=self.sample_weights, evaluation=self.evaluation, name=self.name, )

                if not self.evaluation:
                    self.predictions = pd.concat([self.predictions, self.model.run()])

            if self.evaluation:
                self.best_iterations.append(self.model.run())
        if save:
            self.save_predictions()

        if self.evaluation:
            print(self.best_iterations)

        return self.predictions
Example #15

import utils
import yolo_opencv
import os
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights_name", type=str, default="yolov3.weights")
    parser.add_argument(
        "--weights_url",
        type=str,
        default="https://pjreddie.com/media/files/yolov3.weights")
    parser.add_argument("--config_name", type=str, default="yolov3.cfg")
    parser.add_argument("--classes_file", type=str, default="yolov3.txt")
    args = parser.parse_args()

    utils.get_weights(args.weights_name, args.weights_url)

    print("Recognition started. Press any key to exit.")
    yolo_opencv.live_recognition(args.weights_name, args.config_name,
                                 args.classes_file)
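
Here utils.get_weights(name, url) is expected to make sure the YOLOv3 weights file exists locally before detection starts. A minimal sketch of a download-if-missing helper using only the standard library (the project's real helper may differ):

import os
import urllib.request

def get_weights(weights_name, weights_url):
    # Download the weights file once; skip the download if it is already on disk.
    if not os.path.isfile(weights_name):
        print("Downloading {} ...".format(weights_name))
        urllib.request.urlretrieve(weights_url, weights_name)
    return weights_name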
Example #16
 def get_projections(self, window, location='all', result='all', scheme='constant'):
     """
     Insert projections into each Game: 
         proj_home_GF, proj_away_GF, proj_home_GA, proj_away_GA, proj_diff_score
         proj_diff_score = (proj_home_GF+proj_away_GA)/2 - (proj_away_GF+proj_home_GA)/2
         ---> GF=goals for; GA=goals against
     params: 
         window: int    | window size (number of games) for weighting/projections
       location: string | location of games to include in projections
                          'all', 'home', or 'away' (default='all')
         result: string | 'all', 'wins', 'losses', 'R', 'notR', 'OT', or 'SO'
         scheme: string | weighting scheme: default='constant'
                          options are 'constant' or 'linear'
     """
     # location must be all, home, or away
     assert location in ['all', 'home', 'away'], 'location='+str(location)
     
     # result must be 'all', 'wins', 'losses', 'R', 'notR', 'OT', or 'SO'
     assert result in ['all', 'wins', 'losses', 'R', 'notR', 'OT', 'SO'], 'result='+str(result)
     
     # scheme must be 'constant' or 'linear'
     assert scheme in ['constant', 'linear'], 'scheme='+str(scheme)
     
     # get the projection weights
     weights = get_weights(window, scheme=scheme)
     
     # loop over teams in Season
     for team in self.teams():
         
         # get TeamSeason object
         team_season = self.get_team_season(team)
         
         # get selection of only all games (prone to double counting)
         games = team_season.get_games(location='all', result=result)
         
         # loop over team's games
         for g in games:
             
             # date of the game
             date = g.date
             
             # if current team is home team
             if team == g.home:
                 
                 # get opponent's TeamSeason
                 team_season_opponent = self.get_team_season(g.away)
                 
                 # get goals lists for home and away teams
                 home_GF_list, home_GA_list = team_season.get_goals_lists(
                     window, location=location, result=result, before=date)
                 away_GF_list, away_GA_list = team_season_opponent.get_goals_lists(
                     window, location=location, result=result, before=date)
             
             # if current team is away team
             if team == g.away:
                 
                 # get opponent's TeamSeason
                 team_season_opponent = self.get_team_season(g.home)
                 
                 # get goals lists for home and away teams
                 away_GF_list, away_GA_list = team_season.get_goals_lists(
                     window, location=location, result=result, before=date)
                 home_GF_list, home_GA_list = team_season_opponent.get_goals_lists(
                     window, location=location, result=result, before=date)
             
             # make sure N prior games were available for both teams
             if home_GF_list and home_GA_list and away_GF_list and away_GA_list:
                 
                 # initialize projection variables
                 proj_home_GF, proj_home_GA, proj_away_GF, proj_away_GA = 0.0, 0.0, 0.0, 0.0
                 
                 # loop over window size
                 for i in range(window):
                     
                     # compute the projections
                     proj_home_GF += home_GF_list[i] * weights[i]
                     proj_home_GA += home_GA_list[i] * weights[i]
                     proj_away_GF += away_GF_list[i] * weights[i]
                     proj_away_GA += away_GA_list[i] * weights[i]
                     
                 # compute the projected score differential
                 proj_diff_score = (proj_home_GF+proj_away_GA)/2.0 - (proj_away_GF+proj_home_GA)/2.0
                 
                 # add projected data to the Game object
                 g.insert_projections(proj_home_GF, proj_home_GA, proj_away_GF, proj_away_GA, proj_diff_score)
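
The projection loop above assumes get_weights(window, scheme=...) returns a length-window array of non-negative weights summing to one, either uniform ('constant') or decaying with game age ('linear'). A sketch consistent with that contract; the exact linear decay and the ordering of the games list are assumptions:

import numpy as np

def get_weights(window, scheme='constant'):
    if scheme == 'constant':
        w = np.ones(window)
    elif scheme == 'linear':
        # assume index 0 is the most recent game and give it the largest weight
        w = np.arange(window, 0, -1, dtype=float)
    else:
        raise ValueError('unknown scheme: ' + str(scheme))
    return w / w.sum()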
Example #17
def main(settingsfname, verbose=False):

    settings = utils.get_settings(settingsfname)

    subjects = settings['SUBJECTS']

    data = utils.get_data(settings, verbose=verbose)

    metadata = utils.get_metadata()

    features_that_parsed = [feature for feature in
                            settings['FEATURES'] if feature in list(data.keys())]

    settings['FEATURES'] = features_that_parsed

    utils.print_verbose("=====Feature HDF5s parsed=====", flag=verbose)

    # get model
    model_pipe = utils.build_model_pipe(settings)

    utils.print_verbose("=== Model Used ===\n"
                        "{0}\n==================".format(model_pipe), flag=verbose)

    # dictionary to store results
    subject_predictions = {}

    accuracy_scores = {}

    for subject in subjects:
        utils.print_verbose(
            "=====Training {0} Model=====".format(str(subject)),
            flag=verbose)

        # initialise the data assembler
        assembler = utils.DataAssembler(settings, data, metadata)
        X, y = assembler.test_train_discrimination(subject)

        # get the CV iterator
        cv = utils.sklearn.cross_validation.StratifiedShuffleSplit(
            y,
            random_state=settings['R_SEED'],
            n_iter=settings['CVITERCOUNT'])

        # initialise lists for cross-val results
        predictions = []
        labels = []
        allweights = []

        # run cross validation and report results
        for train, test in cv:

            # calculate the weights
            weights = utils.get_weights(y[train])
            # fit the model to the training data
            model_pipe.fit(X[train], y[train], clf__sample_weight=weights)
            # append new predictions
            predictions.append(model_pipe.predict(X[test]))
            # append test weights to store (why?) (used to calculate auc below)
            weights = utils.get_weights(y[test])
            allweights.append(weights)
            # store true labels
            labels.append(y[test])

        # stack up the results
        predictions = utils.np.hstack(predictions)
        labels = utils.np.hstack(labels)
        weights = utils.np.hstack(allweights)

        # calculate the total accuracy
        accuracy = utils.sklearn.metrics.accuracy_score(labels,
                                                        predictions,
                                                        sample_weight=weights)

        print("Accuracy score for {1}: {0:.3f}".format(accuracy, subject))

        # add AUC scores to a subj dict
        accuracy_scores.update({subject: accuracy})

        # store results from each subject
        subject_predictions[subject] = (predictions, labels, weights)

    # stack subject results (don't worry about this line)
    predictions, labels, weights = map(utils.np.hstack,
                                       zip(*list(subject_predictions.values())))

    # calculate global accuracy
    accuracy = utils.sklearn.metrics.accuracy_score(labels, predictions,
                                                    sample_weight=weights)

    print(
        "predicted accuracy score over all subjects: {0:.2f}".format(accuracy))

    # output AUC scores to file
    accuracy_scores.update({'all': accuracy})

    settings['DISCRIMINATE'] = 'accuracy_scores.csv'
    # settings['AUC_SCORE_PATH'] = 'discriminate_scores'
    utils.output_auc_scores(accuracy_scores, settings)

    return accuracy_scores
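
utils.get_weights(y) in Example #17 produces per-sample weights both for fitting (clf__sample_weight) and for the weighted accuracy score. A plausible sketch is inverse class-frequency ("balanced") weighting, though the original helper is not shown:

import numpy as np

def get_weights(y):
    # Weight each sample inversely to its class frequency so every class
    # contributes equally to the fit and to the weighted metrics.
    classes, counts = np.unique(y, return_counts=True)
    per_class = {c: len(y) / (len(classes) * n) for c, n in zip(classes, counts)}
    return np.array([per_class[label] for label in y])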
Example #18
 def load(self, g_path, d_path):
     self.agent.load(g_path)
     self.g_beta.load(g_path)
     self.discriminator.load_weights(get_weights(d_path))
Example #19
for k in range(n_experiments):
    print("Iteration:", k)
    start = time.time()
    start_clock = time.clock()
    y = np.random.binomial(1, 0.5, n_points) * 2 - 1
    h_y = utils_autologistic.run_gibbs_wrapper(y.copy(),
                                               alpha_nce,
                                               beta_nce,
                                               neighbour_matrix,
                                               n_iter=n_gibbs,
                                               history=True)

    if useCube == 1 and useThinning == 0:
        constraints = utils_autologistic.create_constraints(
            h_y, alpha_nce, beta_nce, neighbour_matrix)
        weights = get_weights(constraints)
        omega = np.sum(np.abs(weights))
        print("SUM OF PROJECTED POINTS:", np.sum(weights))
        print("Constraints respected ?", np.dot(constraints.T, weights))
        constraints = np.hstack(
            [np.ones((constraints.shape[0], 1)), constraints])
        selected, signs = cube_method(constraints, weights, N_KEEP)
        print("Number of points kept:", np.sum(np.abs(selected)))
        print("Sum of points:", np.sum(signs))
        print("Proportion of positive weights:",
              len(signs[signs == 1]) / len(signs))
        points_weights = signs[np.abs(selected) == 1] * omega / N_KEEP
        h_y = h_y[np.abs(selected) == 1, :]
    else:
        points_weights = np.ones(len(h_y))
Example #20
        # record training history (starts at initial point)
        training_history.append([i, loss.item(), accuracy(out, y).item()])

        # take the step
        opt.step()

        if i % args.print_freq == 0:
            print(training_history[-1])

        if args.lr_schedule:
            scheduler.step(i)

        if i > args.iterations:
            STOP = True

        weights_history.append(get_weights(net))
        if len(weights_history) > 1000:
            weights_history.popleft()

        # clear cache
        torch.cuda.empty_cache()

        if STOP:
            assert len(weights_history) == 1000

            # final evaluation and saving results
            print('eval time {}'.format(i))
            te_hist, te_outputs, te_noise_norm = eval(test_loader_eval, net,
                                                      crit, opt, args)
            tr_hist, tr_outputs, tr_noise_norm = eval(train_loader_eval,
                                                      net,
Example #21
def collect_entropy_policies(env, epochs, T, MODEL_DIR=''):

    video_dir = 'videos/' + args.exp_name

    direct = os.getcwd() + '/data/'
    experiment_directory = direct + args.exp_name
    print(experiment_directory)

    print(sys.argv)
    if not os.path.exists(experiment_directory):
        os.makedirs(experiment_directory)
        f = open(experiment_directory + '/args', 'w')
        f.write(' '.join(sys.argv))
        f.flush()

    indexes = [1, 5, 10, 15]
    states_visited_indexes = [0, 5, 10, 15]

    states_visited_cumulative = []
    states_visited_cumulative_baseline = []

    running_avg_p = np.zeros(shape=(tuple(ant_utils.num_states)))
    running_avg_p_xy = np.zeros(shape=(tuple(ant_utils.num_states_2d)))
    running_avg_ent = 0
    running_avg_ent_xy = 0

    running_avg_p_baseline = np.zeros(shape=(tuple(ant_utils.num_states)))
    running_avg_p_baseline_xy = np.zeros(
        shape=(tuple(ant_utils.num_states_2d)))
    running_avg_ent_baseline = 0
    running_avg_ent_baseline_xy = 0

    pct_visited = []
    pct_visited_baseline = []
    pct_visited_xy = []
    pct_visited_xy_baseline = []

    running_avg_entropies = []
    running_avg_entropies_xy = []
    running_avg_ps_xy = []
    avg_ps_xy = []

    running_avg_entropies_baseline = []
    running_avg_entropies_baseline_xy = []
    running_avg_ps_baseline_xy = []
    avg_ps_baseline_xy = []

    policies = []
    distributions = []
    initial_state = init_state(env)

    prebuf = ExperienceBuffer()
    env.reset()
    for t in range(10000):
        action = env.action_space.sample()
        obs, reward, done, _ = env.step(action)
        prebuf.store(get_state(env, obs))
        if done:
            env.reset()
            done = False

    prebuf.normalize()
    normalization_factors = prebuf.normalization_factors
    utils.log_statement(normalization_factors)
    prebuf = None
    if not args.gaussian:
        normalization_factors = []

    reward_fn = np.zeros(shape=(tuple(ant_utils.num_states)))

    for i in range(epochs):
        utils.log_statement("*** ------- EPOCH %d ------- ***" % i)

        # clear initial state if applicable.
        if not args.initial_state:
            initial_state = []
        else:
            utils.log_statement(initial_state)
        utils.log_statement("max reward: " + str(np.max(reward_fn)))

        logger_kwargs = setup_logger_kwargs("model%02d" % i,
                                            data_dir=experiment_directory)

        # Learn policy that maximizes current reward function.
        print("Learning new oracle...")
        seed = random.randint(1, 100000)
        sac = AntSoftActorCritic(lambda: gym.make(args.env),
                                 reward_fn=reward_fn,
                                 xid=i + 1,
                                 seed=seed,
                                 gamma=args.gamma,
                                 ac_kwargs=dict(hidden_sizes=[args.hid] *
                                                args.l),
                                 logger_kwargs=logger_kwargs,
                                 normalization_factors=normalization_factors)

        # The first policy is random
        if i == 0:
            sac.soft_actor_critic(epochs=0)
        else:
            sac.soft_actor_critic(epochs=args.episodes,
                                  initial_state=initial_state,
                                  start_steps=args.start_steps)
        policies.append(sac)

        p, _ = sac.test_agent(T, normalization_factors=normalization_factors)
        distributions.append(p)
        weights = utils.get_weights(distributions)

        epoch = 'epoch_%02d' % (i)
        if args.render:
            if i < 10:
                sac.record(T=args.record_steps,
                           n=1,
                           video_dir=video_dir + '/baseline/' + epoch,
                           on_policy=False)
            sac.record(T=args.record_steps,
                       n=1,
                       video_dir=video_dir + '/entropy/' + epoch,
                       on_policy=True)

        # Execute the cumulative average policy thus far.
        # Estimate distribution and entropy.
        print("Executing mixed policy...")
        average_p, average_p_xy, initial_state, states_visited, states_visited_xy = \
            execute_average_policy(env, policies, T, weights,
                                   reward_fn=reward_fn, norm=normalization_factors,
                                   initial_state=initial_state, n=args.n,
                                   render=args.render, video_dir=video_dir+'/mixed/'+epoch, epoch=i,
                                   record_steps=args.record_steps)

        print("Calculating maxEnt entropy...")
        round_entropy = entropy(average_p.ravel())
        round_entropy_xy = entropy(average_p_xy.ravel())

        # Update running averages for maxEnt.
        print("Updating maxEnt running averages...")
        running_avg_ent = running_avg_ent * (
            i) / float(i + 1) + round_entropy / float(i + 1)
        running_avg_ent_xy = running_avg_ent_xy * (
            i) / float(i + 1) + round_entropy_xy / float(i + 1)
        running_avg_p *= (i) / float(i + 1)
        running_avg_p += average_p / float(i + 1)
        running_avg_p_xy *= (i) / float(i + 1)
        running_avg_p_xy += average_p_xy / float(i + 1)

        # update reward function
        print("Update reward function")
        eps = 1 / np.sqrt(ant_utils.total_state_space)
        if args.cumulative:
            reward_fn = grad_ent(running_avg_p)
        else:
            reward_fn = 1.
            average_p += eps
            reward_fn /= average_p
        average_p = None  # delete big array

        # (save for plotting)
        running_avg_entropies.append(running_avg_ent)
        running_avg_entropies_xy.append(running_avg_ent_xy)
        if i in indexes:
            running_avg_ps_xy.append(np.copy(running_avg_p_xy))
            avg_ps_xy.append(np.copy(average_p_xy))

        print("Collecting baseline experience....")
        p_baseline, p_baseline_xy, states_visited_baseline, states_visited_xy_baseline = sac.test_agent_random(
            T, normalization_factors=normalization_factors, n=args.n)

        plotting.states_visited_over_time(states_visited,
                                          states_visited_baseline, i)
        plotting.states_visited_over_time(states_visited_xy,
                                          states_visited_xy_baseline,
                                          i,
                                          ext='_xy')

        # save for cumulative plot.
        if i in states_visited_indexes:
            # average over a whole bunch of rollouts
            # slow: so only do this when needed.
            print("Averaging unique xy states visited....")
            states_visited_xy = compute_states_visited_xy(
                env,
                policies,
                norm=normalization_factors,
                T=T,
                n=args.n,
                N=args.avg_N)
            states_visited_xy_baseline = compute_states_visited_xy(
                env,
                policies,
                norm=normalization_factors,
                T=T,
                n=args.n,
                N=args.avg_N,
                initial_state=initial_state,
                baseline=True)
            states_visited_cumulative.append(states_visited_xy)
            states_visited_cumulative_baseline.append(
                states_visited_xy_baseline)

        print("Compute baseline entropy....")
        round_entropy_baseline = entropy(p_baseline.ravel())
        round_entropy_baseline_xy = entropy(p_baseline_xy.ravel())

        # Update baseline running averages.
        print("Updating baseline running averages...")
        running_avg_ent_baseline = running_avg_ent_baseline * (
            i) / float(i + 1) + round_entropy_baseline / float(i + 1)
        running_avg_ent_baseline_xy = running_avg_ent_baseline_xy * (
            i) / float(i + 1) + round_entropy_baseline_xy / float(i + 1)

        running_avg_p_baseline *= (i) / float(i + 1)
        running_avg_p_baseline += p_baseline / float(i + 1)
        running_avg_p_baseline_xy *= (i) / float(i + 1)
        running_avg_p_baseline_xy += p_baseline_xy / float(i + 1)

        p_baseline = None

        # (save for plotting)
        running_avg_entropies_baseline.append(running_avg_ent_baseline)
        running_avg_entropies_baseline_xy.append(running_avg_ent_baseline_xy)
        if i in indexes:
            running_avg_ps_baseline_xy.append(
                np.copy(running_avg_p_baseline_xy))
            avg_ps_baseline_xy.append(np.copy(p_baseline_xy))

        utils.log_statement(average_p_xy)
        utils.log_statement(p_baseline_xy)

        # Calculate percent of state space visited.
        pct = np.count_nonzero(running_avg_p) / float(running_avg_p.size)
        pct_visited.append(pct)
        pct_xy = np.count_nonzero(running_avg_p_xy) / float(
            running_avg_p_xy.size)
        pct_visited_xy.append(pct_xy)

        pct_baseline = np.count_nonzero(running_avg_p_baseline) / float(
            running_avg_p_baseline.size)
        pct_visited_baseline.append(pct_baseline)
        pct_xy_baseline = np.count_nonzero(running_avg_p_baseline_xy) / float(
            running_avg_p_baseline_xy.size)
        pct_visited_xy_baseline.append(pct_xy_baseline)

        # Print round summary.
        col_headers = ["", "baseline", "maxEnt"]
        col1 = [
            "round_entropy_xy", "running_avg_ent_xy", "round_entropy",
            "running_avg_ent", "% state space xy", "% total state space"
        ]
        col2 = [
            round_entropy_baseline_xy, running_avg_ent_baseline_xy,
            round_entropy_baseline, running_avg_ent_baseline, pct_xy_baseline,
            pct_baseline
        ]
        col3 = [
            round_entropy_xy, running_avg_ent_xy, round_entropy,
            running_avg_ent, pct_xy, pct
        ]
        table = tabulate(np.transpose([col1, col2, col3]),
                         col_headers,
                         tablefmt="fancy_grid",
                         floatfmt=".4f")
        utils.log_statement(table)

        # Plot from round.
        plotting.heatmap(running_avg_p_xy, average_p_xy, i)
        plotting.heatmap1(running_avg_p_baseline_xy, i)

        if i == states_visited_indexes[3]:
            plotting.states_visited_over_time_multi(
                states_visited_cumulative, states_visited_cumulative_baseline,
                states_visited_indexes)

    # save final expert weights to use with the trained oracles.
    weights_file = experiment_directory + '/policy_weights'
    np.save(weights_file, weights)

    # cumulative plots.
    plotting.running_average_entropy(running_avg_entropies,
                                     running_avg_entropies_baseline)
    plotting.running_average_entropy(running_avg_entropies_xy,
                                     running_avg_entropies_baseline_xy,
                                     ext='_xy')

    plotting.heatmap4(running_avg_ps_xy,
                      running_avg_ps_baseline_xy,
                      indexes,
                      ext="cumulative")
    plotting.heatmap4(avg_ps_xy, avg_ps_baseline_xy, indexes, ext="epoch")

    plotting.percent_state_space_reached(pct_visited,
                                         pct_visited_baseline,
                                         ext='_total')
    plotting.percent_state_space_reached(pct_visited_xy,
                                         pct_visited_xy_baseline,
                                         ext="_xy")

    return policies
Example #22
def upscale(method: str, old_model_name: str, avg_pool_unaffected=True):

    old_model = utils.load_model('Models/{}.yaml'.format(old_model_name),
                                 'Models/{}.h5'.format(old_model_name))

    new_model = models.Sequential()

    first_layer = True
    for layer in old_model.layers:

        if type(layer) is keras.layers.convolutional.Conv1D:

            biases = layer.get_weights()[1]
            old_kernels = utils.get_kernels(layer.get_weights()[0])
            nodes = layer.kernel.shape[2].value

            if method == 'nearest_neighbor':

                new_kernels = nearest_neighbor(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(4 * 24000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'linear':

                new_kernels = linear(old_kernels)

                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(4 * 24000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'distance_weighting':

                new_kernels = distance_weighting(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(4 * 24000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'same':

                new_weights = layer.get_weights()

                if first_layer:
                    new_layer = layers.Conv1D(
                        nodes,
                        kernel_size=layer.kernel.shape[0].value,
                        activation=layer.activation,
                        input_shape=(4 * 24000, 1),
                        padding='same',
                        weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(
                        nodes,
                        kernel_size=layer.kernel.shape[0].value,
                        activation=layer.activation,
                        padding='same',
                        weights=new_weights)

                new_model.add(new_layer)

            elif method == 'dilate':

                new_kernels = dilate_kernels(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(4 * 24000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'nearest_directly':

                new_kernels = nearest_directly(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(4 * 24000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'linear_directly':

                new_kernels = linear_directly(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(4 * 24000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'inverse_directly':

                new_kernels = inverse_directly(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(4 * 24000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

        elif type(layer) is keras.layers.pooling.MaxPooling1D:

            pool_size = layer.pool_size[0]
            new_model.add(layers.MaxPooling1D(pool_size=pool_size))

        elif type(layer) is keras.layers.pooling.AveragePooling1D:

            if avg_pool_unaffected is True:

                pool_size = layer.pool_size[0]
                new_model.add(layers.AveragePooling1D(pool_size=pool_size))

            else:

                if method == 'dilate':
                    new_kernels = scale_avg_pooling(nodes,
                                                    [1 / 2, 0, 1 / 2, 0])

                elif method == 'nearest_directly':
                    new_kernels = scale_avg_pooling(
                        nodes, [1 / 2, 1 / 2, 1 / 2, 1 / 2])

                elif method == 'linear_directly':
                    new_kernels = scale_avg_pooling(
                        nodes, [1 / 2, 1 / 2, 1 / 2, 1 / 4])

                elif method == 'inverse_directly':
                    new_kernels = scale_avg_pooling(
                        nodes, [1 / 2, 1 / 2, 1 / 2, 1 / 2])

                dummy_bias = np.zeros(nodes)
                new_weights = [utils.get_weights(new_kernels), dummy_bias]

                new_layer = layers.Conv1D(nodes,
                                          kernel_size=new_kernels.shape[-1],
                                          activation='linear',
                                          padding='same',
                                          strides=2,
                                          weights=new_weights)
                new_model.add(new_layer)

        elif type(layer) is keras.layers.Flatten:

            f_dim = layer.input_shape
            new_model.add(layers.Flatten())

            # if method != 'same':
            #     new_model.add(layers.Flatten())

        elif type(layer) is keras.layers.Dense:

            original_shape = layer.get_weights()[0].shape
            output_dim = layer.get_weights()[1].shape[0]
            shape = (f_dim[1], f_dim[2], output_dim)
            weights, biases = layer.get_weights()

            old_conv_weights = weights.reshape(shape)

            old_kernels = utils.get_kernels(old_conv_weights)

            if method == 'nearest_neighbor':

                new_kernels = nearest_neighbor(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] * 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'linear':
                new_kernels = linear(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(original_shape[0] * 2,
                                             output_dim), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'distance_weighting':

                new_kernels = distance_weighting(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] * 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'same':

                new_kernels = np.concatenate((old_kernels, old_kernels),
                                             axis=2)
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] * 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

                # output_dim = layer.get_weights()[1].shape[0]
                #
                # shape = (f_dim[1], f_dim[2], output_dim)
                # new_weights = weights.reshape(shape)
                # new_layer = layers.Conv1D(output_dim,
                #                           f_dim[1],
                #                           strides=1,
                #                           activation=layer.activation,
                #                           padding='valid',
                #                           weights=[new_weights, biases])
                #
                # new_model.add(new_layer)
                #
                # new_model.add(layers.Lambda(lambda x: K.batch_flatten(x)))

            elif method == 'dilate':

                new_kernels = dilate_kernels(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] * 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'nearest_directly':

                new_kernels = nearest_directly(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] * 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'linear_directly':

                new_kernels = linear_directly(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] * 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'inverse_directly':

                new_kernels = inverse_directly(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] * 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

    return new_model
Example #23
    def run_async(self):
        overall_start_time = time.time()

        current_weights = get_weights(self.model)

        updates = len(self.dataloaders_dict['train']) * len(self.workers)
        for epoch in range(self.args.num_epochs):
            gradients = {}
            for worker in self.workers:
                gradients[worker.compute_gradients.remote(
                    current_weights)] = worker

            batches_processed_by_worker = {
                worker_id: 0
                for worker_id in range(self.args.num_workers)
            }
            start_time = time.time()

            for iteration in range(updates):
                ready_gradient_list, rest = ray.wait(list(gradients))
                if len(ready_gradient_list) == 0:
                    print(f'wait failed {ready_gradient_list}, {rest}')
                ready_gradient_id = ready_gradient_list[0]
                worker = gradients.pop(ready_gradient_id)
                worker_rank = ray.get(worker.get_rank.remote())
                batches_processed_by_worker[worker_rank] += 1
                self.model.train()
                current_weights = self.apply_gradients(
                    *[ray.get(ready_gradient_id)])

                if batches_processed_by_worker[worker_rank] <= len(
                        self.dataloaders_dict['train']):
                    gradients[worker.compute_gradients.remote(
                        current_weights)] = worker

            end_time = time.time()
            epoch_mins, epoch_secs = self.epoch_time(start_time, end_time)

            valid_loss, valid_acc = self.evaluate()

            print(
                f'Finished epoch {epoch+1:02} in {epoch_mins} min {epoch_secs} s'
            )
            print(
                f'\t Val. Loss: {valid_loss:.3f} |  Val. Acc: {valid_acc*100:.2f}%'
            )

        overall_end_time = time.time()
        print(
            f'Final Val. Loss: {valid_loss:.3f} |  Val. Acc: {valid_acc*100:.2f}%'
        )
        print('took overall',
              self.epoch_time(overall_start_time, overall_end_time))

        with open(self.args.model_name + f"_{self.args.num_workers}_pis",
                  "w") as out:
            out.write(
                f"Valid Acc.: {valid_acc*100}\n" +
                f"Took overall: {self.epoch_time(overall_start_time, overall_end_time)}"
            )

        return 1
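
The apply_gradients call in the loop above is not part of this snippet. A minimal sketch of what a driver-side update could look like, assuming a PyTorch model, a standard optimizer, and gradients arriving as lists of NumPy arrays (one per parameter); the helper name and the gradient format are inferred from how it is called, not taken from the original class:

import numpy as np
import torch

def apply_gradients_sketch(model, optimizer, *gradients):
    # average the received gradient lists, apply one optimizer step, return new weights
    averaged = [np.stack(per_param).mean(axis=0) for per_param in zip(*gradients)]
    optimizer.zero_grad()
    for param, grad in zip(model.parameters(), averaged):
        param.grad = torch.from_numpy(grad).to(device=param.device, dtype=param.dtype)
    optimizer.step()
    return [p.detach().cpu().numpy() for p in model.parameters()]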
Esempio n. 24
0
tf.reset_default_graph()

# inputs
X = tf.placeholder(tf.float32, shape=[None, pixels], name='X')
Y = tf.placeholder(tf.float32, shape=[None, y_nodes], name='Y')

# x dropout
x_drop = tf.placeholder(tf.float32, name='x_drop')

# learning rates
ae_lr = tf.placeholder(tf.float32, name='ae_lr')
dg_lr = tf.placeholder(tf.float32, name='dg_lr')
ss_lr = tf.placeholder(tf.float32, name='ss_lr')

# autoencoder weights
e_weights, e_list = xx.get_weights(e_units, 'e', xx.linear_weights)
y_weights, y_list = xx.get_weights(y_units, 'y', xx.linear_weights)
z_weights, z_list = xx.get_weights(z_units, 'z', xx.linear_weights)
s_weights, s_list = xx.get_weights(s_units, 's', xx.linear_weights)
d_weights, d_list = xx.get_weights(d_units, 'd', xx.linear_weights)

# discriminator weights
dy_weights, dy_list = xx.get_weights(dy_units, 'dy', xx.linear_weights)
dz_weights, dz_list = xx.get_weights(dz_units, 'dz', xx.linear_weights)

# variable lists
enc_y_list = e_list + y_list
enc_z_list = e_list + z_list + s_list
enc_list = e_list + y_list + z_list + s_list
dec_list = d_list
ae_list = enc_list + dec_list
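
The xx.get_weights(units, prefix, init) helper used above is not shown. A minimal TF1-style sketch of one way it could work, assuming units is a list of consecutive layer sizes and the function returns a dict of weight/bias variables together with a flat variable list (the return format is inferred from how the *_list values are concatenated above; everything else is an assumption):

import tensorflow.compat.v1 as tf

def get_weights_sketch(units, prefix, initializer=None):
    # create weight/bias variables for each pair of consecutive layer sizes
    variables, var_list = {}, []
    for i in range(len(units) - 1):
        w = tf.get_variable(f'{prefix}_w{i}', shape=[units[i], units[i + 1]],
                            initializer=initializer)
        b = tf.get_variable(f'{prefix}_b{i}', shape=[units[i + 1]],
                            initializer=tf.zeros_initializer())
        variables[f'w{i}'], variables[f'b{i}'] = w, b
        var_list += [w, b]
    return variables, var_list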
Esempio n. 25
0
def main(_):
    MAX_TRAIN_EPOCHS=20000

    FLAGS = tf.compat.v1.app.flags.FLAGS.flag_values_dict()
    from utils import preprocess_flags
    FLAGS = preprocess_flags(FLAGS)
    globals().update(FLAGS)
    if doing_regression:
        assert loss == "mse"
    global threshold

    if using_mpi:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
    else:
        rank=0
        size=1
    num_tasks_per_job = number_inits//size
    tasks = list(range(int(rank*num_tasks_per_job),int((rank+1)*num_tasks_per_job)))

    if rank < number_inits%size:
        tasks.append(size*num_tasks_per_job+rank)

    import os
    print(rank)
    if n_gpus>0:
        os.environ["CUDA_VISIBLE_DEVICES"]=str(rank%n_gpus)

    from tensorflow import keras

    def binary_accuracy_for_mse(y_true,y_pred):
        if zero_one:
            return keras.backend.mean(tf.cast(tf.equal(tf.cast(y_pred>0.5,tf.float32),y_true), tf.float32))
        else:
            return keras.backend.mean(tf.cast(tf.equal(tf.math.sign(y_pred),y_true), tf.float32))

    print(tf.__version__)
    if loss=="mse":
        callbacks = [EarlyStoppingByAccuracy(monitor='val_binary_accuracy_for_mse', value=acc_threshold, verbose=0, wait_epochs=epochs_after_fit)]
        if doing_regression:
            callbacks = [EarlyStoppingByLoss(monitor='val_loss', value=1e-2, verbose=0, wait_epochs=epochs_after_fit)]
    else:
        #if tf.__version__[:3] == "2.1":
        if tf.__version__[0] == "2":
            print("hi im tf 2")
            callbacks = [EarlyStoppingByAccuracy(monitor='val_accuracy', value=acc_threshold, verbose=0, wait_epochs=epochs_after_fit)]
        else:
            callbacks = [EarlyStoppingByAccuracy(monitor='val_acc', value=acc_threshold, verbose=0, wait_epochs=epochs_after_fit)]

    # callbacks += [EarlyStopping(monitor='val_loss', patience=2, verbose=0),
    #               ModelCheckpoint(kfold_weights_path, monitor='val_loss', save_best_only=True, verbose=0),
    #              ]

    '''LOAD DATA & ARCHITECTURE'''

    from utils import load_data,load_model,load_kernel
    train_images,_,ys,test_images,test_ys = load_data(FLAGS)
    print("max val", train_images.max())
    print("ys", ys)
    input_dim = train_images.shape[1]
    num_channels = train_images.shape[-1]
    print(train_images.shape, ys.shape)

    sample_weights = None
    if gamma != 1.0:
        sample_weights = np.ones(len(ys))
        if not oversampling2:
            sample_weights[m:] = gamma
        else:
            raise NotImplementedError("Gamma not equal to 1.0 with oversampling2 not implemented")

    model = load_model(FLAGS)

    set_session = tf.compat.v1.keras.backend.set_session

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    config.log_device_placement = False  # to log device placement (on which device the operation ran)
    sess = tf.compat.v1.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras

    '''TRAINING LOOP'''
    # things to keep track of
    #functions = []
    test_accs = 0
    test_accs_squared = 0
    test_sensitivities = 0
    test_specificities = 0
    train_accs = 0
    train_accs_squared = 0
    weightss = None
    biasess = None
    weightss_squared = None
    biasess_squared = None
    weights_norms = 0
    biases_norms = 0
    weights_norms_squared = 0
    biases_norms_squared = 0
    iterss = 0
    funs_filename = results_folder+prefix+"_"+str(rank)+"_nn_train_functions.txt"

    print("Training NN with",loss,"and optimizer",optimizer)
    if optimizer == "langevin":
        optim = tfp.optimizer.StochasticGradientLangevinDynamics(learning_rate=0.01)
    elif optimizer == "sgd":
        #optim = keras.optimizers.SGD(lr=learning_rate)
        optim = keras.optimizers.SGD(lr=0.001,momentum=0.9,decay=1e-6)
    elif optimizer == "adam":
        optim = keras.optimizers.Adam(lr=learning_rate)
    else:
        optim = optimizer

    def get_metrics():
        if doing_regression:
            #return [keras.losses.mean_squared_error]
            return []
        elif loss=="mse":
            return [binary_accuracy_for_mse]
        else:
            return ['accuracy']

    print(loss)
    model.compile(optim,
                  loss=binary_crossentropy_from_logits if loss=="ce" else loss,
                  metrics=get_metrics())
                  #metrics=['accuracy',sensitivity])
                  #metrics=['accuracy',tf.keras.metrics.SensitivityAtSpecificity(0.99),\
                            #tf.keras.metrics.FalsePositives()])

    from initialization import get_all_layers, is_normalization_layer, reset_weights, simple_reset_weights
    if network not in ["cnn", "fc"]:
        layers = get_all_layers(model)
        are_norm = [is_normalization_layer(l) for l in layers for w in l.get_weights()]
        initial_weights = model.get_weights()

    local_index = 0
    for init in tasks:
        funs_file = open(funs_filename,"a")
        #print(init)
        #
        #TODO: move to a different file, as this is repeated in GP_train..
        ##if the labels are to be generated by a neural network in parallel
        if nn_random_labels or nn_random_regression_outputs:
            if local_index>0:
                if network in ["cnn", "fc"]:
                    simple_reset_weights(model, sigmaw, sigmab)
                else:
                    reset_weights(model, initial_weights, are_norm, sigmaw, sigmab, truncated_init_dist)
            if nn_random_labels:
                ys = model.predict(train_images)[:,0]>0
                if training:
                    test_ys = model.predict(test_images)[:,0]>0
            else:
                ys = model.predict(train_images)[:,0]
                if training:
                    test_ys = model.predict(test_images)[:,0]
        ##
        if local_index>0 or nn_random_labels or nn_random_regression_outputs:
            if network in ["cnn", "fc"]:
                simple_reset_weights(model, sigmaw, sigmab)
            else:
                reset_weights(model, initial_weights, are_norm, sigmaw, sigmab)

        local_index+=1

        ## this reinitializes the net
        #model = load_model(FLAGS)
        #model.compile(optim,
        #              loss=binary_crossentropy_from_logits if loss=="ce" else loss,
        #              metrics=get_metrics())

        weights, biases = get_weights(model), get_biases(model)
        weights_norm, biases_norm = measure_sigmas(model)
        #print(weights_norm,biases_norm)

        #batch_size = min(batch_size, m)
        if train_one_epoch:
            model.fit(train_images.astype(np.float32), ys.astype(np.float32), verbose=1,\
                sample_weight=sample_weights, validation_data=(train_images.astype(np.float32), ys.astype(np.float32)), epochs=1, batch_size=min(m,batch_size))
            sys.stdout.flush()
        else:
            model.fit(train_images.astype(np.float32), ys.astype(np.float32), verbose=1,\
                sample_weight=sample_weights, validation_data=(train_images.astype(np.float32), ys.astype(np.float32)), epochs=MAX_TRAIN_EPOCHS,callbacks=callbacks, batch_size=min(m,batch_size))
            sys.stdout.flush()

        '''GET DATA: weights, and errors'''
        weights, biases = get_rescaled_weights(model)
        weights_norm, biases_norm = measure_sigmas(model) #TODO: make sure it works with archs with norm layers etc
        #print(weights_norm,biases_norm)

        if not doing_regression: # classification
            train_loss, train_acc = model.evaluate(train_images.astype(np.float32), ys.astype(np.float32), verbose=0)
            test_loss, test_acc = model.evaluate(test_images.astype(np.float32), test_ys.astype(np.float32), verbose=0)
        else:
            train_acc = train_loss = model.evaluate(train_images.astype(np.float32), ys, verbose=0)
            test_acc = test_loss = model.evaluate(test_images.astype(np.float32), test_ys, verbose=0)
        preds = model.predict(test_images)[:,0]
        # print(preds)
        # print(preds.shape)
        # test_false_positive_rate = test_fps/(len([x for x in test_ys if x==1]))
        def sigmoid(x):
            return np.exp(x)/(1+np.exp(x))

        #for th in np.linspace(0,1,1000):
        if loss=="mse":
            #NOTE: sensitivity and specificity are not implemented for MSE loss
            test_sensitivity = -1
            test_specificity = -1
        else:
            #print("threshold", threshold)
            #TODO: this is ugly, I should just add a flag that allows to say whether we are doing threshold selection or not!!
            if threshold != -1:
                for th in np.linspace(0,1,1000):
                    test_specificity = sum([(sigmoid(preds[i])>th)==x for i,x in enumerate(test_ys[:100]) if x==0])/(len([x for x in test_ys[:100] if x==0]))
                    if test_specificity>0.99:
                        num_0s = len([x for x in test_ys if x==0])
                        if num_0s > 0:
                            test_specificity = sum([(sigmoid(preds[i])>th)==x for i,x in enumerate(test_ys) if x==0])/(num_0s)
                        else:
                            test_specificity = -1
                        if test_specificity>0.99:
                            num_1s = len([x for x in test_ys if x==1])
                            if num_1s > 0:
                                test_sensitivity = sum([(sigmoid(preds[i])>th)==x for i,x in enumerate(test_ys) if x==1])/(num_1s)
                            else:
                                test_sensitivity = -1
                            break
            else:
                # for th in np.linspace(0,1,5): # low number of thresholds as I'm not exploring unbalanced datasets right now
                #     test_specificity = sum([(sigmoid(preds[i])>th)==x for i,x in enumerate(test_ys) if x==0])/(len([x for x in test_ys if x==0]))
                #     if test_specificity>0.99:
                #         test_sensitivity = sum([(sigmoid(preds[i])>th)==x for i,x in enumerate(test_ys) if x==1])/(len([x for x in test_ys if x==1]))
                #         break
                test_specificity = -1
                test_sensitivity = -1
        #print("Training accuracy", train_acc)
        #print('Test accuracy:', test_acc)
        #print('Test sensitivity:', test_sensitivity)
        #print('Test specificity:', test_specificity)

        if not ignore_non_fit or train_acc >= acc_threshold:
            #print("printing function to file", funs_filename)
            function = (model.predict(test_images[:test_function_size].astype(np.float32), verbose=0))[:,0]
            if loss=="mse" and zero_one:
                function = function>0.5
            else:
                function = function>0
            function=function.astype(int)
            function = ''.join([str(int(i)) for i in function])
            funs_file.write(function+"\r\n")
            funs_file.close()
            #functions.append(function)
            test_accs += test_acc
            test_accs_squared += test_acc**2
            test_sensitivities += test_sensitivity
            test_specificities += test_specificity
            train_accs += train_acc
            train_accs_squared += train_acc**2
            if weightss is None:
                weightss = weights
                biasess = biases
                weightss_squared = weights**2
                biasess_squared = biases**2
            else:
                weightss += weights
                biasess += biases
                weightss_squared += weights**2
                biasess_squared += biases**2
            weights_norms += weights_norm
            weights_norms_squared += weights_norm**2
            biases_norms += biases_norm
            biases_norms_squared += biases_norm**2
            iterss += model.history.epoch[-1]
        #keras.backend.clear_session()
        gc.collect()

    #print("Print functions to file")
    #with open(,"a") as file:
    #    file.write("\r\n".join(functions))
    #    file.write("\r\n")

    # functions = comm.gather(functions, root=0)
    if rank == 0:
        #test_accs_recv = np.empty([size,1],dtype=np.float32)
        #test_accs_squared_recv = np.empty([size,1],dtype=np.float32)
        #test_sensitivities_recv = np.empty([size,1],dtype=np.float32)
        #test_specificities_recv = np.empty([size,1],dtype=np.float32)
        #train_accs_recv = np.empty([size,1],dtype=np.float32)
        #train_accs_squared_recv = np.empty([size,1],dtype=np.float32)

        weights_shape = weightss.flatten().shape[0]
        biases_shape = biasess.flatten().shape[0]
        weightss_recv = np.zeros(weights_shape, dtype=np.float32)
        biasess_recv = np.zeros(biases_shape, dtype=np.float32)
        weightss_squared_recv = np.zeros(weights_shape, dtype=np.float32)
        biasess_squared_recv = np.zeros(biases_shape, dtype=np.float32)
        #weights_norms_recv = np.empty([size,1],dtype=np.float32)
        #weights_norms_squared_recv = np.empty([size,1],dtype=np.float32)
        #biases_norms_recv = np.empty([size,1],dtype=np.float32)
        #biases_norms_squared_recv = np.empty([size,1],dtype=np.float32)
        #iterss_recv = np.empty([size,1],dtype='i')

    else:
        #test_accs_recv = None
        #test_accs_squared_recv = None
        #test_sensitivities_recv = None
        #test_specificities_recv = None
        #train_accs_recv = None
        #train_accs_squared_recv = None

        weightss_recv = None
        weightss_squared_recv = None
        biasess_recv = None
        biasess_squared_recv = None
        #weights_norms_recv = None
        #weights_norms_squared_recv = None
        #biases_norms_recv = None
        #biases_norms_squared_recv = None
        #iterss_recv = None

    if using_mpi:
        test_accs_recv = comm.reduce(test_accs, root=0)
        test_accs_squared_recv = comm.reduce(test_accs_squared, root=0)
        test_sensitivities_recv = comm.reduce(test_sensitivities, root=0)
        test_specificities_recv = comm.reduce(test_specificities, root=0)
        train_accs_recv = comm.reduce(train_accs, root=0)
        train_accs_squared_recv = comm.reduce(train_accs_squared, root=0)

        comm.Reduce(weightss.flatten(), weightss_recv, root=0)
        comm.Reduce(biasess.flatten(), biasess_recv, root=0)
        comm.Reduce(weightss_squared.flatten(), weightss_squared_recv, root=0)
        comm.Reduce(biasess_squared.flatten(), biasess_squared_recv, root=0)
        weights_norms_recv = comm.reduce(weights_norms, root=0)
        weights_norms_squared_recv = comm.reduce(weights_norms_squared, root=0)
        biases_norms_recv = comm.reduce(biases_norms, root=0)
        biases_norms_squared_recv = comm.reduce(biases_norms_squared, root=0)
        iterss_recv = comm.reduce(iterss, root=0)
    else:
        test_accs_recv = test_accs
        test_accs_squared_recv = test_accs_squared
        test_sensitivities_recv = test_sensitivities
        test_specificities_recv = test_specificities
        train_accs_recv = train_accs
        train_accs_squared_recv = train_accs_squared

        weightss_recv=weightss.flatten()
        biasess_recv=biasess.flatten()
        weightss_squared_recv=weightss_squared.flatten()
        biasess_squared_recv=biasess_squared.flatten()
        weights_norms_recv = weights_norms
        weights_norms_squared_recv = weights_norms_squared
        biases_norms_recv = biases_norms
        biases_norms_squared_recv = biases_norms_squared
        iterss_recv = iterss

    '''PROCESS COLLECTIVE DATA'''
    if rank == 0:
        #weightss = np.stack(sum(weightss,[]))
        #weights_norms = sum(weights_norms,[])
        #biasess = np.stack(sum(biasess,[]))
        weights_mean = np.mean(weightss_recv)/number_inits # np.mean averages over the weight index; the reduce already summed over the number_inits dimension
        biases_mean = np.mean(biasess_recv)/number_inits
        weights_std = np.mean(weightss_squared_recv)/number_inits - weights_mean**2 # E[w^2] - (E[w])^2, i.e. a variance
        biases_std = np.mean(biasess_squared_recv)/number_inits - biases_mean**2
        weights_norm_mean = weights_norms_recv/number_inits
        weights_norm_std = weights_norms_squared_recv/number_inits - weights_norm_mean**2
        biases_norm_mean = biases_norms_recv/number_inits
        biases_norm_std = biases_norms_squared_recv/number_inits - biases_norm_mean**2

        # functions = sum(functions,[])
        test_acc = test_accs_recv/number_inits
        test_sensitivity = test_sensitivities_recv/number_inits
        test_specificity = test_specificities_recv/number_inits
        train_acc = train_accs_recv/number_inits
        print('Mean test accuracy:', test_acc)
        print('Mean test sensitivity:', test_sensitivity)
        print('Mean test specificity:', test_specificity)
        print('Mean train accuracy:', train_acc)
        train_acc_std = train_accs_squared_recv/number_inits - train_acc**2
        test_acc_std = test_accs_squared_recv/number_inits - test_acc**2
        mean_iters = 1.0*iterss_recv/number_inits

        useful_train_flags = ["dataset", "m", "network", "loss", "optimizer", "pooling", "epochs_after_fit", "ignore_non_fit", "test_function_size", "batch_size", "number_layers", "sigmaw", "sigmab", "init_dist","use_shifted_init","shifted_init_shift","whitening", "centering", "oversampling", "oversampling2", "channel_normalization", "training", "binarized", "confusion","filter_sizes", "gamma", "intermediate_pooling", "label_corruption", "threshold", "n_gpus", "n_samples_repeats", "layer_widths", "number_inits", "padding"]
        with open(results_folder+prefix+"nn_training_results.txt","a") as file:
            file.write("#")
            for key in sorted(useful_train_flags):
                file.write("{}\t".format(key))
            file.write("\t".join(["train_acc", "test_error", "test_acc","test_sensitivity","test_specificity","weights_std","biases_std","weights_mean", "biases_mean", "weights_norm_mean","weights_norm_std","biases_norm_mean","biases_norm_std","mean_iters","train_acc_std","test_acc_std"]))
            file.write("\n")
            for key in sorted(useful_train_flags):
                file.write("{}\t".format(FLAGS[key]))
            file.write("{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:d}\t{:.4f}\t{:.4f}\n".format(train_acc, 1-test_acc,test_acc,\
                test_sensitivity,test_specificity,weights_std,biases_std,\
                weights_mean,biases_mean,weights_norm_mean,weights_norm_std,biases_norm_mean,biases_norm_std,int(mean_iters),train_acc_std,test_acc_std)) #normalized to sqrt(input_dim)
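
The aggregation above keeps running sums of weights and squared weights across ranks and recovers the spread via E[X^2] - (E[X])^2, so the *_std variables actually hold variances. A small NumPy check of that identity on hypothetical data:

import numpy as np

samples = np.random.randn(1000)
running_sum = samples.sum()
running_sum_of_squares = (samples ** 2).sum()
n = len(samples)

mean = running_sum / n
variance = running_sum_of_squares / n - mean ** 2   # E[X^2] - (E[X])^2
assert np.isclose(variance, samples.var())          # matches the population variance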
Esempio n. 26
0
def runLinearSVM(count_data_zScore_df,
                 sample_metadata_df,
                 numSimulations,
                 training_size_percent,
                 MODEL_LOC,
                 MODEL_STATS_LOC,
                 save=False):

    simulation_iterations = numSimulations

    sim_weights_all_df = pd.DataFrame()
    all_weights_ens = []
    all_results_ens = []

    virus_control_labels = one_hot_encode(count_data_zScore_df,
                                          sample_metadata_df)
    # loading model from pickle file
    with open(MODEL_LOC + '.pkl', 'rb') as model_pickle:
        model = pickle.load(model_pickle)

    for sim in tqdm(range(simulation_iterations)):
        X_train, X_test, y_train, y_test = train_test_split(
            count_data_zScore_df,
            virus_control_labels,
            train_size=training_size_percent,
            stratify=virus_control_labels,
            random_state=sim)

        # predict using SVM
        all_results, perm_import = pred_SVM(X_train, X_test, y_train, y_test,
                                            model)
        all_results['seed'] = sim
        all_results_ens.append(all_results)

        # feature weights
        sim_weights_df = pd.DataFrame(
            [model.named_steps["clf"].coef_[0]],
            columns=count_data_zScore_df.columns[
                model.named_steps["fs"].get_support()])
        # concatenate feature weights
        sim_weights_all_df = pd.concat([sim_weights_all_df, sim_weights_df],
                                       ignore_index=True)

        # grab feature weights
        all_weights = feat_ML(X_train, y_train, model)
        if all_weights is not None:
            all_weights['seed'] = sim
            all_weights_ens.append(all_weights)

    # concatenating results from all simulations and calculating summary statistics
    all_results_ens = pd.concat(all_results_ens, sort=False).set_index('seed')
    all_results_sum = summary_stats(all_results_ens)

    # exporting feature weights and other summary stats from simulations
    if all_weights_ens:
        all_weights_ens = pd.concat(all_weights_ens,
                                    sort=False).set_index('seed')
        #Normalize weights per row to get relative importances
        all_weights_ens = get_weights(all_weights_ens)
        all_weights_ens.to_csv(MODEL_STATS_LOC + '_all_weights.csv')

    if save:
        all_results_sum.to_csv(MODEL_STATS_LOC + '_summary.csv')
        all_results_ens.to_csv(MODEL_STATS_LOC + '_ensResults.csv')
        sim_weights_all_df.to_csv(MODEL_STATS_LOC + '_weights_EachSim.csv')
        #     return(perm_import)
        mg = mygene.MyGeneInfo()
        symbol_to_entrez = pd.read_csv(
            '../data/gene_lists/gene_symbol_to_entrez.csv', index_col=0)
        symbol_to_entrez_dict = dict(
            zip(symbol_to_entrez['SYMBOL'], symbol_to_entrez['ENTREZID']))
        feature_importances_df = all_weights_ens.copy()
        feature_importances_annotation_df = feature_importances_df.copy()
        for gene in feature_importances_df.index:
            try:
                entrez_ID_temp = symbol_to_entrez_dict[gene]
                argument_temp = 'entrezgene: ' + str(entrez_ID_temp)
                name_temp = mg.query(argument_temp)['hits'][0]['name']
                feature_importances_annotation_df.loc[gene,
                                                      'annotation'] = name_temp
            except IndexError:
                feature_importances_annotation_df.loc[gene,
                                                      'annotation'] = 'None'
        feature_importances_annotation_df.to_csv(MODEL_STATS_LOC +
                                                 '_all_weights_annotation.csv')
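
The get_weights(all_weights_ens) call above is described as normalizing the weights per row to obtain relative importances; the helper itself is not included here. A minimal pandas sketch of one such normalization (each row divided by the sum of its absolute values), offered as an assumption rather than the project's actual implementation:

import pandas as pd

def normalize_weights_per_row(weights_df):
    # scale each row so its absolute values sum to 1 (relative importances)
    return weights_df.div(weights_df.abs().sum(axis=1), axis=0)

example = pd.DataFrame({'geneA': [2.0, -1.0], 'geneB': [2.0, 3.0]})
print(normalize_weights_per_row(example))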
Esempio n. 27
0
    idx_pick = calc_energy.idx_pick
    print(F'Number of selected configurations in this iteration: {len(idx_pick)}')
    if (idx_now is None) or (idx_now.shape[0] == 0):
        idx_now = idx_pick
    else:
        idx_now = np.hstack((idx_now, idx_pick))
    Y_train[idx_pick] = calc_energy.energy

    new_train = os.path.join(settings.calculations, 'it_' + str(settings.t),
                             'tr_set.xyz')
    with open(new_train, 'r+') as labeled_file:
        newxyz = labeled_file.read()
    with open(settings.train_out, 'a+') as oldxyz:
        oldxyz.write(newxyz)

    train_weights = utils.get_weights(Y_train[idx_now],
                                      settings.delta_e, settings.e_min)

    print("Fitting the model...")
    train_err = model.fit(ite=settings.t)
    err_train[idx_now] = np.abs(train_err) * np.sqrt(train_weights)

    print("Creating restart file...")
    train_set_idx = 'trainset_' + str(settings.t) + '.RESTART'
    restart_path = os.path.join(settings.output, train_set_idx)
    restart_file = pd.DataFrame()
    restart_file['idx'] = idx_now
    restart_file['energy'] = Y_train[idx_now]
    restart_file['error'] = err_train[idx_now]
    restart_file.to_csv(restart_path, sep='\t', index=False)

    # section: evaluate current trained model
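
The call utils.get_weights(Y_train[idx_now], settings.delta_e, settings.e_min) suggests an energy-dependent weighting of the selected configurations, but the formula is not part of this snippet. Purely as an illustration, one common choice in potential fitting down-weights configurations far above the minimum energy:

import numpy as np

def energy_weights_sketch(energies, delta_e, e_min):
    # hypothetical weighting: ~1 near e_min, decaying smoothly for higher energies
    excess = np.maximum(energies - e_min, 0.0)
    return (delta_e / (excess + delta_e)) ** 2

print(energy_weights_sketch(np.array([-10.0, -9.5, -8.0]), delta_e=1.0, e_min=-10.0))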
Esempio n. 28
0
def main(argv):
    # set constant
    cube_size = [10, 10, 3]

    #
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', help='dataset', default='')
    parser.add_argument('-g', '--height', help='frame height', default=120)
    parser.add_argument('-w', '--width', help='frame width', default=160)
    parser.add_argument('-t', '--task', help='task to perform', default=-1)
    parser.add_argument('-c',
                        '--clip',
                        help='clip index (zero-based)',
                        default=-1)
    parser.add_argument('-s', '--set', help='test set', default=1)
    parser.add_argument('-e', '--epoch', help='epoch destination', default=0)
    parser.add_argument('-m', '--model', help='start model idx', default=0)
    args = vars(parser.parse_args())
    #
    dataset = dataset_dict[args['dataset']]
    dataset['path_train'] = '%s/Train' % dataset['path']
    dataset['path_test'] = '%s/Test' % dataset['path']
    #
    task = int(args['task'])
    h = int(args['height'])
    w = int(args['width'])
    clip_idx = int(args['clip'])
    test_set = bool(int(args['set']))
    n_epoch_destination = int(args['epoch'])
    model_idx_to_start = int(args['model'])
    model_test = model_idx_to_start
    n_row, n_col = np.array([h, w]) // cube_size[:2]
    print('Selected task = %d' % task)
    print('started time: %s' % datetime.datetime.now())
    #
    dataset['cube_dir'] = './training_saver/%s/cube_%d_%d_%d_%d_%d' % (
        dataset['name'], h, w, cube_size[0], cube_size[1], cube_size[2])
    if not os.path.exists(dataset['cube_dir']):
        pathlib.Path(dataset['cube_dir']).mkdir(parents=True, exist_ok=True)
    '''========================================='''
    ''' Task 1: Resize frame resolution dataset '''
    '''========================================='''
    if task == 1:
        load_images_and_resize(dataset,
                               new_size=[h, w],
                               train=True,
                               force_recalc=False,
                               return_entire_data=False)
        load_images_and_resize(dataset,
                               new_size=[h, w],
                               train=False,
                               force_recalc=False,
                               return_entire_data=False)
    '''========================================='''
    ''' Task 2: Split cubes in dataset and save '''
    '''========================================='''
    if task == 2:
        split_cubes(dataset,
                    clip_idx,
                    cube_size,
                    training_set=not test_set,
                    force_recalc=False,
                    dist_thresh=None)
    '''=========================================='''
    ''' Task 3: Train model and check validation '''
    '''=========================================='''
    if task == 3:
        training_cubes, training_mapping = load_all_cubes_in_set(
            dataset, h, w, cube_size, training_set=True)
        train_model_naive_with_batch_norm(dataset,
                                          training_cubes,
                                          training_mapping[:, 2],
                                          training_mapping[:, 3],
                                          n_row,
                                          n_col,
                                          n_epoch_destination,
                                          start_model_idx=model_idx_to_start,
                                          batch_size=256 * 12)
    '''====================================='''
    ''' Task 4: Test model and save outputs '''
    '''====================================='''
    if task == 4:
        sequence_n_frame = count_sequence_n_frame(dataset, test=test_set)
        test_cubes, test_mapping = split_cubes(dataset,
                                               clip_idx,
                                               cube_size,
                                               training_set=not test_set)
        test_model_naive_with_batch_norm(dataset,
                                         test_cubes,
                                         test_mapping[:, 2],
                                         test_mapping[:, 3],
                                         n_row,
                                         n_col,
                                         sequence_n_frame,
                                         clip_idx,
                                         model_idx=model_test,
                                         batch_size=256 * 12,
                                         using_test_data=test_set)
    '''====================================='''
    ''' Task 5: Calculate scores of dataset '''
    '''====================================='''
    if task == 5:
        calc_score_full_clips(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              train=False)
        calc_score_full_clips(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              train=True)
    '''========================='''
    ''' Task -5: Plot error map '''
    '''========================='''
    if task == -5:
        frame_idx = np.arange(16)
        print('selected set:', 'Test' if test_set else 'Train')
        print('selected frames:', frame_idx)
        plot_error_map(dataset,
                       np.array([h, w]),
                       cube_size,
                       clip_idx,
                       frame_idx,
                       model_test,
                       score_type_idx=3,
                       using_test_data=test_set)
    '''===================='''
    ''' Task 6: Evaluation '''
    '''===================='''
    if task == 6:
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        sequence_n_frame = count_sequence_n_frame(dataset, test=True)
        labels_select_last, labels_select_first, labels_select_mid = get_test_frame_labels(
            dataset, sequence_n_frame, cube_size, is_subway=False)
        #
        for way in range(6):
            # sequence_n_frame = None
            if way != 1:
                continue
            op = np.std
            full_assess_AUC(dataset,
                            np.array([h, w]),
                            cube_size,
                            model_test,
                            labels_select_first,
                            sequence_n_frame=sequence_n_frame,
                            plot_pr_idx=None,
                            selected_score_estimation_way=way,
                            operation=op,
                            save_roc_pr=True)
    '''============================'''
    ''' Task -6: Manual evaluation '''
    '''============================'''
    if task == -6:
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        sequence_n_frame = count_sequence_n_frame(dataset, test=True)
        labels_select_last, labels_select_first, labels_select_mid = get_test_frame_labels(
            dataset, sequence_n_frame, cube_size, is_subway=False)
        #
        for way in range(6):
            # sequence_n_frame = None
            if way != 1:
                continue
            op = np.std
            #
            manual_assess_AUC(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              labels_select_mid,
                              plot_pr_idx=None,
                              selected_score_estimation_way=way,
                              operation=op)
            #
            manual_assess_AUC(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              labels_select_first,
                              plot_pr_idx=None,
                              selected_score_estimation_way=way,
                              operation=op)
            #
            manual_assess_AUC(dataset,
                              np.array([h, w]),
                              cube_size,
                              model_test,
                              labels_select_last,
                              plot_pr_idx=None,
                              selected_score_estimation_way=way,
                              operation=op)
    '''==================================='''
    ''' Task 7: Multiple scale evaluation '''
    '''==================================='''
    if task == 7:
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        sequence_n_frame = count_sequence_n_frame(dataset, test=True)
        labels_select_last, labels_select_first, labels_select_mid = get_test_frame_labels(
            dataset, sequence_n_frame, cube_size)
        #
        for way in range(6):
            # sequence_n_frame = None
            if way != 1:
                continue
            op = np.std
            #
            full_assess_AUC_multiple_scale(
                dataset,
                [np.array([120, 160]),
                 np.array([30, 40]),
                 np.array([20, 20])],
                cube_size,
                model_test,
                labels_select_mid,
                sequence_n_frame=sequence_n_frame,
                selected_score_estimation_way=way,
                operation=op)
            #
            full_assess_AUC_multiple_scale(
                dataset,
                [np.array([120, 160]),
                 np.array([30, 40]),
                 np.array([20, 20])],
                cube_size,
                model_test,
                labels_select_first,
                sequence_n_frame=sequence_n_frame,
                selected_score_estimation_way=way,
                operation=op)
            #
            full_assess_AUC_multiple_scale(
                dataset,
                [np.array([120, 160]),
                 np.array([30, 40]),
                 np.array([20, 20])],
                cube_size,
                model_test,
                labels_select_last,
                sequence_n_frame=sequence_n_frame,
                selected_score_estimation_way=way,
                operation=op)
    '''========================='''
    ''' Task 08: Write video    '''
    ''' Task 11: Save score plot'''
    '''========================='''
    if task == 8 or task == 11:
        frame_ranges = {'Belleview': (50, 443 + 157), 'Train': (2100, 3200)}
        if dataset in [Belleview, Train]:
            dataset['ground_truth'] = load_ground_truth_Boat(
                dataset, n_clip=dataset['n_clip_test'])
        elif dataset == Avenue:
            dataset['ground_truth'] = load_ground_truth_Avenue(
                dataset['test_mask_path'], dataset['n_clip_test'])
        write_video_result(dataset,
                           np.array([h, w]),
                           cube_size,
                           clip_idx,
                           model_test,
                           train=not test_set,
                           operation=np.std,
                           frame_gt=dataset['ground_truth'][clip_idx],
                           show_all_score=False,
                           frame_range=frame_ranges[dataset['name']]
                           if dataset in [Belleview, Train] else None,
                           show_clf=dataset in [Belleview, Train],
                           save_plot_exam_only=(task == 11))
    '''======================='''
    ''' Task -8: Write images '''
    '''======================='''
    if task == -8:
        write_example(dataset,
                      np.array([h, w]),
                      cube_size,
                      clip_idx,
                      model_test,
                      operation=np.std,
                      scale_video=False,
                      wrapall=True)
    '''============================='''
    ''' Task 9: Visualize G filters '''
    '''============================='''
    if task == 9:
        visualize_filters(dataset,
                          cube_size,
                          n_row,
                          n_col,
                          model_idx=model_test)
    '''============================'''
    ''' Task -9: Visualize weights '''
    '''============================'''
    if task == -9:
        get_weights(dataset,
                    np.array([h, w]),
                    cube_size,
                    model_test,
                    np.std,
                    save_as_image=True)
    '''====================================='''
    ''' Task 10: Convert model to visualize '''
    '''====================================='''
    if task == 10:
        convert_model(dataset, cube_size, n_row, n_col, model_idx=model_test)

    print('finished time: %s' % datetime.datetime.now())
Esempio n. 29
0
def main(settingsfname, verbose=False):

    settings = utils.get_settings(settingsfname)

    subjects = settings['SUBJECTS']

    data = utils.get_data(settings, verbose=verbose)

    metadata = utils.get_metadata()

    features_that_parsed = [
        feature for feature in settings['FEATURES']
        if feature in list(data.keys())
    ]

    settings['FEATURES'] = features_that_parsed

    utils.print_verbose("=====Feature HDF5s parsed=====", flag=verbose)

    # get model
    model_pipe = utils.build_model_pipe(settings)

    utils.print_verbose("=== Model Used ===\n"
                        "{0}\n==================".format(model_pipe),
                        flag=verbose)

    # dictionary to store results
    subject_predictions = {}

    accuracy_scores = {}

    for subject in subjects:
        utils.print_verbose("=====Training {0} Model=====".format(
            str(subject)),
                            flag=verbose)

        # initialise the data assembler
        assembler = utils.DataAssembler(settings, data, metadata)
        X, y = assembler.test_train_discrimination(subject)

        # get the CV iterator
        cv = utils.sklearn.cross_validation.StratifiedShuffleSplit(
            y, random_state=settings['R_SEED'], n_iter=settings['CVITERCOUNT'])

        # initialise lists for cross-val results
        predictions = []
        labels = []
        allweights = []

        # run cross validation and report results
        for train, test in cv:

            # calculate the weights
            weights = utils.get_weights(y[train])
            # fit the model to the training data
            model_pipe.fit(X[train], y[train], clf__sample_weight=weights)
            # append new predictions
            predictions.append(model_pipe.predict(X[test]))
            # store the test-fold weights (used to weight the accuracy score below)
            weights = utils.get_weights(y[test])
            allweights.append(weights)
            # store true labels
            labels.append(y[test])

        # stack up the results
        predictions = utils.np.hstack(predictions)
        labels = utils.np.hstack(labels)
        weights = utils.np.hstack(allweights)

        # calculate the total accuracy
        accuracy = utils.sklearn.metrics.accuracy_score(labels,
                                                        predictions,
                                                        sample_weight=weights)

        print("Accuracy score for {1}: {0:.3f}".format(accuracy, subject))

        # add the accuracy score to the per-subject dict
        accuracy_scores.update({subject: accuracy})

        # store results from each subject
        subject_predictions[subject] = (predictions, labels, weights)

    # stack subject results (don't worry about this line)
    predictions, labels, weights = map(
        utils.np.hstack, zip(*list(subject_predictions.values())))

    # calculate global accuracy
    accuracy = utils.sklearn.metrics.accuracy_score(labels,
                                                    predictions,
                                                    sample_weight=weights)

    print(
        "predicted accuracy score over all subjects: {0:.2f}".format(accuracy))

    # output accuracy scores to file
    accuracy_scores.update({'all': accuracy})

    settings['DISCRIMINATE'] = 'accuracy_scores.csv'
    # settings['AUC_SCORE_PATH'] = 'discriminate_scores'
    utils.output_auc_scores(accuracy_scores, settings)

    return accuracy_scores
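
utils.get_weights(y) above produces per-sample weights used both for fitting and for the weighted accuracy; its implementation is not included in this snippet. A minimal sketch of a typical class-balancing version (each sample weighted by the inverse frequency of its class), given as an assumption rather than the project's actual helper:

import numpy as np

def get_weights_sketch(y):
    # per-sample weights proportional to the inverse frequency of each class
    y = np.asarray(y)
    classes, counts = np.unique(y, return_counts=True)
    class_weight = {c: len(y) / (len(classes) * n) for c, n in zip(classes, counts)}
    return np.array([class_weight[label] for label in y])

print(get_weights_sketch([0, 0, 0, 1]))  # the minority class gets the larger weight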
Esempio n. 30
0
import theano.tensor as T
from theano import shared, function
from utils import get_xor_blobs, get_weights, draw_decision_boundary
import matplotlib.pyplot as plt
# from backprop import Backpropagation


EPSILON = 1e-4

X, Y = get_xor_blobs()

# bp = Backpropagation(layers=[2, 3, 2])
# bp.fit(X, y, n_iter=10000)
# draw_decision_boundary(bp, X, y)


w1 = shared(get_weights((3, 2)), name="w1")
w2 = shared(get_weights((2, 3)), name="w2")
b1 = shared(get_weights((3, 1)), name="b1")
b2 = shared(get_weights((2, 1)), name="b2")

x = T.dmatrix('x')
y = T.dmatrix('y')

a1 = x.T
z2 = T.dot(w1, a1) + b1.repeat(a1.shape[1], axis=1)
a2 = 1.0 / (1 + T.exp(-z2))
z3 = T.dot(w2, a2) + b2.repeat(a2.shape[1], axis=1)
a3 = 1.0 / (1 + T.exp(-z3))

predict = function([x], [a3.T])
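
A short usage example for the compiled forward pass: evaluating predict on the XOR blobs and computing a (chance-level, since the weights are random) accuracy. It assumes the targets returned by get_xor_blobs are one-hot with two columns, matching the two output units:

import numpy as np

probs = predict(X)[0]                        # shape (n_samples, 2)
pred_labels = probs.argmax(axis=1)
true_labels = np.asarray(Y).argmax(axis=1)   # assuming one-hot targets
print("accuracy with random weights:", np.mean(pred_labels == true_labels))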
Esempio n. 31
0
    gen = dfs_gen(train, val_dates)

#   --------------- Model -----------------

drop_cols = ['scope', 'Date', 'real_target', 'pack', 'size (GM)']
categorical_f = [x for x in categorical_f if x not in drop_cols]

prediction_df = pd.DataFrame()
pred_cluster = pd.DataFrame()

sample_weights = None

for df_train, df_test in tqdm(gen):

    if useSampleWeights:
        sample_weights = get_weights(df_train, type=weights_type)

    model = CatBoost(df_train,
                     df_test,
                     categorical_features=categorical_f,
                     drop_columns=drop_cols,
                     isScope=useScope,
                     sample_weights=sample_weights,
                     evaluation=isEvaluation)
    model_preds = model.run()

    prediction_df = pd.concat([prediction_df, model_preds])

    # ---- Predict by cluster  -----

    #  -----------   Cluster 1
Esempio n. 32
0
def downscale(method: str, old_model_name: str, avg_pool_unaffected=False):

    old_model = utils.load_model('Models/{}.yaml'.format(old_model_name),
                                 'Models/{}.h5'.format(old_model_name))

    new_model = models.Sequential()

    first_layer = True
    for layer in old_model.layers:

        if type(layer) is keras.layers.convolutional.Conv1D:

            biases = layer.get_weights()[1]
            old_kernels = utils.get_kernels(layer.get_weights()[0])
            nodes = layer.kernel.shape[2].value

            if method == 'nearest_neighbor':

                new_kernels = nearest_neighbor(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(48000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'linear':

                new_kernels = linear(old_kernels)

                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(48000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'distance_weighting':

                new_kernels = distance_weighting(old_kernels)
                new_weights = [utils.get_weights(new_kernels), biases]

                if first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              input_shape=(48000, 1),
                                              padding='same',
                                              weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(nodes,
                                              kernel_size=new_kernels.shape[2],
                                              activation=layer.activation,
                                              padding='same',
                                              weights=new_weights)

                new_model.add(new_layer)

            elif method == 'same':

                new_weights = layer.get_weights()

                if first_layer:
                    new_layer = layers.Conv1D(
                        nodes,
                        kernel_size=layer.kernel.shape[0].value,
                        activation=layer.activation,
                        input_shape=(48000, 1),
                        padding='same',
                        weights=new_weights)
                    first_layer = False

                elif not first_layer:
                    new_layer = layers.Conv1D(
                        nodes,
                        kernel_size=layer.kernel.shape[0].value,
                        activation=layer.activation,
                        padding='same',
                        weights=new_weights)

                new_model.add(new_layer)

        elif type(layer) is keras.layers.pooling.MaxPooling1D:

            pool_size = layer.pool_size[0]

            new_model.add(layers.MaxPooling1D(pool_size=pool_size))

        elif type(layer) is keras.layers.pooling.AveragePooling1D:

            nodes = layer.get_output_at(0).shape[-1].value
            pool_size = layer.pool_size[0]

            if method == 'nearest_neighbor':

                new_model.add(layers.AveragePooling1D(pool_size=pool_size))

            elif method == 'linear':

                if avg_pool_unaffected is True:
                    new_model.add(layers.AveragePooling1D(pool_size=pool_size))

                else:
                    new_kernels = down_scale_avg_pooling(
                        nodes, [3 / 2, -1 / 4, -1 / 4])
                    dummy_bias = np.zeros(nodes)
                    new_weights = [utils.get_weights(new_kernels), dummy_bias]

                    new_layer = layers.Conv1D(
                        nodes,
                        kernel_size=new_kernels.shape[-1],
                        activation='linear',
                        padding='same',
                        strides=2,
                        weights=new_weights)

                    new_model.add(new_layer)

            elif method == 'distance_weighting':

                if avg_pool_unaffected is True:
                    new_model.add(layers.AveragePooling1D(pool_size=pool_size))

                else:
                    new_kernels = down_scale_avg_pooling(
                        nodes, [-1 / 4, 1 / 2, 3 / 4])
                    dummy_bias = np.zeros(nodes)
                    new_weights = [utils.get_weights(new_kernels), dummy_bias]

                    new_layer = layers.Conv1D(
                        nodes,
                        kernel_size=new_kernels.shape[-1],
                        activation='linear',
                        padding='same',
                        strides=2,
                        weights=new_weights)
                    new_model.add(new_layer)

            elif method == 'same':
                new_model.add(layers.AveragePooling1D(pool_size=pool_size))

        elif type(layer) is keras.layers.Flatten:

            new_model.add(layers.Flatten())
            f_dim = layer.input_shape

        elif type(layer) is keras.layers.Dense:

            original_shape = layer.get_weights()[0].shape
            output_dim = layer.get_weights()[1].shape[0]
            shape = (f_dim[1], f_dim[2], output_dim)
            weights, biases = layer.get_weights()

            old_conv_weights = weights.reshape(shape)

            old_kernels = utils.get_kernels(old_conv_weights)

            if method == 'nearest_neighbor':

                new_kernels = nearest_neighbor(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] // 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'linear':
                new_kernels = linear(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(original_shape[0] // 2,
                                             output_dim), biases
                ]
                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'distance_weighting':

                new_kernels = distance_weighting(old_kernels)
                new_kernels = pad_zeros(new_kernels, old_kernels.shape[-1])
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] // 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

            elif method == 'same':

                new_kernels = np.split(old_kernels, 2, axis=2)[0]
                new_conv_weights = utils.get_weights(new_kernels)
                new_dense_weights = [
                    new_conv_weights.reshape(
                        (original_shape[0] // 2, output_dim)), biases
                ]

                new_model.add(
                    layers.Dense(output_dim,
                                 activation=layer.activation,
                                 weights=new_dense_weights))

    return new_model
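
A hypothetical call of downscale, assuming a saved model named 'baseline_48k' exists under Models/ as both a .yaml architecture file and an .h5 weight file (the name is a placeholder):

new_model = downscale(method='linear', old_model_name='baseline_48k',
                      avg_pool_unaffected=True)
new_model.summary()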
Esempio n. 33
0
                    K_inh = torch.from_numpy(K_inh)
                    K_inh = K_inh.float()
                    layers[l]['S'][t, :, :, :] = S
                    layers[l]['K_inh'] = K_inh
                    K_STDP = layers[l]['K_STDP']
                    valid = S * V[0] * K_STDP
                    valid = valid.cpu().data.numpy()
                    maxval, maxind1, maxind2 = get_STDP_idxs(
                        valid,
                        H=H,
                        W=W,
                        D=C,
                        layer_idx=l,
                        offset=stdp_params['offset_STDP'][0],
                        stdp_per_layer=stdp_params['stdp_per_layer'][0])
                    w = get_weights(network[l])
                    Weight, K_STDP = STDP_learning(
                        S_sz=S.shape,
                        s=S,
                        w=w,
                        K_STDP=K_STDP,
                        maxval=maxval,
                        maxind1=maxind1,
                        maxind2=maxind2,
                        stride=1,
                        offset=stdp_params['offset_STDP'][0],
                        a_minus=stdp_params['a_minus'][0],
                        a_plus=stdp_params['a_plus'][0])
                    save_weights(network[l], Weight)
                    layers[l]['K_STDP'] = K_STDP
Esempio n. 34
0
def train():
    cfg = neural_train
    dataset =  NeuralData(list_file=cfg['list_file'], data_root=cfg['data_root'])

    fw, fh, fd, fc = cfg['fov_shape']

    # model = ffn(in_planes=2, module_nums=8)
    model = ffn()

    print("Initializing weights...")
    model.init_weights()
    
    if args.cuda:
        model = torch.nn.DataParallel(model)  # multi-GPU is problematic here: the priors get doubled
        cudnn.benchmark = True

    if args.cuda:
        model = model.cuda()
    model.train()

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)

    criterion = BinaryFocalLoss(gamma=2)

    print('Using the specified args:')
    print(args)

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)

    writer = SummaryWriter('./logs')

    epoch = 0
    step_index = 0
    batch_iterator = None
    epoch_size = len(dataset) // args.batch_size

    # Loss counters
    vis_loss = []

    i = 0
    for iteration in range(cfg['max_iter']):
        # load train data .tif voxel
        if (not batch_iterator) or (iteration % epoch_size == 0):
            batch_iterator = iter(data_loader)
            epoch += 1
        if iteration in cfg['lr_steps']:
            step_index += 1
            adjust_learning_rate(optimizer, args.gamma, step_index)
        
        images, targets = next(batch_iterator)
        vis_loss = []

        # if args.cuda:
        #     images = images.to('cuda')
        #     targets = targets.to('cuda')
        # else:
        #     images = torch.Tensor(images)
        #     targets = torch.Tensor(targets)
        locations = prepare_data(labels=targets, patch_shape=cfg['subvol_shape'])

        # Train on each extracted patch
        indices = np.random.permutation(len(locations))
        for location in locations[indices]:
            # Extract a patch and build the supervision labels centered on this location
            subvol_data, subvol_labels, relative_loc = patch_subvol(data=images, labels=targets, subvol_shape=cfg['subvol_shape'],
                                                                    deltas=np.array(cfg['fov_shape'][:3])//2, location=location)
            # Soft binary mask corresponding to this patch
            subvol_mask = mask_subvol(subvol_data.shape, relative_loc)
            n, c, w, h, d = subvol_data.shape

            # Create FOV dicts, and center locations

            V = {(relative_loc[0], relative_loc[1], relative_loc[2])}  # set()
            queue = Queue()
            queue.put([relative_loc[0], relative_loc[1], relative_loc[2]])
            # Compute upper and lower bounds
            upper = [w - fw // 2, h - fh // 2, d - fd // 2]
            lower = [fw // 2, fh // 2, fd // 2]
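            # (FOV centers must stay at least half a FOV away from every patch border)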

            p_weights = []
            optimizer.zero_grad()
            cnt = 0
            while not queue.empty():
                if cnt > 10:
                    break
                cnt += 1
                # Get new list of FOV locations
                current_loc = np.array(queue.get(), np.int32)
                # Center around FOV
                fov_data = get_data(subvol_data, current_loc, cfg['fov_shape'])
                fov_labels = get_data(subvol_labels, current_loc, cfg['fov_shape'])
                # fov_labels = np.squeeze(fov_labels, axis=1)
                fov_mask = get_data(subvol_mask, current_loc, cfg['fov_shape'])
                
                # Loss-weighted
                weights = get_weights(fov_labels)
                p_weights.append(weights)
                # print("weights:", weights)
                # criterion = nn.BCEWithLogitsLoss(pos_weight=0.005*weights)

                # Add merging of old and new mask
                d_m = np.concatenate([fov_data, fov_mask], axis=1)
                if args.cuda:
                    d_m = torch.Tensor(d_m).to('cuda')
                    fov_labels = torch.Tensor(fov_labels).to('cuda')
                else:
                    d_m = torch.Tensor(d_m)
                    fov_labels = torch.Tensor(fov_labels)
               
                pred = model(d_m)
                # print(type(pred), pred.type())
                # print(torch.from_numpy(fov_mask).type())
                logit_seed = torch.add(torch.from_numpy(fov_mask).to(pred.device), other=pred)  # match the prediction's device (works with or without CUDA)
                # logit_seed = pred
                prob_seed = expit(logit_seed.detach().cpu().numpy())
                if len(vis_loss) % 10 == 0:
                    # print(np.max(prob_seed), np.min(prob_seed), np.sum(prob_seed>0.95)/(17*17*17))
                    writer.add_scalars("prob_map", {"max": np.max(prob_seed),
                                                   "min": np.min(prob_seed),
                                                   "pos_ratio": np.sum(prob_seed>0.95)/(33*33*33),
                                                   "1/weights": 1/weights}, i)

                # Loss, Backprop
                # optimizer.zero_grad()
                # print(torch.max(pred), torch.min(pred))
                # print(torch.max(torch.sigmoid(pred)), torch.min(torch.sigmoid(pred)))
                loss0 = criterion(logit_seed, fov_labels, weights)
                loss0.backward(retain_graph=True)
                # gradClamp(model.parameters())

                # log
                if i % 10 == 0:
                    writer.add_scalars("Train/Loss", {"loss": loss0.data}, i)
                    for name, layer in model.named_parameters():
                        writer.add_histogram(name+'_grad', layer.grad.cpu().data.numpy(), i)
                    writer.add_image("Target", trans3Dto2D(fov_labels.cpu()), i)
                    writer.add_image("ProbMap", trans3Dto2D(prob_seed), i)
                i += 1
                vis_loss.append(loss0.detach().item())
                if len(vis_loss) % 10 == 0:
                    print("%d of a tif, FOV Loss: %.6f" % (len(vis_loss), loss0.data.item()))

                # Update the soft binary mask for this patch
                set_data(subvol_mask, current_loc, logit_seed.detach().cpu().numpy())

                # Compute new locations
                new_locations = get_new_locs(logit_seed.detach().cpu().numpy(), cfg['delta'], cfg['tmove'])
                for new in new_locations:
                    new = np.array(new, np.int32) + current_loc
                    bounds = [lower[j] <= new[j] < upper[j] for j in range(3)]
                    stored_loc = tuple([new[i] for i in range(3)])
                    if all(bounds) and stored_loc not in V:
                        V.add(stored_loc)
                        queue.put(new)
            # mask = subvol_mask >= logit(0.6)
            loss1 = len(p_weights) * criterion(torch.Tensor(subvol_mask), torch.Tensor(subvol_labels), np.mean(p_weights))
            loss0.data.zero_()
            loss0.data = loss1.data
            loss0.backward()
            optimizer.step()
            print("One patch ends of Iteration(%d)/Epoch(%d)" % (iteration, epoch))
        print("One tif ends of Iteration(%d)/Epoch(%d)" % (iteration, epoch))

        if iteration % 10 == 0:
            print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss0.data.item()), end='\n')
            # print('timer: %.4f sec.    %.4f sec.' % (t2 - t1, t1 - t0))
        # if args.visdom:
        #     update_vis_plot(iteration, min(500, np.mean(vis_loss)), iter_plot, epoch_plot, 'append')
        if iteration != 0 and iteration % 20 == 0:
            print('Saving state, iter:', iteration)
            torch.save(model.state_dict(), args.save_folder +'/FFN_' + dataset.name + "_" +
                       repr(iteration) + '.pth')
    torch.save(model.state_dict(),
               args.save_folder + '/FFN_' + dataset.name + '.pth')
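
For reference, get_weights(fov_labels) in this example produces a scalar loss weight from the label patch (it is averaged over p_weights and logged as 1/weights above), but its definition is not part of the snippet. A hypothetical sketch, assuming it returns the background-to-foreground voxel ratio used to up-weight the sparse positive class:

import numpy as np

def get_weights(fov_labels, eps=1e-6):
    # Hypothetical: scalar weight = (#background voxels) / (#foreground voxels).
    labels = np.asarray(fov_labels)
    n_pos = float(np.sum(labels > 0))
    n_neg = float(labels.size) - n_pos
    return n_neg / (n_pos + eps)
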
Esempio n. 35
0
    ax.set_title("time step {}".format(t), fontsize=10)

def weights(ddir, ts, quiet, show_title, cmap):
    params = import_params(ddir)

    try:
        nRows = int(params['nRowsIn'])
        nCols = int(params['nColsIn'])
        nOutputs = int(params['nOutputs'])
    except KeyError as err:
        print('necessary parameter not found: ' + str(err))
        sys.exit(-1)

    nInputs = nRows * nCols

    Wx, Wy = get_weights(ddir, nInputs, nOutputs)
    if Wx is None or Wy is None:
        print("failed to read weights")
        return

    time = Wx[:,0,0]
    T = len(time)
    # omit time
    Wx = Wx[:,1:,:]
    Wy = Wy[:,1:,:]

    [x, y] = np.meshgrid(np.arange(0, nInputs + 1), np.arange(0, nOutputs + 1))

    N = len(ts)

    figx, axesx = plt.subplots(N, sharex=True)