# Example 1
    def __init__(self, rand_int=0, num_of_samples=None, args=None):
        """Configure the experiment: hyper-parameters, data and result containers.

        Args:
            rand_int: integer tag used to distinguish repeated runs.
            num_of_samples: number of epoch snapshots; forwarded to the
                default parser only when ``args`` is None.
            args: pre-parsed argument namespace; when None, one is built
                by ``netp.get_default_parser``.
        """
        if args is None:  # PEP 8: compare to None with `is`, not `==`
            args = netp.get_default_parser(num_of_samples)
        self.cov_net = args.cov_net
        self.calc_information = args.calc_information
        self.run_in_parallel = args.run_in_parallel
        self.num_ephocs = args.num_ephocs
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.activation_function = args.activation_function
        self.interval_accuracy_display = args.interval_accuracy_display
        self.save_grads = args.save_grads
        self.num_of_repeats = args.num_of_repeats
        self.calc_information_last = args.calc_information_last
        self.num_of_bins = args.num_of_bins
        self.interval_information_display = args.interval_information_display
        self.save_ws = args.save_ws
        self.name = args.data_dir + args.data_name
        # The architecture of the networks
        self.layers_sizes = netp.select_network_arch(args.net_type)
        # The percents of the train data samples (args.inds are mapped onto
        # the 199-point grid over 1..100)
        self.train_samples = np.linspace(1, 100, 199)[[[x * 2 - 2 for x in index] for index in args.inds]]
        # Log2-spaced epoch indexes at which the information is calculated
        self.epochs_indexes = np.unique(
            np.logspace(np.log2(args.start_samples), np.log2(args.num_ephocs), args.num_of_samples, dtype=int,
                        base=2)) - 1
        # load data
        self.data_sets = load_data(self.name, args.random_labels)
        # Nested result containers indexed [repeat][architecture][train-sample]
        self.ws, self.grads, self.information, self.models, self.names, self.networks, self.weights = [
            [[[[None] for k in range(len(self.train_samples))] for j in range(len(self.layers_sizes))]
             for i in range(self.num_of_repeats)] for _ in range(7)]

        self.loss_train, self.loss_test, self.test_error, self.train_error, self.l1_norms, self.l2_norms = \
            [np.zeros((self.num_of_repeats, len(self.layers_sizes), len(self.train_samples), len(self.epochs_indexes)))
             for _ in range(6)]

        # Run summary; also used to build the save name below
        params = {'sampleLen': len(self.train_samples),
                  'nDistSmpls': args.nDistSmpls,
                  'layerSizes': ",".join(str(i) for i in self.layers_sizes[0]), 'nEpoch': args.num_ephocs,
                  'batch': args.batch_size,
                  'nRepeats': args.num_of_repeats, 'nEpochInds': len(self.epochs_indexes),
                  'LastEpochsInds': self.epochs_indexes[-1], 'DataName': args.data_name,
                  'lr': args.learning_rate}

        self.name_to_save = args.name + "_" + "_".join([str(i) + '=' + str(params[i]) for i in params])
        # These entries are added after the save name is built so they do not
        # appear in it
        params['train_samples'], params['CPUs'], params[
            'directory'], params['epochsInds'] = self.train_samples, NUM_CORES, self.name_to_save, self.epochs_indexes
        self.params = params
        self.rand_int = rand_int
        # If we trained already the network
        self.traind_network = False
# Example 2
    def __init__(self, rand_int=0, num_of_samples=None, args=None):
        """Configure the experiment: hyper-parameters, data and result containers.

        Args:
            rand_int: integer tag used to distinguish repeated runs.
            num_of_samples: number of epoch snapshots; forwarded to the
                default parser only when ``args`` is None.
            args: pre-parsed argument namespace; when None, one is built
                by ``netp.get_default_parser``.
        """
        if args is None:  # PEP 8: compare to None with `is`, not `==`
            args = netp.get_default_parser(num_of_samples)
        self.conv_net = args.conv_net  # boolean indicating if this should be a convolutional architecture. defunct
        self.calc_information = args.calc_information  # boolean indicating whether to find the info or not
        self.calcMethod = args.calcMethod  # name for the method of calculating the information
        self.run_in_parallel = args.run_in_parallel  # boolean for its name
        self.num_epochs = args.num_epochs  # how many epochs to run the model
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.activation_function = args.activation_function  # 0 is tanh, 1 is ReLU
        self.interval_accuracy_display = args.interval_accuracy_display
        self.save_grads = args.save_grads  # bool, used for plotting the gradients
        self.num_of_repeats = args.num_of_repeats
        self.calc_information_last = args.calc_information_last  # whether to calculate the information after running the network
        self.num_of_bins = args.num_of_bins
        self.interval_information_display = args.interval_information_display
        self.save_ws = args.save_ws
        self.name = args.data_dir + args.data_name
        # The architecture of the networks
        self.layers_sizes = netp.select_network_arch(args.net_type)
        # The percents of the train data samples (args.inds are mapped onto
        # the 199-point grid over 1..100)
        self.train_samples = np.linspace(1, 100, 199)[[[x * 2 - 2 for x in index] for index in args.inds]]
        # Log2-spaced epoch indexes at which the information is calculated
        self.epochs_indexes = np.unique(
                np.logspace(np.log2(args.start_samples), np.log2(args.num_epochs), args.num_of_samples, dtype=int,
                            base=2)) - 1
        # load data
        self.data_sets = load_data(self.name, args.random_labels)
        # create arrays for saving the data
        # ws is the activation in the (repeat, layer, training example)
        self.ws, self.grads, self.information, self.models, self.names, self.networks, self.weights = [
                [[[[None] for k in range(len(self.train_samples))] for j in range(len(self.layers_sizes))]
                 for i in range(self.num_of_repeats)] for _ in range(7)]
        # ws has "shape" train_samples by layer_sizes by num_of_repeats
        self.loss_train, self.loss_test, self.test_error, self.train_error, self.l1_norms, self.l2_norms = [np.zeros((self.num_of_repeats, len(self.layers_sizes), len(self.train_samples), len(self.epochs_indexes))) for _ in range(6)]

        # Run summary; also used to build the save name below
        params = {'sampleLen': len(self.train_samples),
                  'nDistSmpls': args.nDistSmpls,
                  'layerSizes': ",".join(str(i) for i in self.layers_sizes[0]), 'nEpoch': args.num_epochs, 'batch': args.batch_size,
                  'nRepeats': args.num_of_repeats, 'nEpochInds': len(self.epochs_indexes),
                  'LastEpochsInds': self.epochs_indexes[-1], 'DataName': args.data_name,
                  'lr': args.learning_rate}

        self.name_to_save = args.name + "_" + "_".join([str(i) + '=' + str(params[i]) for i in params])
        # These entries are added after the save name is built so they do not
        # appear in it
        params['train_samples'], params['CPUs'], params[
                'directory'], params['epochsInds'] = self.train_samples, NUM_CORES, self.name_to_save, self.epochs_indexes
        self.params = params
        self.rand_int = rand_int
        # If we trained already the network
        self.traind_network = False
# Example 3
    def __init__(self, snaps=None, args=None):
        """Configure the experiment: hyper-parameters, data and result containers.

        Args:
            snaps: number of epoch snapshots; forwarded to the default
                parser only when ``args`` is None.
            args: pre-parsed argument namespace; when None, one is built
                by ``get_default_parser``.
        """
        if args is None:  # PEP 8: compare to None with `is`, not `==`
            args = get_default_parser(snaps)
        self.cov = args.cov
        self.epochs = args.epochs
        self.lr = args.lr
        self.batchsize = args.batchsize
        self.act = args.act
        self.repeats = args.repeats
        self.nbins = args.nbins
        self.datafile = 'data/{}'.format(args.dataset)
        self.layers = args.layers
        self.percents = args.percents
        # Log2-spaced epoch indexes at which snapshots are taken
        self.snap_epochs = np.unique(
            np.logspace(np.log2(1), np.log2(args.epochs), args.snaps,
                        dtype=int, base=2)) - 1

        self.data_sets = load_data(self.datafile)

        # Nested result containers indexed [repeat][layer-config][percent]
        self.ws, self.grads, self.information, \
            self.models, self.names, self.networks, self.weights = [
                [[[[None] for k in range(len(self.percents))]
                  for j in range(len(self.layers))]
                 for i in range(self.repeats)] for _ in range(7)]

        self.loss_train, self.loss_test, self.test_error, \
                self.train_error, self.l1_norms, self.l2_norms = \
            [np.zeros((self.repeats, len(self.layers),
                       len(self.percents), len(self.snap_epochs)))
             for _ in range(6)]

        # Run summary; also used to build the save name below
        params = {
            'data': args.dataset,
            'layers': args.layers,
            'epochs': args.epochs,
            'batch': args.batchsize,
            'lr': args.lr,
            'repeats': args.repeats,
            'percents': self.percents }

        name_to_save = '|'.join([str(i) + '=' + str(params[i]) for i in params])
        self.name_to_save = name_to_save
        params['directory'] = self.name_to_save

        # NOTE(review): a previous assignment params['snapepochs'] =
        # len(self.snap_epochs) was immediately overwritten by the array
        # below, so it was a dead store and has been removed.  It may have
        # been intended as a separate count key (cf. 'nEpochInds' in the
        # sibling variants) — confirm with downstream consumers.
        params['CPUs'] = NUM_CORES
        params['snapepochs'] = self.snap_epochs

        self.params = params
        self.is_trained = False