Example #1
    def train_network(self):
        
        self.X_train, self.y_train, self.X_test, self.y_test = prepare_dataset(self.X, self.y, self.args)
        self.logger.info('Dataset prepared, train/test splits ready to use')
        accuracies = []
        heatmaps = []
        hist, W = self.run_network(self.X_train, self.y_train, self.args, phase='TRAIN', 
                accuracies=accuracies, heatmaps=heatmaps)
        #import pickle
        #pickle.dump( accuracies, open( "accs.p", "wb" ) )
        last_heatmap = heatmaps[-1]

        # Save the final heatmap (confusion matrix) as a CSV file.
        columns = range(self.args.num_classes)
        conf_csv = pd.DataFrame(last_heatmap, columns=columns)
        conf_csv.to_csv("statistical_data/confusion_matrix.csv")

        if self.args.plot_curve:
            fig = plt.figure()
            #fig.suptitle(3*'\n'.join(str(self.args).split(',')), fontsize=8)
            ax1 = fig.add_subplot(221)
            ax1.set_xlabel('Samples seen')
            ax1.set_ylabel('Accuracies')
            examples_seen, accs = zip(*accuracies)
            ax1.plot(examples_seen, accs)
            
            ax2 = fig.add_subplot(222)
            sns.heatmap(last_heatmap, annot=True, fmt='d', ax=ax2)
            ax2.set_xlabel('True values')
            ax2.set_ylabel('Predicted values')

            ax3 = fig.add_subplot(223)
            #ax2.text(0, 0, 'aaa')
            ax3.set_xlabel('Parameters')
            an = ax3.annotate(str(self.args), fontsize=10, xy=(0.1, 1), ha='left', va='top', xytext=(0, -6),
                            xycoords='axes fraction', textcoords='offset points')
            #wrapText(an)
            #ax2.text(0.5, 0.5,str(self.args), horizontalalignment='center',
            #        verticalalignment='center', transform=ax2.transAxes, fontsize=8)
            plt.show(block=True)
        return hist, W, accuracies
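
Examples 1, 2, and 4 rely on a `prepare_dataset` helper whose implementation is not shown. A minimal sketch of what those callers appear to expect (a hypothetical stand-in, not the original code) could look like this:

from sklearn.model_selection import train_test_split

def prepare_dataset(X, y, args):
    # Hypothetical stand-in: split X/y into train and test subsets and
    # return them in the order the callers above unpack.
    # `args.test_size` is an assumed attribute, not confirmed by the source.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=getattr(args, 'test_size', 0.2))
    return X_train, y_train, X_test, y_test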
Example #2
    def train_network(self):

        self.X_train, self.y_train, self.X_test, self.y_test = prepare_dataset(
            self.X, self.y, self.args)
        self.logger.info('Dataset prepared, train/test splits ready to use')
        accuracies = []
        heatmaps = []
        hist, W = self.run_network(self.X_train,
                                   self.y_train,
                                   self.args,
                                   phase='TRAIN',
                                   accuracies=accuracies,
                                   heatmaps=heatmaps)
        #import pickle
        #pickle.dump( accuracies, open( "accs.p", "wb" ) )
        last_heatmap = heatmaps[-1]
        if self.args.plot_curve:
            fig = plt.figure()
            #fig.suptitle(3*'\n'.join(str(self.args).split(',')), fontsize=8)
            ax1 = fig.add_subplot(221)
            ax1.set_xlabel('Samples seen')
            ax1.set_ylabel('Accuracies')
            examples_seen, accs = zip(*accuracies)
            ax1.plot(examples_seen, accs)

            ax2 = fig.add_subplot(222)
            sns.heatmap(last_heatmap, annot=True, fmt='d', ax=ax2)
            ax2.set_xlabel('True values')
            ax2.set_ylabel('Predicted values')

            ax3 = fig.add_subplot(223)
            #ax2.text(0, 0, 'aaa')
            ax3.set_xlabel('Parameters')
            an = ax3.annotate(str(self.args),
                              fontsize=10,
                              xy=(0.1, 1),
                              ha='left',
                              va='top',
                              xytext=(0, -6),
                              xycoords='axes fraction',
                              textcoords='offset points')
            wrapText(an)
            #ax2.text(0.5, 0.5,str(self.args), horizontalalignment='center',
            #        verticalalignment='center', transform=ax2.transAxes, fontsize=8)
            plt.show(block=True)
        return hist, W, accuracies
Example #3
import cv2
import numpy as np
import common
import config as cfg

# NOTE: the original unpacked `list_labels` twice, silently discarding one
# return value; the second target is assumed to be `list_labels2` (matching
# the `labels2` returned by prepare_dataset below).
list_all_labels, list_labels, list_labels2, list_attributs = common.infos_xmls(
    cfg.dir_dataset, with_attribut=cfg.with_attribut, verbose=True)

if not cfg.with_attribut:
    list_attributs = None

images, labels, labels2, mask_attributs = common.prepare_dataset(
    cfg.dir_dataset,
    list_labels=list_labels,
    list_attributs=list_attributs,
    data_augmentation=False,
    verbose=True)

print("Nbr image:", len(images))
images = images / 255

for i in range(len(images)):
    image = common.prepare_image(images[i], labels[i], True)
    cv2.imshow("image",
               cv2.resize(image, (2 * cfg.image_size, 2 * cfg.image_size)))
    key = cv2.waitKey(3) & 0xFF
    if key == ord('q'):
        break
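
Since `images / 255` produces floats in [0, 1], `cv2.imshow` displays them correctly (it scales floating-point pixels by 255 for display). If an 8-bit image is needed again, e.g. for `cv2.imwrite`, a conversion along these lines works (`image` is the float array from the loop above):

import numpy as np

# Scale a float image in [0, 1] back to 8-bit.
display = (image * 255).clip(0, 255).astype(np.uint8)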
Example #4
    def train_network(self, init_weights=None):
        pq = PriorityQueue()
        # keep_recon_distr: keeps a list of reconstructed distributions for
        # each training sample

        self.X_train, self.y_train, self.X_test, self.y_test = prepare_dataset(
            self.X, self.y, self.args)
        cf = self.args  # configurations

        membranes = []
        last_spiked = []
        refrac_end = []
        last_update = []
        spike_count = [0, 0, 0, 0]
        calc_recons = [False, False, False, False]
        thr = []

        vis_size = cf.visible_size
        hid_size = cf.hidden_size

        # extend vis_size when training in a supervised way
        # (see the picture on page 29 of the thesis)
        if cf.train_supervised:
            # last num_classes number of input vector will be preserved for
            # labels
            vis_size += cf.num_classes

        if init_weights is None:
            # check whether it should be loaded
            if cf.load_weights is not None:
                W = np.load(cf.load_weights)
            else:
                # fall back to random init
                W = np.random.rand(vis_size, hid_size)
        else:
            W = init_weights

        batch_W_delta = np.zeros(
            (vis_size, hid_size))  # used for batch updates

        # history keeps track of spikes/potential changes/etc. It is a dict
        # keyed by timestep (rounded to cf.prec digits); each value is the
        # list of events which occurred on that timestep (the list has size 1
        # unless several spikes occurred simultaneously). Each item in the
        # list is a tuple whose first element is a string event type; the
        # remaining elements describe the event, e.g. for a 'SPIKE' they are
        # the layer and address. Tuple layouts:
        # -----------------------
        # ('SPIKE', layer, address)
        # ('THR', layer, address, newvalue)
        # ('MEMBRANE', layer, layer_values)
        history = defaultdict(list)

        self.log(history, 0, ('INIT_WEIGHTS', W))

        # Data and model layers
        for layer in range(4):
            layer_size = vis_size if layer % 2 == 0 else hid_size

            membranes.append(np.zeros(layer_size))
            last_spiked.append(np.zeros(layer_size))
            last_spiked[-1].fill(
                -100)  # far in the past, so no spurious initial spikes fire
            last_update.append(np.zeros(layer_size))
            refrac_end.append(np.zeros(layer_size))
            th_single = np.zeros(layer_size)
            th_single.fill(cf.thr)
            thr.append(th_single)

        t_passed = 0
        self.errors = []  # errors on test set
        for (sample_num, x) in enumerate(self.X_train):

            if cf.batch_size is not None and sample_num != 0 and sample_num % cf.batch_size == 0:
                # perform batch update
                W += batch_W_delta / cf.batch_size
                batch_W_delta.fill(0)

            if cf.test_every is not None and (sample_num +
                                              1) % cf.test_every == 0:
                # run current network on test set for training curve
                #import ipdb; ipdb.set_trace()
                print('Trained on %d, W - max: %.3f, min %.3f, avg %.3f' % (
                    sample_num + 1, np.max(W), np.min(W), np.average(W)))
                self.errors.append(self.evaluate_accuracy(W))

            spike_train_sample = data_to_spike(x, cf.numspikes, cf.timespan,
                                               cf.noise_uniform)
            # the spike train encodes one digit as (spike_address, time) pairs,
            # e.g. the digit 8 might be represented as ((2, 12), (2, 13), (4, 14), ...)

            spike_image = np.zeros(vis_size)
            for addr, time in spike_train_sample:
                # spike fired from '-1th' layer
                pq.put(Spike(time=time + t_passed, layer=0, address=addr))
                spike_image[addr] += 1

            if cf.train_supervised:
                # add label spikes
                step = cf.timespan / float(cf.numspikes_label)
                # spikes for the labels are in the end
                addr = vis_size - 10 + self.y_train[sample_num]

                for time in np.arange(0, cf.timespan, step):
                    pq.put(Spike(time=time + t_passed, layer=0, address=addr))

            # add spike image to history, but normalize first
            if cf.simulate or cf.log_reconstr:
                history[t_passed].append(
                    ('NEW_SPIKE_TRAIN',
                     spike_image / float(np.max(spike_image))))

            t_passed = t_passed + cf.timespan + cf.t_gap

            last_time = -1

            # reconstruction spike distribution for each sample,
            # needed for benchmarking
            recon_distr = np.zeros(vis_size)

            max_pq = 0
            while not pq.empty():
                if pq.qsize() > max_pq:
                    max_pq = pq.qsize()

                spike_triplet = pq.get()
                #import ipdb; ipdb.set_trace()
                self.process_spike(spike_triplet, cf, pq, thr, last_spiked,
                                   membranes, last_update, refrac_end,
                                   spike_count, calc_recons, W, history,
                                   recon_distr, batch_W_delta)

            if cf.log_reconstr:
                history[t_passed].append(
                    ('RECON_DISTR', recon_distr / float(np.max(recon_distr))))

            if cf.simulate:
                # log weight update
                for hidd in range(hid_size):
                    weights_to_log = np.copy(W[:, hidd])
                    self.log(history, t_passed,
                             ('UPDATE_WEIGHTS', hidd, weights_to_log, False))

        # plot errors
        print(max_pq)
        if cf.plot_curve:
            fig = plt.figure()
            fig.suptitle('\n'.join(str(self.args).split(',')), fontsize=8)
            ax2 = fig.add_subplot(111)
            plt.plot(self.errors)
            plt.show()

        return history, W
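
Example 4 assumes a `Spike` record that the `PriorityQueue` pops in chronological order. One minimal way to get that behaviour (an assumption, not the original definition) is a namedtuple with `time` as the first field, since tuples compare field by field:

from collections import namedtuple

# `time` first => the priority queue orders spikes chronologically;
# ties fall back to comparing `layer`, then `address`.
Spike = namedtuple('Spike', ['time', 'layer', 'address'])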
Example #5
import tensorflow as tf

import common
import config as cfg
import score

list_labels = ['tete', 'humain']
list_attributs = [
    'tete:sexe:homme', 'humain:sexe:homme', 'tete:sexe:femme',
    'humain:sexe:femme', 'tete:emotion:heureux'
]
list_attributs = None  # overrides the list above: attributes are disabled in this run

model = common.readmodel()
if model is None:
    quit()

images, labels, labels2, mask_attributs = common.prepare_dataset(
    cfg.dir_dataset,
    list_labels=list_labels,
    list_attributs=list_attributs,
    verbose=True)
# assumes list_attributs is non-None whenever cfg.with_attribut is True
nbr_attributs = 0 if cfg.with_attribut is False else len(list_attributs)

images = images / 255

dataset = tf.data.Dataset.from_tensor_slices(
    (images, labels, labels2, mask_attributs)).batch(cfg.batch_size)

del images, labels

images_test, labels_test, labels2_test, mask_attributs_test = common.prepare_dataset(
    cfg.dir_test,
    list_labels=list_labels,
    list_attributs=list_attributs,
    verbose=True)
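
Assuming TensorFlow 2 eager execution, the batched pipeline built above can be consumed directly; each element is a tuple of tensors (the last batch may be smaller than `cfg.batch_size`):

for batch_images, batch_labels, batch_labels2, batch_masks in dataset:
    print(batch_images.shape, batch_labels.shape)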