Example #1
File: make_movie.py Project: keshava/mmvt
def create_movie(time_range, xticks, fol, dpi, fps, video_fname, cb_data_type,
                 data_to_show_in_graph, cb_title='', cb_min_max_eq=True, cb_norm_percs=None, color_map='jet',
                 bitrate=5000, fol2='', cb2_data_type='', cb2_title='', cb2_min_max_eq=True, color_map2='jet',
                 ylim=(), ylabels=(), xticklabels=(), xlabel='Time (ms)', pics_type='png', show_first_pic=False,
                 show_animation=False, overwrite=True, n_jobs=1):

    images1 = get_pics(fol, pics_type)[:len(time_range)]
    images1_chunks = utils.chunks(images1, int(len(images1) / n_jobs))
    if fol2 != '':
        images2 = get_pics(fol2, pics_type)
        if len(images2) != len(images1):
            raise Exception('fol and fol2 have different number of pictures!')
        images2_chunks = utils.chunks(images2, int(len(images2) / n_jobs))
    else:
        images2_chunks = [''] * int(len(images1) / n_jobs)
    params = [(images1_chunk, images2_chunk, time_range, xticks, dpi, fps,
               video_fname, cb_data_type, data_to_show_in_graph, cb_title, cb_min_max_eq, cb_norm_percs, color_map,
               bitrate, ylim, ylabels, xticklabels, xlabel, show_first_pic, fol, fol2,
               cb2_data_type, cb2_title, cb2_min_max_eq, color_map2, run, show_animation, overwrite)
              for run, (images1_chunk, images2_chunk) in enumerate(zip(images1_chunks, images2_chunks))]
    n_jobs = utils.get_n_jobs(n_jobs)
    if n_jobs > 1:
        utils.run_parallel(_create_movie_parallel, params, n_jobs)
        video_name, video_type = op.splitext(video_fname)
        mu.combine_movies(fol, video_name, video_type[1:])
    else:
        for p in params:
            _create_movie_parallel(p)
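Note: these snippets rely on a utils.chunks helper that is not shown. A minimal sketch consistent with how it is called here (a list plus a per-job chunk size) might look like this; the int guard is an assumption to keep the chunk size valid when n_jobs does not divide the list evenly:

def chunks(lst, chunk_size):
    # Split lst into consecutive chunks of roughly chunk_size items.
    # Guard against a zero or fractional size when n_jobs > len(lst).
    chunk_size = max(1, int(chunk_size))
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]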
Example #2
def read_labels_parallel(subject,
                         subjects_dir,
                         atlas,
                         hemi='',
                         labels_fol='',
                         n_jobs=1):
    try:
        labels_fol = op.join(subjects_dir, subject, 'label',
                             atlas) if labels_fol == '' else labels_fol
        if hemi != '':
            labels_files = glob.glob(
                op.join(labels_fol, '*{}.label'.format(hemi)))
            labels_files.extend(
                glob.glob(op.join(labels_fol, '{}.*label'.format(hemi))))
        else:
            labels_files = glob.glob(op.join(labels_fol, '*.label'))
        files_chunks = utils.chunks(labels_files, int(len(labels_files) / n_jobs))
        results = utils.run_parallel(_read_labels_parallel,
                                     files_chunks,
                                     njobs=n_jobs)
        labels = []
        for labels_chunk in results:
            labels.extend(labels_chunk)
        return labels
    except Exception:
        print(traceback.format_exc())
        return []
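utils.run_parallel is likewise not shown. A plausible stand-in, assuming it maps a worker over the parameter list and falls back to a serial loop for a single job, is:

import multiprocessing

def run_parallel(func, params, njobs=1):
    # Map func over params: serially when njobs == 1,
    # otherwise in a process pool, returning the collected results.
    if njobs == 1:
        return [func(p) for p in params]
    with multiprocessing.Pool(processes=njobs) as pool:
        return pool.map(func, params)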
Example #3
def solve_labels_collision(subject, subjects_dir, atlas, backup_atlas, n_jobs=1):
    now = time.time()
    print('Read labels')
    labels = utils.read_labels_parallel(subject, subjects_dir, atlas, n_jobs=n_jobs)
    backup_labels_fol = op.join(subjects_dir, subject, 'label', backup_atlas)
    labels_fol = op.join(subjects_dir, subject, 'label', atlas)
    if op.isdir(backup_labels_fol):
        shutil.rmtree(backup_labels_fol)
    os.rename(labels_fol, backup_labels_fol)
    utils.make_dir(labels_fol)
    hemis_verts, labels_hemi, pia_verts = {}, {}, {}
    print('Read surface ({:.2f}s)'.format(time.time() - now))
    for hemi in HEMIS:
        surf_fname = op.join(subjects_dir, subject, 'surf', '{}.pial'.format(hemi))
        hemis_verts[hemi], _ = mne.surface.read_surface(surf_fname)
        labels_hemi[hemi] = [l for l in labels if l.hemi == hemi]
    print('Calc centroids ({:.2f}s)'.format(time.time() - now))
    centroids = calc_labels_centroids(labels_hemi, hemis_verts)
    for hemi in HEMIS:
        print('Calc vertices labeling for {} ({:.2f}s)'.format(hemi, time.time() - now))
        hemi_centroids_dist = cdist(hemis_verts[hemi], centroids[hemi])
        vertices_labels_indices = np.argmin(hemi_centroids_dist, axis=1)
        labels_hemi_chunks = utils.chunks(list(enumerate(labels_hemi[hemi])), int(len(labels_hemi[hemi]) / n_jobs))
        params = [(labels_hemi_chunk, atlas, vertices_labels_indices, hemis_verts, labels_fol) for labels_hemi_chunk in labels_hemi_chunks]
        print('Save labels for {} ({:.2f}s)'.format(hemi, time.time() - now))
        utils.run_parallel(_save_new_labels_parallel, params, n_jobs)
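calc_labels_centroids is not included in this example. Given the cdist call above, which expects one 3D coordinate per label, a plausible sketch is the mean pial-surface position of each label's vertices (an assumption, not the project's confirmed implementation):

import numpy as np

def calc_labels_centroids(labels_hemi, hemis_verts):
    # For each hemisphere, stack one centroid per label: the mean
    # coordinate of the label's vertices on the pial surface.
    centroids = {}
    for hemi, labels in labels_hemi.items():
        centroids[hemi] = np.array(
            [hemis_verts[hemi][label.vertices].mean(axis=0) for label in labels])
    return centroids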
Example #4
def solve_labels_collision(subject, subjects_dir, atlas, backup_atlas, n_jobs=1):
    now = time.time()
    print('Read labels')
    # utils.read_labels_parallel(subject, subjects_dir, atlas, labels_fol='', n_jobs=n_jobs)
    labels = read_labels(subject, subjects_dir, atlas, n_jobs=n_jobs)
    backup_labels_fol = op.join(subjects_dir, subject, 'label', backup_atlas)
    labels_fol = op.join(subjects_dir, subject, 'label', atlas)
    if op.isdir(backup_labels_fol):
        shutil.rmtree(backup_labels_fol)
    os.rename(labels_fol, backup_labels_fol)
    utils.make_dir(labels_fol)
    hemis_verts, labels_hemi, pia_verts = {}, {}, {}
    print('Read surface ({:.2f}s)'.format(time.time() - now))
    for hemi in HEMIS:
        surf_fname = op.join(subjects_dir, subject, 'surf', '{}.pial'.format(hemi))
        hemis_verts[hemi], _ = mne.surface.read_surface(surf_fname)
        labels_hemi[hemi] = [l for l in labels if l.hemi == hemi]
    print('Calc centroids ({:.2f}s)'.format(time.time() - now))
    centroids = calc_labels_centroids(labels_hemi, hemis_verts)
    for hemi in HEMIS:
        print('Calc vertices labeling for {} ({:.2f}s)'.format(hemi, time.time() - now))
        hemi_centroids_dist = cdist(hemis_verts[hemi], centroids[hemi])
        vertices_labels_indices = np.argmin(hemi_centroids_dist, axis=1)
        labels_hemi_chunks = utils.chunks(list(enumerate(labels_hemi[hemi])), int(len(labels_hemi[hemi]) / n_jobs))
        params = [(labels_hemi_chunk, atlas, vertices_labels_indices, hemis_verts, labels_fol) for labels_hemi_chunk in labels_hemi_chunks]
        print('Save labels for {} ({:.2f}s)'.format(hemi, time.time() - now))
        utils.run_parallel(_save_new_labels_parallel, params, n_jobs)
Example #5
def read_labels_parallel(subject, subjects_dir, atlas, n_jobs):
    labels_files = glob.glob(op.join(subjects_dir, subject, 'label', atlas, '*.label'))
    files_chunks = utils.chunks(labels_files, int(len(labels_files) / n_jobs))
    results = utils.run_parallel(_read_labels_parallel, files_chunks, n_jobs)
    labels = []
    for labels_chunk in results:
        labels.extend(labels_chunk)
    return labels
Example #6
def read_labels_parallel(subject, subjects_dir, atlas, labels_fol='', n_jobs=1):
    labels_fol = op.join(subjects_dir, subject, 'label', atlas) if labels_fol == '' else labels_fol
    labels_files = glob.glob(op.join(labels_fol, '*.label'))
    files_chunks = utils.chunks(labels_files, int(len(labels_files) / n_jobs))
    results = utils.run_parallel(_read_labels_parallel, files_chunks, n_jobs)
    labels = []
    for labels_chunk in results:
        labels.extend(labels_chunk)
    return labels
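The _read_labels_parallel worker is not shown in any of these examples. Assuming each chunk is a list of FreeSurfer .label file paths, a minimal sketch built on mne.read_label could be:

import mne

def _read_labels_parallel(files_chunk):
    # Worker: read every FreeSurfer .label file in this chunk and
    # return the resulting mne.Label objects.
    return [mne.read_label(label_fname) for label_fname in files_chunk]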
Example #7
def create_movie(time_range, xticks, fol, dpi, fps, video_fname, cb_data_type,
    data_to_show_in_graph, cb_title='', cb_min_max_eq=True, color_map='jet', bitrate=5000, fol2='', ylim=(),
    ylabels=(), xticklabels=(), xlabel='Time (ms)', pics_type='png', show_first_pic=False, n_jobs=1):

    images1 = get_pics(fol, pics_type)
    images1_chunks = utils.chunks(images1, int(len(images1) / n_jobs))
    if fol2 != '':
        images2 = get_pics(fol2, pics_type)
        if len(images2) != len(images1):
            raise Exception('fol and fol2 have different number of pictures!')
        images2_chunks = utils.chunks(images2, int(len(images2) / n_jobs))
    else:
        images2_chunks = [''] * int(len(images1) / n_jobs)
    params = [(images1_chunk, images2_chunk, time_range, xticks, dpi, fps,
               video_fname, cb_data_type, data_to_show_in_graph, cb_title, cb_min_max_eq, color_map, bitrate,
               ylim, ylabels, xticklabels, xlabel, show_first_pic, fol, fol2, run)
              for run, (images1_chunk, images2_chunk) in enumerate(zip(images1_chunks, images2_chunks))]
    utils.run_parallel(_create_movie_parallel, params, n_jobs)
    video_name, video_type = op.splitext(video_fname)
    mu.combine_movies(fol, video_name, video_type[1:])
Example #8
    def validate(self, model, plot_tsne=True, epoch=None):
        model.eval()
        full_loss = 0
        all_hidden = np.zeros((len(self.dev_strs), self.args.hidden_size))
        bs = self.args.batch_size

        for batch_idx, batch in enumerate(chunks(self.dev_arr, bs)):
            batch = torch.from_numpy(batch)
            if self.args.use_cuda:
                batch = batch.cuda(self.args.device)
            input, hidden, output = model(batch)
            all_hidden[batch_idx * bs:(batch_idx + 1) * bs] = hidden.detach()
            loss = mse(input, output)
            full_loss += loss.item()

        if plot_tsne:
            self.plot_tsne(model, epoch)

        _, ent_encoded, _ = model(self.ent_arr)
        ent_encoded = ent_encoded.detach().cpu().numpy()
        _, mentions_encoded, _ = model(self.mention_arr)
        mentions_encoded = mentions_encoded.detach().cpu().numpy()

        if self.args.measure == 'l2':
            index = faiss.IndexFlatL2(ent_encoded.shape[1])
        elif self.args.measure == 'ip':
            index = faiss.IndexFlatIP(ent_encoded.shape[1])
            if not self.args.norm:
                ent_encoded = normalize(ent_encoded)
                mentions_encoded = normalize(mentions_encoded)
        index.add(ent_encoded)

        _, predictions = index.search(mentions_encoded, 100)
        pred_str = self.create_pred_str(mentions=self.mention_arr[:10],
                                        preds=predictions[:10])
        logger.info(f"PREDICTIONS AT EPOCH {epoch}: \n {pred_str}")
        results = eval_ranking(predictions, self.gold, [1, 10, 100])
        self.valid_metrics.append(results[0])

        return full_loss / (batch_idx + 1), results
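For context, the retrieval step in validate() follows the standard faiss pattern: build an exact index over the entity embeddings, then query it with the mention embeddings. A self-contained toy version (random vectors and a made-up dimension, purely illustrative) looks like this:

import numpy as np
import faiss

d = 64                                                     # illustrative embedding size
ent_encoded = np.random.rand(1000, d).astype('float32')    # faiss expects float32
mentions_encoded = np.random.rand(10, d).astype('float32')

index = faiss.IndexFlatL2(d)       # exact L2 nearest-neighbour index
index.add(ent_encoded)             # index all entity embeddings
distances, predictions = index.search(mentions_encoded, 100)  # top-100 ids per mention
print(predictions.shape)           # (10, 100)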
Example #9
File: optim.py Project: LieceC/ML_Projet
    def SGD(self,
            X,
            Y,
            batch_size,
            X_val=None,
            Y_val=None,
            f_val=lambda x: x,
            maxiter=10,
            verbose=False):
        """
    
            Parameters
            ----------
            X :
                Training data.
            Y :
                Training labels.
            batch_size :
                Batch size used during training.
            X_val : TYPE, optional
                Validation data. The default is None.
            Y_val : TYPE, optional
                Validation labels. The default is None.
            f_val : TYPE, optional
                Function applied to the predicted labels so that they match
                the validation labels. The default is lambda x: x.
            maxiter : TYPE, optional
                Number of training iterations. The default is 10.
            verbose : TYPE, optional
                Verbosity level.
                1 => print the loss at every iteration
                2 => plot the loss curve against the number of iterations
                The default is False.
            Returns
            -------
            None.

        """
        assert len(X) == len(Y)
        assert X_val is None or (Y_val is not None
                                 and len(X_val) == len(Y_val))
        losss = []
        precision_val = []
        for i in range(maxiter):
            datax_rand, datay_rand = unison_shuffled_copies(X, Y)
            datax_rand_batch, datay_rand_batch = list(
                chunks(datax_rand,
                       batch_size)), list(chunks(datay_rand, batch_size))
            nb_batchs = len(datax_rand_batch)
            loss_batch = np.zeros(nb_batchs)

            for j in range(nb_batchs):
                loss_batch[j] = self.step(datax_rand_batch[j],
                                          datay_rand_batch[j]).mean()
                losss += [loss_batch[j]]
            if X_val is not None:  # compute validation accuracy
                predict = self.predict(X_val)
                y_hat = f_val(predict)
                precision_val += [sum(y_hat == Y_val) / len(Y_val)]

            if verbose >= 1:
                print("iteration " + str(i) + ":")
                print("Loss")
                print("mean - " + str(loss_batch.mean()) + "\nstd - " +
                      str(loss_batch.std()))
        if verbose == 2:
            patches = [
                mpatches.Patch(color='red', label='variance over iterations'),
                mpatches.Patch(color='green', label='mean over iterations'),
                mpatches.Patch(color='blue', label='evolution over the batches')
            ]
            losss = np.array(losss)
            x = np.arange(1 / nb_batchs, maxiter + 1 / nb_batchs,
                          1 / nb_batchs)
            plt.plot(x, losss, color="blue")
            plt.title("Loss as a function of the number of iterations")
            plt.legend(handles=patches[-1:])
            plt.show()

            x = np.arange(1, maxiter + 1 / nb_batchs, 1)
            losss_2 = losss.reshape(-1, nb_batchs)
            plt.plot(x, losss_2.mean(axis=1), color='green')
            plt.plot(x, losss_2.std(axis=1), color='red')
            plt.title("Loss as a function of the number of iterations")
            plt.legend(handles=patches[:-1])
            plt.show()
        if X_val is not None:
            x = np.arange(1, maxiter + 1)
            plt.plot(x, precision_val, color="blue")
            plt.title("Accuracy as a function of the number of iterations")
            plt.show()
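unison_shuffled_copies is not defined in the snippet. A common implementation consistent with how it is used here, shuffling X and Y with one shared permutation so the pairs stay aligned, is:

import numpy as np

def unison_shuffled_copies(a, b):
    # Shuffle two equal-length arrays with the same random permutation,
    # keeping each (a[i], b[i]) pair aligned.
    assert len(a) == len(b)
    perm = np.random.permutation(len(a))
    return a[perm], b[perm]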