def main():
    args = extraction_parser().parse_args()

    # Get output directory
    if args.output_directory is None:
        outdir = os.getcwd()
    elif not os.path.isdir(args.output_directory):
        raise ValueError("Output directory invalid")
    else:
        outdir = args.output_directory

    # Start log
    log_name = "injection_finder_{}".format(
        os.path.split(args.registration_folder)[-1])
    fancylog.start_logging(outdir, package, filename=log_name, verbose=True)

    # Start extraction
    Extractor(
        args.img_filepath,
        args.registration_folder,
        logging,
        overwrite=args.overwrite,
        gaussian_kernel=args.gaussian_kernel,
        percentile_threshold=args.percentile_threshold,
        threshold_type=args.threshold_type,
        obj_path=args.obj_path,
        overwrite_registration=args.overwrite_registration,
    )
    def __init__(self, file: str, fix_seed: bool = False):
        super(preprocess, self).__init__()
        cwd = os.getcwd()
        self.df = pd.read_excel(os.path.join(cwd, "data", file))
        self.coords = self.standardize(self.df)
        self.vertices = self.generate_graph(self.coords)

        ind_list = self.df.iloc[:,
                                6:8].drop_duplicates(keep='last').index.values
        df_drop = self.df.iloc[ind_list, :].copy()
        df_drop.reset_index(inplace=True)
        df_drop.drop(['Unnamed: 0'], axis=1, inplace=True)

        if fix_seed:
            random.seed(27)

        self.features = self.preset_features(df_drop)
        self.targets = self.preset_targets(df_drop)
        self.ids = self.preset_ids(df_drop)

        self.JSON_FILE = [
            MakeDictionary_GR(all_vertice, i) for i in range(len(id_array))
        ]
        self.JSON_SHUFFLE = sorted(self.JSON_FILE, key=lambda k: random.random())
        self.JSON_TRAIN = [self.JSON_SHUFFLE[i] for i in range(0, 450)]
        self.JSON_VAL = [self.JSON_SHUFFLE[i] for i in range(450, 500)]
        self.JSON_TEST = [
            self.JSON_SHUFFLE[i] for i in range(500, len(self.JSON_SHUFFLE))
        ]

    def mddata(self, inf='md.in', outf='md.out'):
        if self.md_init is None:
            try:
                return md.Md(inf, outf)
            except FileNotFoundError:
                print("No MD data found in {}".format(os.getcwd()))
                return self.md_init
    def POST(self, type):
        file = web.input(avatar={}, background={})

        file_dir = (os.getcwd() + "/static/uploads/" +
                    session_data['user']['username'])

        if not os.path.exists(file_dir):
            os.makedirs(file_dir)

        if "avatar" in file or "background" in file:
            filepath = file[type].filename.replace('\\', '/')
            filename = filepath.split('/')[-1]
            with open(file_dir + "/" + filename, 'wb') as f:
                f.write(file[type].file.read())

            update = {}
            update['type'] = type
            update['img'] = ('/static/uploads/' +
                             session_data['user']['username'] + "/" + filename)
            update['username'] = session_data['user']['username']

            account_model = LoginModel.LoginModel()
            update_avatar = account_model.update_image(update)

        raise web.seeother('/settings')
Example #5
def make_at(path, dir_name):
    original_path = os.getcwd()
    try:
        os.chdir(path)
        os.mkdir(dir_name)
    except OSError as e:
        print(e, file=sys.stderr)
        raise
    finally:
        os.chdir(original_path)
Example #6
 def __init__(self, cwd=None, env=None):
     """
     Parameters:
         
         - cwd (opt, str): the Miniconda installation path.
             Defaults to os.getcwd()
         
         - env (opt): a YML env file. If None provided, can't install
             environment.
     """
     
     self.log = logger.InstallLogger(__name__).gen_logger()
     
     cwd = cwd or os.getcwd()
     self.set_install_folder(cwd)
     
     self.set_miniconda_base_web_link(system.base_miniconda_web_link)
     
     self.set_miniconda_web_file(
         "Miniconda3-latest-{}-{}.{}".format(
             system.platform,
             system.bits,
             system.miniconda_file_extension
             )
         )
     
     self.set_miniconda_download_link(
         urlparse.urljoin(
             self.get_miniconda_base_web_link(),
             self.get_miniconda_web_file()
             )
         )
     
     self.set_miniconda_install_file(
         os.path.join(
             self.get_install_folder(),
             self.get_miniconda_web_file()
             )
         )
     
     self.set_miniconda_install_folder(
         os.path.join(
             self.get_install_folder(),
             system.default_miniconda_folder
             )
         )
     
     self.set_env_file(env)
     
     return
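# A minimal standalone sketch of how the download link above is assembled with
# urljoin; the base link and file name below are illustrative stand-ins for the
# values provided by the project's `system` module.
from urllib.parse import urljoin

base_link = "https://repo.anaconda.com/miniconda/"
web_file = "Miniconda3-latest-Linux-x86_64.sh"
print(urljoin(base_link, web_file))
# -> https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh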
Example #7
def level(cwd=None):
    """
    Return the level corresponding to the folder `cwd`.
    """
    if cwd is None:
        cwd = os.getcwd()
    pp = cwd.split(os.sep)[-1]
    ll = ''
    if pp.isdigit():
        ll = 'day'
    else:
        numstr = [str(i) for i in range(10)]
        ll = pp.strip(''.join(numstr))
    return ll
Example #8
def level(cwd=None):
    """
    Return the level corresponding to the folder `cwd`.
    """
    if cwd is None:
        cwd = os.getcwd()
    leaf = cwd.split(os.sep)[-1]
    ll = ''
    if leaf.isdigit():
        ll = 'day'
    else:
        for pattern in level_patterns_s[::-1]:
            pp = re.compile(pattern)
            m = pp.match(leaf)
            if m is not None:
                ll = m.groups()[0]
                break
    return ll
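# A quick usage sketch with a hypothetical path: an all-digit leaf folder maps
# to the 'day' level; other names fall through to the pattern matching above,
# which relies on the module-level `level_patterns_s` list (not shown here).
print(level('/home/user/puzzles/03'))  # -> 'day'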
            os.makedirs(output_folder)

        # For each common covariate, find average betas and pvalues and save to file
        for covar in common_covars:
            avg_beta = average_results(covar, root_folder, site_list, BETA)
            output_path = os.path.join(output_folder,
                                       get_img_fname(BETA, covar))
            avg_beta.to_filename(output_path)
            log.info(
                'Saved average beta weights for `{covar}` to {path}'.format(
                    covar=covar, path=output_path))

            avg_pval = average_results(covar, root_folder, site_list, PVAL)
            output_path = os.path.join(output_folder,
                                       get_img_fname(PVAL, covar))
            avg_pval.to_filename(output_path)
            log.info('Saved average p-values for `{covar}` to {path}'.format(
                covar=covar, path=output_path))

    else:
        log.error('No sites found')


if __name__ == '__main__':
    if len(sys.argv) > 1:
        folder = sys.argv[1]
    else:
        folder = os.getcwd()
    log.info('Root folder: ' + folder)
    main(folder)
# This is the Crawler App

import sys
import os

DEBUG = os.environ.get('DEBUG', False)

#print(sys.path)
#print(os.getcwd())

if DEBUG:
    print(sys.path)
    print(os.getcwd())

def crawler():
    pass
import sys, os
cwd = os.getcwd()
addedpath = cwd + "/periodic_kdtree/"
sys.path.append(addedpath)
print(sys.path)

import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['image.cmap'] = 'autumn'
import periodic_kdtree
from periodic_kdtree import PeriodicCKDTree

import scipy.spatial
import time
from mpl_toolkits.mplot3d import Axes3D
import pickle
import gzip
import networkx as netx

def read_input_parameters(param_filename='Parameters.in'):
    input_parameters = np.genfromtxt(param_filename,delimiter='=',dtype=str)

    # input_parameter_names, input_parameter_values = np.split( input_parameters, [1], axis=1)
    input_parameter_names = input_parameters[:,0]
    input_parameter_values = input_parameters[:,1]
    parameters = dict({})
Example #12
import os
import sys

cwd = sys.argv[1] if len(sys.argv) > 1 else os.getcwd()

# Walk the chosen directory and print the file names found in each folder.
for _, _, filenames in os.walk(cwd):
    print(filenames)
Example #13
        os.remove('ic/ic1800x1800.txt')
    # txt=load_buffer('ic/ic1800x1800.npz')[0]#,allow_pickle=True)
    return txt


def get_txt_lst(txt_id1, width, height, worker_dir, **kwargs):
    txt_in = download_txt(txt_id1, worker_dir, **kwargs)
    array_lst = chunk_array(txt_in, width, height, typeout='float64')
    return array_lst


def get_txt(txt_id1, txt_id2, width, height, worker_dir, **kwargs):
    array_lst = get_txt_lst(txt_id1, width, height, worker_dir, **kwargs)
    # N=len(array_lst)
    try:
        txt = array_lst[txt_id2]
    except IndexError as e:
        import random
        print(f'IndexError for {(width,txt_id1,txt_id2)}')
        print(e)
        txt_id2 = random.randint(0, len(array_lst) - 1)
        print(f'Choosing txt_id2={txt_id2}...')
        txt = array_lst[txt_id2]  #-1]
    return txt


if __name__ == '__main__':
    worker_dir = os.getcwd()
    for txt_id in sys.argv[1:]:
        download_txt(txt_id, worker_dir)
def img_wscore_wrapper(scans,
                       mask=None,
                       outdir=None,
                       outnm='TFMd',
                       save_style='3d'):
    ''' Will transform images by using mixture modeling to determine a "normal"
    distribution, and then normalizing ALL values to values along the "normal" 
    distribution. This procedure is done voxelwise, and voxels that do not have
    a bimodal distribution are simply z-scored.

    NOTE: Input scans should be in the same space (i.e. a standard space) and
    should be smoothed for best results. 

    NOTE: Providing an image mask (see arguments below) will speed up the 
    procedure quite a bit.

    Parameters
    ----------
    scans : list
        A list of (str) paths to input images.

    mask: str, optional
        If a path to a mask image is passed, all images will be masked using
        the mask image. Note that the mask image must be a binary image in the 
        same space as images in "scans". Passing a mask image will greatly speed 
        up computation. Passing None will perform transformation across all 
        voxels. 

    outdir: str, optional
        Where to save the images to after transformation. If None is passed,
        images will output to current working directory

    outnm: str, optional, default 'TFMd'
        String to append to the front of all images created post-transformation.

    save_style: str, optional, default '3d'
        String describing in what format to save files.
        Must be one of::

            '3d': Save each new scan individually.
            '4d': Save all new scans into a single 4D image.

        Note that in the '3d' case, image filenames will contain integers
        reflecting the input order in scans. For example, the image pointed to
        by the third path in scans will be labeled as "2".


    '''

    if save_style not in ['3d', '4d']:
        raise IOError("save style must be set to '3d' or '4d'")

    if not outdir:
        print(
            'WARNING: No output directory passed. Saving images to current working directory'
        )
        outdir = os.getcwd()

    print('loading images')
    #     if input_style=='nilearn':
    i4d, aff = load_images_nil(scans)
    #     elif input_style=='nib':
    #         i4d,aff = load_images_nib(scans,verbose)

    masked = False
    if mask:
        if type(mask) == str:
            if os.path.isfile(mask):
                print('masking')
                i2d, mskr = mask_nil(i4d, aff, mask)
                masked = True
            else:
                raise IOError('could not find any image at path: %s' % mask)
    else:
        print('setting to 2D')
        i2d, old_shape = to_2d(i4d)

    print('beginning transformation')
    transformed = img_wscore(i2d)

    print('reverting back to image format')
    if masked:
        tfm_4d = mskr.inverse_transform(transformed).get_data()
    else:
        tfm_4d = back_to_4d(transformed, old_shape)

    if save_style == '4d':
        tfm_img = nib.Nifti1Image(tfm_4d, aff)
        flnm = os.path.join(outdir, outnm)
        tfm_img.to_filename(flnm)
    elif save_style == '3d':
        for i in range(tfm_4d.shape[-1]):
            tfm_img = nib.Nifti1Image(tfm_4d[:, :, :, i], aff)
            flnm = os.path.join(outdir, outnm + '_%s' % i)
            tfm_img.to_filename(flnm)
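# A hedged usage sketch with hypothetical file paths: the scans and the binary
# mask are assumed to be smoothed and in the same standard space, as the
# docstring above requires; the output is written as a single 4D image.
scan_paths = ['/data/sub-01_pet.nii.gz', '/data/sub-02_pet.nii.gz']
img_wscore_wrapper(scan_paths,
                   mask='/data/brain_mask.nii.gz',
                   outdir='/data/wscored',
                   outnm='TFMd',
                   save_style='4d')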
Example #15
    type=str,
    nargs="+",
    required=True,
    help="Text file listing all HDF5 Dataset to use for training.")

argument_parser.add_argument('-d',
    '--validation_data',
    type=str,
    nargs="+",
    required=True,
    help="Text file listing all HDF5 Dataset to use for validation.")

argument_parser.add_argument(
    '--hdf_dir',
    type=str,
    default=os.getcwd(),
    help="Path to the folder to store the hdf dataset.")


if __name__ == "__main__":

    # set the rng to defined settings
    # from keras_impl import seedsettings
    # 
    from trainkeras import TrainKeras as TrainLib

    trainer = TrainLib()
    
    args = argument_parser.parse_args()
Example #16
	return None


#### Args Parser ####

# add parser options
parser = OptionParser()
parser.add_option('-m', '--volume', dest='volume', help='mount point')
parser.add_option('-p', '--path', dest='isopath', help='path of iso')

(opts, args) = parser.parse_args()

if opts.isopath is not None:
	repo = opts.isopath
else:
	repo = os.getcwd()
if opts.volume is not None:
	# strip trailing / from the end of the mount point
	root = re.sub(r'/+$', '', opts.volume)
else:
	pass

# ? reinstall directions
# print(opts.volume, opts.isopath)

#### Get Path ####
part = ""

# Add an intervention handle afterwards
for line in open('/proc/mounts').readlines():
	mnt = line.split()
    def cwd(self):
        print('os.getcwd():', os.getcwd())
Example #18
def sequence(parameters,
             subNumber,
             subID=None,
             simulate=False,
             nMax=None,
             win=None,
             path=None,
             oxi=None):
    """Run the entire task sequence.

    Parameters
    ----------
    parameters : dict
        Dictionary containing task parameters.
    subNumber : int
        Subject number.
    subID : str or None, default is `None`
        Subject ID.
    simulate : boolean, default is `False`
        Simulate data based on random sampling.
    nMax : int or None, default is `None`
        If provided, limit the number of trials to nMax. To be used for testing
        purposes.
    win : psychopy Window, default is `None`
        The window in which to display the task.
    path : str or None, default is `None`
        Folder containing the task files and Result folder.
    oxi : Oximeter instance or None, default is `None`
        The recording device.

    Notes
    -----
    If recording, will store data at the end of each block. The final dataframe
    is saved as `result_df`.
    """
    if path is None:
        path = os.getcwd()
    if oxi:
        oxi.setup()
    result_df = pd.DataFrame([])

    # Generate blocks orders
    arousing = np.array([1, 3, 1, 3, 1, 3])
    nonarousing = np.array([2, 4, 2, 4, 2, 4])
    np.random.shuffle(arousing)
    np.random.shuffle(nonarousing)

    if (subNumber % 2) == 0:

        blockOrder = []
        for i in range(6):
            blockOrder.append(arousing[i])
            blockOrder.append(nonarousing[i])

    elif (subNumber % 2) != 0:

        blockOrder = []
        for i in range(6):
            blockOrder.append(nonarousing[i])
            blockOrder.append(arousing[i])
    else:
        raise ValueError('Invalid subject number.')

    # Generate learning times
    timeList = [[30, 60, 90], [30, 90, 60], [60, 30, 90], [60, 90, 30],
                [90, 30, 60], [90, 60, 30]]
    nTime = timeList[(subNumber % 6)]

    # Generate Learning/Distractors lists
    if (subNumber % 2) == 0:
        learningList, distractorList = [1, 3, 5], [2, 4, 6]
    else:
        learningList, distractorList = [2, 4, 6], [1, 3, 5]

    # Read dataframes
    df_1 = pd.read_csv(path + '/Material/1_WordList_ValHighAroHigh.csv',
                       dtype={'List': 'int'})
    df_2 = pd.read_csv(path + '/Material/2_WordList_ValHighAroLow.csv',
                       dtype={'List': 'int'})
    df_3 = pd.read_csv(path + '/Material/3_WordList_ValLowAroHigh.csv',
                       dtype={'List': 'int'})
    df_4 = pd.read_csv(path + '/Material/4_WordList_ValLowAroLow.csv',
                       dtype={'List': 'int'})
    words_df = {'1': df_1, '2': df_2, '3': df_3, '4': df_4}

    # Run the sequence
    end = False
    for nBlock, block in enumerate(blockOrder):

        if end:
            break

        # Show instructions, break and wait for space press
        if nBlock == 0:
            instructions(parameters=parameters, show_intructions=True, win=win)
        else:
            instructions(parameters=parameters, win=win)

        if oxi:
            oxi.setup()
            oxi.read(nSeconds=1)

        df = words_df[str(block)]

        # Security check
        if (len(df[df.List == learningList[0]]) != len(
                df[df.List == distractorList[0]])):
            raise ValueError("""Inconsistent list size Learning/Distractor
                                    per condition""")

        # Find the index for learning and distractor
        # Check if both values are present in the List column
        if ((learningList[0] in df.List.unique()) &
            (distractorList[0] in df.List.unique())):
            indexLearning, indexDistractor = learningList[0], distractorList[0]
            timeLearning = nTime[0]

        # Check if both values are present in the List column
        elif ((learningList[1] in df.List.unique()) &
              (distractorList[1] in df.List.unique())):
            indexLearning, indexDistractor = learningList[1], distractorList[1]
            timeLearning = nTime[1]

        # Check if both values are present in the List column
        elif ((learningList[2] in df.List.unique()) &
              (distractorList[2] in df.List.unique())):
            indexLearning, indexDistractor = learningList[2], distractorList[2]
            timeLearning = nTime[2]
        else:
            raise ValueError('Invalid lists')

        # Use the index to select the learning and distractors words
        learning_df = df[df.List == indexLearning].reset_index()
        distractor_df = df[df.List == indexDistractor].reset_index()

        # Drop the row corresponding to learning list AND distractor list
        df = df[df.List != indexLearning]
        df = df[df.List != indexDistractor]
        words_df[str(block)] = df

        # Run the learning phase
        if not simulate:
            oxi = learning(listStudied=list(learning_df.Word),
                           timeLearning=timeLearning,
                           win=win,
                           oxi=oxi)

        # Run trials
        positionLength = int(len(learning_df) / 2)
        wordPosition = np.hstack(
            (np.zeros(positionLength), np.ones(positionLength)))
        np.random.shuffle(wordPosition)

        for i in range(len(learning_df)):

            word1 = learning_df.iloc[i].Word
            word2 = distractor_df.iloc[i].Word

            if simulate:
                mean = np.random.normal(2, 0.5, 1)[0]
                std = np.random.normal(2, 0.2, 1)[0]
                rt = np.random.normal(mean, std, 1)[0]
                accuracy = np.random.choice([True, False])
                keyPressed = np.random.choice(['left', 'right'])
                confidence = np.random.choice([1, 2, 3, 4, 5, 6])
                confidence_rt = np.random.normal(mean, std, 1)[0]
                # Placeholders so the result dataframe below can also be
                # filled in simulation mode.
                expectedCorrect = keyPressed
                markerStart = np.nan
            else:
                expectedCorrect, accuracy, keyPressed, rt, confidence, \
                    confidence_rt, oxi, markerStart, word1, word2 = \
                    trial(parameters, word1, word2,
                          shift=wordPosition[i], win=win, oxi=oxi)

            # Update the result Dataframe
            result_df = result_df.append(
                {
                    'RT': rt,
                    'Expected': expectedCorrect,
                    'Accuracy': accuracy,
                    'LearningTime': timeLearning,
                    'nBlock': nBlock,
                    'Confidence': confidence,
                    'Confidence_RT': confidence_rt,
                    'TrialNumber': i,
                    'MarkerStart': markerStart,
                    'Valence': parameters['Conditions'][str(block)][0],
                    'Arousal': parameters['Conditions'][str(block)][1],
                    'KeyPressed': keyPressed,
                    'LeftWord': word1,
                    'RightWord': word2
                },
                ignore_index=True)

            # If nMax elapsed end the task here
            if nMax:
                if i >= nMax:
                    end = True
                    break

        # Save the result_df after each block
        result_df.to_csv(path + '/Results/Subject_' + str(subID) + '.txt')

        if oxi:
            np.save(
                path + '/Results/Subject_' + str(subID) + str(subNumber) +
                str(nBlock) + '.npy', [oxi.recording, oxi.triggers])

    return result_df
Example #19
def clean_project_directory():
    print(os.getcwd())
    if os.path.isdir(os.getcwd() + '/metastore_db'):
        shutil.rmtree(os.getcwd() + '/metastore_db', ignore_errors=True)
    if os.path.exists(os.getcwd() + '/derby.log'):
        os.remove(os.getcwd() + '/derby.log')
Example #20
    def apply(registery, funcname, num_workers=0, **kwargs):
        print(ub.color_text('--- APPLY {} ---'.format(funcname), 'white'))
        print(' * num_workers = {!r}'.format(num_workers))

        if num_workers == 0:
            processed_repos = []
            for repo in registery.repos:
                print(ub.color_text('--- REPO = {} ---'.format(repo), 'blue'))
                try:
                    getattr(repo, funcname)(**kwargs)
                except DirtyRepoError:
                    print(
                        ub.color_text('Ignoring dirty repo={}'.format(repo),
                                      'red'))
                processed_repos.append(repo)
        else:
            from concurrent import futures
            # with futures.ThreadPoolExecutor(max_workers=num_workers) as pool:
            with futures.ProcessPoolExecutor(max_workers=num_workers) as pool:
                tasks = []
                for i, repo in enumerate(registery.repos):
                    future = pool.submit(worker, repo, funcname, kwargs)
                    future.repo = repo
                    tasks.append(future)

                processed_repos = []
                for future in futures.as_completed(tasks):
                    repo = future.repo
                    print(
                        ub.color_text('--- REPO = {} ---'.format(repo),
                                      'blue'))
                    try:
                        repo = future.result()
                    except DirtyRepoError:
                        print(
                            ub.color_text(
                                'Ignoring dirty repo={}'.format(repo), 'red'))
                    else:
                        print(repo._getlogs())
                    processed_repos.append(repo)

        print(
            ub.color_text('--- FINISHED APPLY {} ---'.format(funcname),
                          'white'))

        SHOW_CMDLOG = 1

        if SHOW_CMDLOG:

            print('LOGGED COMMANDS')
            import os
            ORIG_CWD = MY_CWD = os.getcwd()
            for repo in processed_repos:
                print('# --- For repo = {!r} --- '.format(repo))
                for t in repo._logged_cmds:
                    cmd, cwd = t
                    if cwd is None:
                        cwd = os.getcwd()
                    if cwd != MY_CWD:
                        print('cd ' + ub.shrinkuser(cwd))
                        MY_CWD = cwd
                    print(cmd)
            print('cd ' + ub.shrinkuser(ORIG_CWD))
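# A minimal standalone sketch of the executor pattern used in apply() above:
# submit one task per item, tag each future with the item it belongs to, then
# collect results with as_completed (the worker and items here are illustrative).
from concurrent import futures


def square(value):
    return value * value


if __name__ == '__main__':
    items = [1, 2, 3, 4]
    with futures.ProcessPoolExecutor(max_workers=2) as pool:
        tasks = []
        for item in items:
            future = pool.submit(square, item)
            future.item = item
            tasks.append(future)
        for future in futures.as_completed(tasks):
            print(future.item, '->', future.result())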
    parser.add_argument(
        '--puzzle',
        help='Which puzzle to generate.  All are generated if not given.',
        type=str)
    parser.add_argument('--puzzle_folder',
                        help='Where to dump puzzle files.',
                        type=str)
    parser.add_argument('--preview',
                        help='This is used for debugging purposes.',
                        type=str)
    parser.add_argument(
        '--solve',
        help=
        'If true, try to generate a solution to the puzzle in the form of a stab-chain.',
        action='store_true')

    args = parser.parse_args()

    puzzle_folder = args.puzzle_folder if args.puzzle_folder is not None else os.getcwd()
    os.makedirs(puzzle_folder, exist_ok=True)

    for puzzle_class in Puzzle.__subclasses__():
        puzzle = puzzle_class()
        if args.puzzle is not None and puzzle.Name() != args.puzzle:
            continue
        print(
            '======================================================================='
        )
        print('Processing: ' + puzzle.Name())
        puzzle.Generate(puzzle_folder, args.solve, args.preview)
Example #22
def roistat_tomat(roistat, localizer, conditions, conditions_keys, contrast, thr, fwhm, dof, subjs_id, subjs_sex, resolution, affine, save=False, save_dir=None):
    df = roistat.df
    odf = DataFrame(index=subjs_id)
    odf = odf.reset_index().merge(df.reset_index(), how='left', on='index').set_index('index')
    d = {}
    d['info'] = {}
    d['subj'] = {}
    d['geo'] = {}
    d['mag'] = {}
    d['rlat'] = {}

    d['info']['name'] = roistat.name
    d['info']['localizer'] = localizer
    d['info']['cond'] = np.array(conditions, dtype=object)
    d['info']['contrast'] = contrast
    d['info']['thr'] = thr
    d['info']['fwhm'] = fwhm
    d['info']['flag'] = np.array((~np.isnan(odf['size'])).astype(bool))
    d['info']['dof'] = dof
    d['subj']['id'] = np.array(subjs_id, dtype=object)
    d['subj']['sex'] = subjs_sex
    d['geo']['size'] = (odf['size']*resolution[0]*resolution[1]*resolution[2]).tolist()
    peak = odf['peak']
    center = odf['center']
    gcenter = odf['gcenter']
    null_mask = odf['size'].isnull()
    t = odf[null_mask]
    dummy_series = Series([[np.nan]*3]*len(t),index=t.index)
    peak[null_mask] = dummy_series
    center[null_mask] = dummy_series
    gcenter[null_mask] = dummy_series
    peak = np.array(peak.tolist())
    center = np.array(center.tolist())
    gcenter = np.array(gcenter.tolist())
    d['geo']['peak'] = apply_affine_group(peak, affine, float)
    d['geo']['center'] = apply_affine_group(center, affine, float)
    d['geo']['gcenter'] = apply_affine_group(gcenter, affine, float)

    d['geo']['subj_dice'] = roistat.between_subj_relations['dice'].relation_matrix

    zkeys = ['Z'+key for key in conditions_keys]
    tkeys = ['T'+key for key in conditions_keys]
    bkeys = ['B'+key for key in conditions_keys]
    ekeys = ['E'+key for key in conditions_keys]

    d['mag']['pt'] = np.vstack([getattr(odf, key) for key in ['peak_'+key for key in tkeys]]).T
    d['mag']['pz'] = np.vstack([getattr(odf, key) for key in ['peak_'+key for key in zkeys]]).T
    d['mag']['pb'] = np.vstack([getattr(odf, key) for key in ['peak_'+key for key in bkeys]]).T
    d['mag']['pe'] = np.vstack([getattr(odf, key) for key in ['peak_'+key for key in ekeys]]).T

    d['mag']['rt'] = np.vstack([getattr(odf, key) for key in ['roi_'+key for key in tkeys]]).T
    d['mag']['rz'] = np.vstack([getattr(odf, key) for key in ['roi_'+key for key in zkeys]]).T
    d['mag']['rb'] = np.vstack([getattr(odf, key) for key in ['roi_'+key for key in bkeys]]).T
    d['mag']['re'] = np.vstack([getattr(odf, key) for key in ['roi_'+key for key in ekeys]]).T

    d['mag']['stm'] = np.vstack([getattr(odf, key) for key in ['mean_'+key for key in tkeys]]).T
    d['mag']['szm'] = np.vstack([getattr(odf, key) for key in ['mean_'+key for key in zkeys]]).T
    d['mag']['sbm'] = np.vstack([getattr(odf, key) for key in ['mean_'+key for key in bkeys]]).T
    d['mag']['stsd'] = np.vstack([getattr(odf, key) for key in ['std_'+key for key in tkeys]]).T
    d['mag']['szsd'] = np.vstack([getattr(odf, key) for key in ['std_'+key for key in zkeys]]).T
    d['mag']['sbsd'] = np.vstack([getattr(odf, key) for key in ['std_'+key for key in bkeys]]).T

    d['rlat']['overlap'] = roistat.anat_relations['overlap_HOcort_thr0'].relation_matrix

    if save:
        if save_dir is None:
            save_dir = os.getcwd()
        save_dir = os.path.join(save_dir, roistat.name)
        make_dir(save_dir)
        outfile = os.path.join(save_dir, 'roi.mat')
        sio.savemat(outfile, {'roi':d})
    return d
import os
import os.path
from PIL import Image
work_dir = os.getcwd()


def imgstich():

    try:
        empty = []
        image_dir = os.path.abspath("/home/caratred/Downloads/combine")
        print(image_dir)
        # list all files in directory
        files = os.listdir(image_dir)
        print(files)
        # # get all PNGs
        # png_files = filter(lambda x: x.endswith(".jpeg"), files)
        # # make file paths absolute
        # print(png_files,".....///")
        # image_files = map(lambda x: os.sep.join([image_dir, x]),files)
        for x in files:
            empty.append(os.sep.join([image_dir, x]))

        print(empty, ",,,,,,")

        n_files = len(empty)
        # print (n_files)

        target_img = None
        n_targets = 0
        collage_saved = False
    # 	if rm_father_ic:
    # 		os.remove('ic/ic1800x1800.npz')
    return txt


def get_txt_lst(txt_id1, width, height, worker_dir, **kwargs):
    txt_in = download_txt(txt_id1, worker_dir, **kwargs)
    array_lst = chunk_array(txt_in, width, height, typeout='float64')
    return array_lst


def get_txt(txt_id1, txt_id2, width, height, worker_dir, **kwargs):
    array_lst = get_txt_lst(txt_id1, width, height, worker_dir, **kwargs)
    # N=len(array_lst)
    try:
        txt = array_lst[txt_id2]
    except IndexError as e:
        import random
        print(f'IndexError for {(width,txt_id1,txt_id2)}')
        print(e)
        txt_id2 = random.randint(0, len(array_lst) - 1)
        print(f'Choosing txt_id2={txt_id2}...')
        txt = array_lst[txt_id2]  #-1]
    return txt


if __name__ == '__main__':
    worker_dir = os.getcwd()
    for txt_id in sys.argv[1:]:
        download_txt(txt_id, worker_dir)
Example #25
import os
import sys

from flask import Flask
from flaskext.sqlalchemy import SQLAlchemy
from flaskext.script import Manager
from flask.ext.celery import install_commands, Celery
from flaskext.mail import Mail

__all__ = [
    'app',
    'db',
    'mail',
]

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

if 'INSTANCE_PATH' in os.environ:
    instance_path = os.environ['INSTANCE_PATH']
    cwd = os.getcwd()
    if not instance_path.startswith('/'):
        instance_path = os.path.abspath(os.path.join(cwd, instance_path))
else:
    instance_path = os.path.abspath(os.path.join(os.path.dirname(__file__)))


app = Flask(__name__, instance_relative_config=True, instance_path=instance_path)

app.config.from_pyfile('application.cfg')
db = SQLAlchemy(app)


from wedding.utils import debug

debug.log_queries(app)