def check_csv(file, force):

    with open(file) as f:
        content = f.read()

        # iterate over the lines of the file (rather than whitespace-separated
        # tokens) so stray spaces are tolerated and the reported line number
        # matches the file
        for line, row in enumerate(content.strip().split('\n'), start=1):
            dwi_mask = [element.strip() for element in row.split(',')
                        if element.strip()]  # handling w/space
            if len(dwi_mask) != 2:
                raise FileNotFoundError(
                    f'Every line must have exactly two comma-separated entries: check line {line} in {file}'
                )

            dirCheckFlag = 1
            for img in dwi_mask:
                if not exists(img):
                    raise FileNotFoundError(
                        f'{img} does not exist: check line {line} in {file}')

                elif dirCheckFlag:
                    # create DTI and harmonization directory
                    dtiPath = pjoin(dirname(img), 'dti')
                    check_dir(dtiPath, force)

                    harmPath = pjoin(dirname(img), 'harm')
                    check_dir(harmPath, force)

                    dirCheckFlag = 0
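
# A minimal usage sketch (hypothetical paths; assumes the os.path helpers and
# the check_dir() utility imported by the surrounding module). Each line of
# the CSV is expected to hold exactly two comma-separated paths, e.g. a DWI
# and its mask:
#
#     /data/sub01/dwi.nii.gz,/data/sub01/mask.nii.gz
#     /data/sub02/dwi.nii.gz,/data/sub02/mask.nii.gz
#
# check_csv('/data/caselist.csv', force=False)
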
def get_log_handler(logpath=None, cfg=None):
    """
    Create a handler object to go in the logger
    """
    if logpath == '':
        logpath = None

    try:
        dcfg = get_config()
    except Exception:
        dcfg = None

    envname = os.getenv('CRAWL_LOG')

    if logpath is not None:
        final_logpath = logpath
    elif envname:
        final_logpath = envname
    elif cfg:
        final_logpath = cfg.get_d('crawler', 'logpath', U.default_logpath())
    elif dcfg:
        final_logpath = dcfg.get_d('crawler', 'logpath', U.default_logpath())
    else:
        final_logpath = U.default_logpath()

    if cfg:
        maxBytes = cfg.get_size('crawler', 'logsize', 10*1024*1024)
        backupCount = cfg.get_size('crawler', 'logmax', 5)
        archdir = cfg.get_d('crawler', 'archive_dir',
                            U.pathjoin(U.dirname(final_logpath),
                                       'hpss_log_archive'))
    elif dcfg:
        maxBytes = dcfg.get_size('crawler', 'logsize', 10*1024*1024)
        backupCount = dcfg.get_size('crawler', 'logmax', 5)
        archdir = dcfg.get_d('crawler', 'archive_dir',
                             U.pathjoin(U.dirname(final_logpath),
                                        'hpss_log_archive'))
    else:
        maxBytes = 10*1024*1024
        backupCount = 5
        archdir = U.pathjoin(U.dirname(final_logpath), 'hpss_log_archive')

    fh = util.ArchiveLogfileHandler(final_logpath,
                                    maxBytes=maxBytes,
                                    backupCount=backupCount,
                                    archdir=archdir)

    strfmt = "%" + "(asctime)s [%s] " % U.hostname + '%' + "(message)s"
    fmt = logging.Formatter(strfmt, datefmt="%Y.%m%d %H:%M:%S")
    fh.setFormatter(fmt)
    fh.handleError = raiseError
    return fh
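
# A minimal usage sketch, assuming the surrounding module's util/U helpers are
# importable; the returned handler is attached to an ordinary stdlib logger
# (logger name and path below are illustrative):
#
# import logging
# log = logging.getLogger('crawler')
# log.setLevel(logging.INFO)
# log.addHandler(get_log_handler(logpath='/tmp/crawl.log'))
# log.info('crawler started')
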
Example #3
    def organize_directory(self):
        if (self.scenes):
            # Iterate Over Scenes List
            for curr, scene in enumerate(self.scenes, 1):
                # Create Scene Folder
                util.mkdir(
                    util.form_path([self.image_path,
                                    SCENE.format(curr)]))

                # Move Images To Scene Folder
                for image in scene:
                    try:
                        # Generate Source and Destination Paths
                        src = util.absolute(image)
                        dst = util.normalize(
                            util.form_path([
                                util.dirname(image),
                                SCENE.format(curr),
                                util.basename(image)
                            ]))

                        # Move Images To Scene Folder
                        util.move(src, dst)
                    except FileNotFoundError:
                        pass

            # Update Prompt
            print("Organized All Images             ")
        else:
            util.perror("spectra: No scenes found to analyze")
Example #4
def verifyNshmForAll(csvFile, N_shm):

    for imgPath in read_imgs_masks(csvFile)[0]:
        directory = dirname(imgPath)
        prefix = basename(imgPath).split('.nii')[0]
        bvalFile = pjoin(directory, prefix + '.bval')
        verifyNshm(N_shm, bvalFile)
Example #5
def attach_to_max(contents):
    """
    Defines commands to send to Max, establishes a connection to its commandPort,
    then sends the code to inject debugpy
    """

    global run_code
    config = contents['arguments']

    # Format the simulated attach response to send it back to the debugger
    # while we set up the debugpy in the background
    attach_code = ATTACH_TEMPLATE.format(
        debugpy_path=debugpy_path,
        hostname=config['debugpy']['host'],
        port=int(config['debugpy']['port'])
    )

    # Format RUN_TEMPLATE to point to the temporary
    # file containing the code to run
    run_code = RUN_TEMPLATE.format(
        dir=dirname(config['program']),
        file_name=split(config['program'])[1][:-3] or basename(split(config['program'])[0])[:-3]
    )

    # then send attach code
    log('Sending attach code to Max')
    send_py_code_to_max(attach_code)
    log('Successfully attached to Max')

    # Then start the max debugging threads
    run(start_debugging, ((config['debugpy']['host'], int(config['debugpy']['port'])),))
Example #6
def select_windows(parameters, paths, merge_flag, obs_tag, syn_tag):
    """
    Selects windows by comparing observed and synthetic traces;
    writes results to a json file

    :param parameters: dictionary passed directly to pyflex.Config
    :param paths.obs: ASDF observed data filename
    :param paths.syn: ASDF synthetic data filename
    :param paths.output: windows will be written to a JSON file with this name
    :param paths.log: information about the quantity and quality of windows
        will be written to a JSON file with this name
    :param obs_tag: observed data are read using this ASDF tag
    :param syn_tag: synthetic data are read using this ASDF tag
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.rank

    cwd = dirname(__file__)

    # read data
    fullpath = join(cwd, paths.obs)
    obs = pyasdf.ASDFDataSet(fullpath, compression=None, mode="a")
    event = obs.events[0]

    # read synthetics
    fullpath = join(cwd, paths.syn)
    syn = pyasdf.ASDFDataSet(fullpath, compression=None, mode="a")

    # generate pyflex.Config objects
    config = {}
    for channel, param in parameters.items():
        config[channel] = pyflex.Config(**param)

    # wrapper is required for ASDF processing
    def wrapped_function(obs, syn):
        return pytomo3d.window.window_on_stream(obs[obs_tag],
                                                syn[syn_tag],
                                                config,
                                                station=obs.StationXML,
                                                event=event,
                                                user_modules=None,
                                                figure_mode=False,
                                                figure_dir=None,
                                                _verbose=False)

    # run window selection
    windows = obs.process_two_files_without_parallel_output(
        syn, wrapped_function)

    # save results
    if rank == 0:
        if merge_flag: windows = merge(windows)
        write_windows_json(paths.output, windows)
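
# A hypothetical invocation sketch: `parameters` maps a channel code to
# pyflex.Config keyword arguments, and `paths` can be any object exposing
# obs/syn/output/log attributes. All names, filenames, and values below are
# illustrative, not taken from the source.
from types import SimpleNamespace

example_parameters = {
    'BHZ': dict(min_period=30.0, max_period=60.0,
                stalta_waterlevel=0.08, tshift_acceptance_level=15.0),
}
example_paths = SimpleNamespace(obs='obs.h5', syn='syn.h5',
                                output='windows.json', log='windows_log.json')
# select_windows(example_parameters, example_paths, merge_flag=True,
#                obs_tag='observed', syn_tag='synthetic')
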
Example #7
def joinBshells(imgPath, ref_bvals_file=None, ref_bvals=None, sep_prefix=None):

    if ref_bvals_file:
        print('Reading reference b-shell file ...')
        ref_bvals = read_bvals(ref_bvals_file)

    print('Joining b-shells for', imgPath)

    imgPath = local.path(imgPath)
    img = load(imgPath._path)
    dim = img.header['dim'][1:5]

    inPrefix = abspath(imgPath).split('.nii')[0]
    directory = dirname(inPrefix)
    prefix = basename(inPrefix)

    bvalFile = inPrefix + '.bval'
    bvecFile = inPrefix + '.bvec'

    if sep_prefix:
        harmPrefix = pjoin(directory, sep_prefix + prefix)
    else:
        harmPrefix = inPrefix

    if not isfile(harmPrefix + '.bval'):
        copyfile(bvalFile, harmPrefix + '.bval')
    if not isfile(harmPrefix + '.bvec'):
        copyfile(bvecFile, harmPrefix + '.bvec')

    bvals = np.array(read_bvals(inPrefix + '.bval'))

    joinedDwi = np.zeros((dim[0], dim[1], dim[2], dim[3]), dtype='float32')

    for bval in ref_bvals:

        # ind= np.where(bval==bvals)[0]
        ind = np.where(abs(bval - bvals) <= BSHELL_MIN_DIST)[0]

        if bval == 0.:
            b0Img = load(inPrefix + '_b0.nii.gz')
            b0 = b0Img.get_data()
            for i in ind:
                joinedDwi[:, :, :, i] = b0

        else:
            b0_bshell = load(harmPrefix + f'_b{int(bval)}.nii.gz').get_data()

            joinedDwi[:, :, :, ind] = b0_bshell[:, :, :, 1:]

    if not isfile(harmPrefix + '.nii.gz'):
        save_nifti(harmPrefix + '.nii.gz', joinedDwi, b0Img.affine,
                   b0Img.header)
    else:
        print(harmPrefix + '.nii.gz', 'already exists, not overwritten.')
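
# A minimal usage sketch (hypothetical filenames): the DWI must have sibling
# .bval/.bvec files, and single-shell volumes named <prefix>_b0.nii.gz and
# <prefix>_b<bval>.nii.gz must already exist next to it (with sep_prefix
# prepended for the non-zero shells, as in the code above):
#
# joinBshells('/data/sub01/dwi.nii.gz',
#             ref_bvals_file='/data/ref_bshell_bvalues.txt',
#             sep_prefix='harmonized_')
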
Example #8
def bscr_test(args):
    """test - run tests

    usage: bscr test [-t py.test|green|nosetests|unittest] [-n] [-d]

    Without -t, we use the first available of py.test, green, nosetests, or
    unittest. With -t, we attempt to run the tests with the specified test
    runner.

    The tests are optimized for py.test. They may not work well under green
    or nose.
    """
    c = U.cmdline([{'opts': ['--dry-run', '-n'],
                    'action': 'store_true',
                    'help': 'see what would happen'},
                   {'name': 'tester',
                    'help': 'select a test runner'}])
    (o, a) = c.parse(args)

    with util.Chdir(util.dirname(__file__)):
        target = util.pj(os.path.dirname(__file__), 'test')
        print("Running tests in %s" % target)
        if o.tester == '':
            if which('py.test'):
                util.run('py.test %s' % target, not o.dryrun)
            elif which('green'):
                util.run('green -v %s' % target, not o.dryrun)
            elif which('nosetests') and importable('nose_ignoredoc'):
                tl = glob.glob(util.pj(target, 'test_*.py'))
                util.run('nosetests -v -c nose.cfg %s' % " ".join(tl),
                         not o.dryrun)
            else:
                tl = glob.glob(util.pj(target, 'test_*.py'))
                for t in tl:
                    util.run("%s -v" % t, not o.dryrun)
                    # p = subp.Popen([t, '-v'])
                    # p.wait()
        elif o.tester == 'py.test':
            util.run('py.test %s' % target, not o.dryrun)
        elif o.tester == 'green':
            util.run('green -v %s' % target, not o.dryrun)
        elif o.tester == 'nose':
            tl = glob.glob(util.pj(target, 'test_*.py'))
            util.run('nosetests -v -c nose.cfg %s' % " ".join(tl),
                     not o.dryrun)
        elif o.tester == 'unittest':
            tl = glob.glob(util.pj(target, 'test_*.py'))
            for t in tl:
                util.run("%s -v" % t, not o.dryrun)
        else:
            raise SystemExit("unrecognized tester: '%s'" % o.tester)
def combine_adjoint_sources(paths, tag, rotate=True, auxiliary_data=False):
    """ 
    Sums adjoint sources from different ASDF files in a linear combination with
    user-supplied weights. Can be useful, for example, if previous data 
    processing, window selection, and misfit measurements steps were carried out
    separately for different pass bands (e.g. long period, short period) or 
    different phases (e.g. body waves, surface waves).

    param paths.input: ASDF files containing adjoint sources
    type paths.input: list
    param paths.weight: corresponding list of JSON files containing weights
    type paths.weight: list
    param paths.output: summed adjoint sources are written to this ASDF file
    type paths.output: str
    """

    cwd = dirname(__file__)

    adjoint_sources_all = []
    weights_all = []

    for filenames in zip_catch(paths.input, paths.weights):
        # read adjoint sources
        fullname = join(cwd, filenames[0])
        adjoint_sources_all += [
            pyasdf.ASDFDataSet(fullname, mpi=False, mode='r')
        ]

        # read user-supplied weights
        fullname = join(cwd, filenames[1])
        weights_all += [read_json(fullname)]

    # create output file
    shutil.copy(paths.input[0], paths.output)
    adjoint_sources_sum = pyasdf.ASDFDataSet(paths.output, mpi=False, mode="a")

    # overwrite output file data with zeros
    for waveform in adjoint_sources_sum.waveforms:
        for trace in waveform[tag]:
            trace.data[:] = 0.

    # weighted sum of adjoint sources
    for weights, adjoint_sources in zip(weights_all, adjoint_sources_all):
        for station in weights:
            for trace1, trace2 in \
                zip(adjoint_sources_sum.waveforms[station][tag],
                    adjoint_sources.waveforms[station][tag]):
                trace1.data += weights[station][trace1.id] * trace2.data

    # write misfit
    pass
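
# A sketch of the weight-file layout this function expects (station and trace
# ids below are illustrative): each JSON file maps a station key to per-trace
# weights, matching the weights[station][trace1.id] lookup above.
example_weights = {
    'II.ABC': {'II.ABC..BHZ': 0.50, 'II.ABC..BHR': 0.25},
    'IU.XYZ': {'IU.XYZ..BHZ': 1.00},
}
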
Example #10
def on_receive_from_debugger(message):
    """
    Intercept the initialize and attach requests from the debugger
    while debugpy is being set up
    """

    # Load message contents into a dictionary
    contents = json.loads(message)

    log('Received from Debugger:', message)

    # Get the type of command the debugger sent
    cmd = contents['command']

    if cmd == 'initialize':
        # Run init request once Maya connection is established and send success response to the debugger
        interface.send(json.dumps(json.loads(
            INITIALIZE_RESPONSE)))  # load and dump to remove indents
        processed_seqs.append(contents['seq'])

    elif cmd == 'attach':
        # time to attach to Maya
        run(attach_to_maya, (contents, ))

        # Change arguments to valid ones for debugpy
        config = contents['arguments']
        new_args = ATTACH_ARGS.format(
            dir=dirname(config['program']).replace('\\', '\\\\'),
            hostname=config['debugpy']['host'],
            port=int(config['debugpy']['port']),
            # filepath=config['program'].replace('\\', '\\\\')
        )

        # Update the message with the new arguments to then be sent to debugpy
        contents = contents.copy()
        contents['arguments'] = json.loads(new_args)
        message = json.dumps(contents)  # update contents to reflect new args

        log("New attach arguments loaded:", new_args)

    # Then just put the message in the debugpy queue
    debugpy_send_queue.put(message)
Example #11
def attach_to_maya(contents):
    """
    Defines commands to send to Maya, and sends the attach code to it.
    """

    global attach_code, Maya_path
    config = contents['arguments']

    # Format the simulated attach response to send it back to the debugger
    # while we set up the debugpy in the background
    attach_code = ATTACH_TEMPLATE.format(
        debugpy_path=debugpy_path,
        hostname=config['debugpy']['host'],
        port=int(config['debugpy']['port']),
        interpreter=config['interpreter'],
    )

    # Copy code to temporary file and start a Maya console with it
    try:
        send_code_to_maya(attach_code)
    except Exception as e:
        # Raising exceptions shows the text in the Debugger's output.
        # Raise an error to show a potential solution to this problem.
        log("Exception occurred: \n\n" + str(e))
        import platform
        module_path = join(dirname(__file__), 'resources', 'module')
        separator = ';' if platform.system() == 'Windows' else ':'
        raise Exception("""
                              Could not connect to Maya.

                Please ensure Maya is running. If this is your first time
                using the debug adapter, ensure the MAYA_MODULE_PATH
                environment variable is set correctly (i.e. contains {0}),
                           then restart Maya and try again.
            """.format(module_path + separator))

    # Then start the Maya debugging threads
    run(start_debugging,
        ((config['debugpy']['host'], int(config['debugpy']['port'])), ))
    def load_priority_list(cls):
        """
        If one or more priority list files are configured, read them and put
        their contents first in the list of Checkables to be processed
        """
        rval = []
        cfg = CrawlConfig.get_config()
        priglob = cfg.get_d('cv', 'priority', '')
        if priglob == '':
            return rval

        pricomp = cfg.get_d('cv', 'completed',
                            U.pathjoin(U.dirname(priglob), 'completed'))

        for pripath in U.foldsort(glob.glob(priglob)):
            with open(pripath, 'r') as f:
                for line in f.readlines():
                    path = line.strip()
                    rval.append(Checkable(path=path, type='f'))
            os.rename(pripath, U.pathjoin(pricomp, U.basename(pripath)))

        return rval
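
# A hypothetical crawler config fragment driving this method: 'priority' is a
# glob of files whose lines are HPSS paths to check first, and each file is
# renamed into 'completed' (defaulting to <priority dir>/completed) once read.
#
#     [cv]
#     priority  = /var/crawler/priority/*.txt
#     completed = /var/crawler/priority/completed
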
def process_traces(parameters, paths, tag1, tag2):
    """
    Performs bandpass filtering and other data processing operations on 
    ASDF data; writes processed waveforms in a new ASDF file or under a new 
    ASDF tag

    :param parameters: dictionary passed directly to pytomo3d
    :param paths.input: input ASDF filename
    :param paths.output: output ASDF filename
    :param tag1: input ASDF waveforms are read using this tag
    :param tag2: output ASDF waveforms are written using this tag
    """

    from mpi4py import MPI

    cwd = dirname(__file__)

    # read data
    fullpath = join(cwd, paths.input)
    ds = pyasdf.ASDFDataSet(fullpath, compression=None, mode="a")
    event = ds.events[0]

    # add event information
    latitude, longitude, origin_time = event_stats(ds)
    parameters['event_longitude'] = longitude
    parameters['event_latitude'] = latitude
    parameters['starttime'] = origin_time + parameters['starttime']
    parameters['endtime'] = origin_time + parameters['endtime']

    # wrapper is required for ASDF processing
    def wrapped_function(stream, inventory):
        parameters.update({"inventory": inventory})
        return pytomo3d.signal.process_stream(stream, **parameters)

    # process data
    ds.process(wrapped_function, paths.output, {tag1: tag2})

    del ds
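
# A hypothetical parameters sketch: apart from the event/time entries filled in
# above, the dictionary is forwarded unchanged to
# pytomo3d.signal.process_stream; keys and values below are illustrative, not
# a definitive list.
example_parameters = {
    'remove_response_flag': True,
    'filter_flag': True,
    'pre_filt': [0.0067, 0.01, 0.02, 0.025],
    'starttime': 0,
    'endtime': 3600,
    'resample_flag': True,
    'sampling_rate': 1.0,
}
# process_traces(example_parameters, paths,
#                tag1='raw_observed', tag2='proc_obs_17_40')
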
Example #15
def start_server():
    app = Application([
        (r'\/*', IndexHandler),
        (r'/ping\/*', PingHandler),
        (r'/cluster\/*', ClusterInfoHandler),
        (r'/appliance\/*', AppliancesHandler),
        (r'/appliance/(%s)\/*' % Appliance.ID_PATTERN, ApplianceHandler),
        (r'/appliance/(%s)/container\/*' % Appliance.ID_PATTERN,
         ContainersHandler),
        (r'/appliance/(%s)/volume\/*' % Appliance.ID_PATTERN,
         ApplianceVolumesHandler),
        (r'/appliance/(%s)/service\/*' % Appliance.ID_PATTERN,
         ServicesHandler),
        (r'/appliance/(%s)/job\/*' % Container.ID_PATTERN, JobsHandler),
        (r'/appliance/(%s)/ui\/*' % Appliance.ID_PATTERN, ApplianceUIHandler),
        (r'/appliance/(%s)/container/(%s)\/*' %
         (Appliance.ID_PATTERN, Container.ID_PATTERN), ContainerHandler),
        (r'/appliance/(%s)/volume/(%s)\/*' %
         (Appliance.ID_PATTERN, PersistentVolume.ID_PATTERN),
         ApplianceVolumeHandler),
        (r'/volume/(%s)\/*' % PersistentVolume.ID_PATTERN,
         GlobalVolumeHandler),
        (r'/static/(.*)', StaticFileHandler,
         dict(path='%s/static' % dirname(__file__))),
        (r'/api', SwaggerAPIHandler),
        (r'/api/ui', SwaggerUIHandler),
    ])
    ssl_options = None
    if config.pivot.https:
        ssl_options = dict(certfile='/etc/pivot/server.pem',
                           keyfile='/etc/pivot/server.key')
    server = tornado.httpserver.HTTPServer(app, ssl_options=ssl_options)
    server.bind(config.pivot.port)
    server.start(config.pivot.n_parallel)
    start_cluster_monitor()
    start_global_scheduler()
    tornado.ioloop.IOLoop.instance().start()
def write_adjoint_traces(misfit_type, misfit_parameters, filter_parameters,
                         paths, obs_tag, syn_tag):
    """
    Makes misfit measurements using observed and synthetic data;
    writes out misfit values and corresponding "adjoint sources"

    :param misfit_type: type of data misfit function, 
         e.g. waveform_difference, multitaper_misfit
    :param misfit_parameters: dictionary passed directly to pyadjoint.Config
    :param paths.obs: ASDF observed data filename
    :param paths.syn: ASDF synthetic data filename
    :param paths.windows: JSON windows filename
    :param paths.adjoint_sources: adjoint_sources will be written to an ASDF
        file with this name
    :param paths.misfit: misfit values will be written to a JSON file with this name
    :param obs_tag: observed data are read using this ASDF tag
    :param syn_tag: synthetic data are read using this ASDF tag
    """

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.rank

    cwd = dirname(__file__)

    # read data
    fullpath = join(cwd, paths.obs)
    obs = pyasdf.ASDFDataSet(fullpath, compression=None, mode="a")
    event = obs.events[0]

    # read synthetics
    fullpath = join(cwd, paths.syn)
    syn = pyasdf.ASDFDataSet(fullpath, compression=None, mode="a")

    # read windows
    fullpath = join(cwd, paths.windows)
    windows = read_json_mpi(fullpath, comm)

    # generate pyadjoint.Config objects
    config = pyadjoint.Config(**misfit_parameters)

    # wrapper is required for ASDF processing
    def wrapped_function(obs, syn):
        # TODO: modify pytomo3d to make the following
        # function call more readable?
        return pytomo3d.adjoint.calculate_and_process_adjsrc_on_stream(
            obs[obs_tag],
            syn[syn_tag],
            windows[obs._station_name],
            obs.StationXML,
            config,
            event,
            misfit_type,
            filter_parameters,
            figure_mode=False,
            figure_dir=None)

    adjoint_sources = obs.process_two_files_without_parallel_output(
        syn, wrapped_function)

    if rank == 0:
        # save as ASDF waveforms
        ds = pyasdf.ASDFDataSet(paths.adjoint_sources, mpi=False, mode="a")
        tag = 'processed_adjoint'
        add_adjoint_source_waveforms(ds, adjoint_sources, tag)
        del ds

        # write misfit
        pass
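
# A hypothetical call sketch: misfit_parameters is handed straight to
# pyadjoint.Config and filter_parameters controls post-filtering of the
# adjoint sources; all keys and values below are illustrative.
#
# write_adjoint_traces(
#     'multitaper_misfit',
#     misfit_parameters={'min_period': 30.0, 'max_period': 60.0},
#     filter_parameters={'pre_filt': [0.0067, 0.01, 0.02, 0.025]},
#     paths=paths, obs_tag='observed', syn_tag='synthetic')
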
Example #17
    def process_images(self):
        if (self.image_list):
            # Initialize Process Counter
            curr = 0

            # Initialize Hash List
            self.hashes = []

            # Initialize Blurred Array
            self.blurred = []

            # Load Image Data Map
            image_data = load_image_data(self.image_path)

            # Error Check Image Data Map
            if (image_data is None):
                image_data = {}

            # Calculate Hash Values
            for image in self.image_list:
                # Create Data Object
                if (not (image in image_data)):
                    image_data[image] = data(lmod=util.get_lmod(image))

                # Calculate Imagehash
                self.hashes.append(imagehash.average_hash(Image.open(image)))

                # End Imagehash Calculation----------------------------------------------------------------------------------------------------------------------------

                # Store Image Name
                input_image = image

                # Store Recent Modification Time
                curr_lmod = util.get_lmod(image)

                # Calculate Blur Coefficient
                if ((image_data[image].vari is None)
                        or (image_data[image].nmax is None)
                        or (image_data[image].rmsv is None)
                        or (image_data[image].lmod < curr_lmod)):
                    # Compute RMS Value
                    loaded_image = Image.open(image).convert('L')
                    image_stats = ImageStat.Stat(loaded_image)
                    image_rms = image_stats.rms[0]

                    # Determine RMS Deficiency
                    if (image_rms < self.rms_threshold):
                        # Create Cache Folder
                        try:
                            util.mkdir(
                                util.form_path([self.image_path, TEMP_FOLD]))
                        except FileExistsError:
                            pass

                        # Create Cache File
                        input_image = util.form_path([
                            util.dirname(util.absolute(image)), TEMP_FOLD,
                            EQ_IMAGE.format(util.basename(image))
                        ])

                        # Equalize Image Histogram
                        image_file = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
                        clahe = cv2.createCLAHE(clipLimit=1.125,
                                                tileGridSize=(4, 4))
                        eq_image = clahe.apply(image_file)
                        cv2.imwrite(input_image, eq_image)

                    # Ignore Future Warnings
                    with warnings.catch_warnings():
                        warnings.filterwarnings("ignore")

                        # Compute Laplace Matrix
                        loaded_image = rgb2gray(io.imread(input_image))
                        laplace_data = laplace(loaded_image, ksize=10)

                    # Store Image Data
                    image_data[image].vari = variance(laplace_data)
                    image_data[image].nmax = np.amax(laplace_data)
                    image_data[image].rmsv = image_rms
                    image_data[image].lmod = curr_lmod

                # Group Blurry Images
                if ((image_data[image].vari < self.var_threshold)
                        and (image_data[image].nmax < self.max_threshold)):
                    self.blurred.append(image)

                # Update Prompt
                print("\rProcessing Images - {}% ".format(
                    int(curr * 100 / len(self.image_list))),
                      end="")
                curr += 1

            # End Variance Computation---------------------------------------------------------------------------------------------------------------------------------

            # Write Computed Data To Data File
            with open(util.form_path([self.image_path, DATA_FILE]),
                      'w') as data_file:
                for image in image_data:
                    if (image in self.image_list):
                        data_file.write("{},{},{},{},{}\n".format(
                            image, image_data[image].vari,
                            image_data[image].nmax, image_data[image].rmsv,
                            image_data[image].lmod))
            # Data File Is Closed Automatically By The With Block

            # End Write Operation--------------------------------------------------------------------------------------------------------------------------------------

            # Initialize Diff List
            self.hash_diffs = []

            # Calculate Hash Differences
            for i in range(len(self.hashes) - 1):
                self.hash_diffs.append(
                    (self.hashes[i + 1] - self.hashes[i]) * self.precision)

            # End Hash Difference Computation--------------------------------------------------------------------------------------------------------------------------

            # Update Prompt
            print("\rProcessed All Images   ")
        else:
            util.perror("spectra: Found no images to process")
Example #18
def scan_cases(path):
    result = []
    # project categories
    project_kinds = util.safely_list_dir(path)
    for project_kind in project_kinds:
        projects = util.safely_list_dir(path, project_kind)

        # iterate over projects
        for project in projects:
            versions = util.safely_list_dir(path, project_kind, project)
            # iterate over versions
            for version in versions:
                pycharm_projects = util.safely_list_dir(
                    path, project_kind, project, version)
                # iterate over PyCharm projects
                for pycharm_project in pycharm_projects:
                    case_dir = util.join(path, project_kind, project, version,
                                         pycharm_project, 'src', 'cases')
                    modules = util.safely_list_dir(case_dir)
                    for m in modules:
                        # a single file acts as a module
                        if util.isfile(util.join(case_dir, m)):
                            if util.suffix(m) == '.py':
                                result.extend(
                                    parsePy2Dict(util.join(case_dir, m),
                                                 util.join(path, project),
                                                 'cases', '', util.filename(m),
                                                 [], None, version,
                                                 pycharm_project,
                                                 project_kind))
                        # a directory acts as a top-level module
                        else:
                            # read the module description from the directory's __init__.py
                            module_desc = ''
                            if util.isfile(
                                    util.join(case_dir, m, '__init__.py')):
                                f = open(util.join(case_dir, m, '__init__.py'))
                                p = ast.parse(f.read()).body
                                module_desc = p[0].value.s if p else ''
                                f.close()
                            for root, dirs, files in util.walk(
                                    util.join(case_dir, m)):
                                for file in files:
                                    if util.suffix(file) == '.py':
                                        modules = util.relative(
                                            util.join(root, file),
                                            case_dir)[0:-1]
                                        package = util.relative(
                                            util.join(root, file),
                                            util.dirname(case_dir))[0:-1]
                                        package = '.'.join(package)
                                        result.extend(
                                            parsePy2Dict(
                                                util.join(root, file),
                                                util.join(path,
                                                          project), package, m,
                                                util.filename(file), modules,
                                                module_desc, version,
                                                pycharm_project, project_kind))

    return result
Example #19
    def main(self):
        self.out = local.path(self.out)
        if self.out.exists():
            if self.overwrite:
                self.out.delete()
            else:
                logging.error(
                    "{} exists, use '--force' to overwrite it".format(
                        self.out))
                sys.exit(1)
        outxfms = self.out.dirname / self.out.stem + '-xfms.tgz'
        with TemporaryDirectory() as tmpdir, local.cwd(tmpdir):
            tmpdir = local.path(tmpdir)

            # fileinput() caused trouble reading data file in python 3, so switching to nrrd
            # if the hdr has 'nan' in space origin, the following will take care of that
            img = nrrd.read(self.dwi)
            dwi = img[0]
            hdr = img[1]

            hdr_out = hdr.copy()
            hdr_out['space origin'] = hdr_out['space origin'][0:3]

            nrrd.write('dwijoined.nhdr',
                       dwi,
                       header=hdr_out,
                       compression_level=1)

            # we want to use this hdr to write a new .nhdr file with corresponding data file
            # so delete old data file from the hdr
            if 'data file' in hdr_out.keys():
                del hdr_out['data file']
            elif 'datafile' in hdr_out.keys():
                del hdr_out['datafile']

            if 'content' in hdr_out.keys():
                del hdr_out['content']

            logging.info('Dice the DWI')

            # Since fslmerge works along the 3rd axis only, dicing also has to be along that axis
            # So, use `unu permute` to reorient the volumes to be stacked along 3rd axis only
            # Include this issue in the tutorial
            (unu['convert', '-t', 'int16', '-i', 'dwijoined.nhdr']
             | unu['dice', '-a', '3', '-o', 'Diffusion-G'])()
            vols = tmpdir.glob('Diffusion-G*.nrrd')
            vols.sort()

            logging.info('Extract the B0')
            bse_py('-i', 'dwijoined.nhdr', '-o', 'b0.nrrd')
            ConvertBetweenFileFormats('b0.nrrd', 'b0.nii.gz', 'short')

            logging.info('Register each volume to the B0')

            # use the following multi-processed loop
            pool = Pool(int(self.nproc))
            res = pool.map_async(_Register_vol, vols)
            volsRegistered = res.get()
            pool.close()
            pool.join()

            # or use the following for loop
            # volsRegistered = []
            # for vol in vols:
            #     volnii = vol.with_suffix('.nii.gz')
            #     ConvertBetweenFileFormats(vol, volnii, 'short')
            #     logging.info('Run FSL flirt affine registration')
            #     flirt('-interp' ,'sinc'
            #           ,'-sincwidth' ,'7'
            #           ,'-sincwindow' ,'blackman'
            #           ,'-in', volnii
            #           ,'-ref', 'b0.nii.gz'
            #           ,'-nosearch'
            #           ,'-o', volnii
            #           ,'-omat', volnii.with_suffix('.txt', depth=2)
            #           ,'-paddingsize', '1')
            #     volsRegistered.append(volnii)

            fslmerge('-t', 'EddyCorrect-DWI', volsRegistered)
            transforms = tmpdir.glob('Diffusion-G*.txt')
            transforms.sort()

            # nibabel loading can be avoided by setting 'data file' = EddyCorrect-DWI.nii.gz
            # and 'byteskip' = -1
            # Tashrif updated Pynrrd package to properly handle that
            new_dwi = nib.load('EddyCorrect-DWI.nii.gz').get_data()

            logging.info('Extract the rotations and realign the gradients')

            space = hdr_out['space'].lower()
            if (space == 'left'):
                spctoras = np.matrix([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
            else:
                spctoras = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
            mf = np.matrix(hdr['measurement frame'])

            # Transforms are in RAS so need to do inv(MF)*inv(SPC2RAS)*ROTATION*SPC2RAS*MF*GRADIENT
            mfras = mf.I * spctoras.I
            rasmf = spctoras * mf
            for (i, t) in enumerate(transforms):

                gDir = [
                    float(num) for num in hdr_out['DWMRI_gradient_' +
                                                  '{:04}'.format(i)].split(' ')
                    if num
                ]

                logging.info('Apply ' + t)
                tra = np.loadtxt(t)
                # removes the translation
                aff = np.matrix(tra[0:3, 0:3])
                # computes the finite strain of aff to get the rotation
                rot = aff * aff.T
                # compute the square root of rot
                [el, ev] = np.linalg.eig(rot)
                eL = np.identity(3) * np.sqrt(el)
                sq = ev * eL * ev.I
                # finally the rotation is defined as
                rot = sq.I * aff
                newdir = np.dot(mfras * rot * rasmf, gDir)

                hdr_out['DWMRI_gradient_' + '{:04}'.format(i)] = ('   ').join(
                    str(x) for x in newdir.tolist()[0])

            tar('cvzf', outxfms, transforms)

            nrrd.write(self.out, new_dwi, header=hdr_out, compression_level=1)

            if self.debug:
                tmpdir.copy(
                    join(dirname(self.out), "eddy-debug-" + str(getpid())))
Example #20
    def main(self):
        self.out = local.path(self.out)
        if self.out.exists():
            if self.overwrite:
                self.out.delete()
            else:
                logging.error("{} exists, use '--force' to overwrite it".format(self.out))
                sys.exit(1)

        outxfms = self.out.dirname / self.out.stem+'_xfms.tgz'

        with TemporaryDirectory() as tmpdir, local.cwd(tmpdir):
            tmpdir = local.path(tmpdir)

            dicePrefix = 'vol'

            logging.info('Dice the DWI')
            fslsplit[self.dwi] & FG

            logging.info('Extract the B0')
            check_call((' ').join([pjoin(FILEDIR,'bse.py'), '-i', self.dwi._path, '-o', 'b0.nii.gz']), shell= True)

            logging.info('Register each volume to the B0')
            vols = sorted(tmpdir // (dicePrefix + '*.nii.gz'))

            # use the following multi-processed loop
            pool= Pool(int(self.nproc))
            res= pool.map_async(_Register_vol, vols)
            volsRegistered= res.get()
            pool.close()
            pool.join()

            # or use the following for loop
            # volsRegistered = []
            # for vol in vols:
            #     volnii = vol.with_suffix('.nii.gz')
            #     logging.info('Run FSL flirt affine registration')
            #     flirt('-interp' ,'sinc'
            #           ,'-sincwidth' ,'7'
            #           ,'-sincwindow' ,'blackman'
            #           ,'-in', volnii
            #           ,'-ref', 'b0.nii.gz'
            #           ,'-nosearch'
            #           ,'-o', volnii
            #           ,'-omat', volnii.with_suffix('.txt', depth=2)
            #           ,'-paddingsize', '1')
            #     volsRegistered.append(volnii)


            fslmerge('-t', 'EddyCorrect-DWI.nii.gz', volsRegistered)
            transforms = tmpdir.glob(dicePrefix+'*.txt')
            transforms.sort()


            logging.info('Extract the rotations and realign the gradients')

            bvecs= read_bvecs(self.bvecFile._path)
            bvecs_new= bvecs.copy()
            for (i,t) in enumerate(transforms):

                logging.info('Apply ' + t)
                tra = np.loadtxt(t)

                # removes the translation
                aff = np.matrix(tra[0:3,0:3])

                # computes the finite strain of aff to get the rotation
                rot = aff*aff.T

                # compute the square root of rot
                [el, ev] = np.linalg.eig(rot)
                eL = np.identity(3)*np.sqrt(el)
                sq = ev*eL*ev.I

                # finally the rotation is defined as
                rot = sq.I*aff

                bvecs_new[i] = np.dot(rot,bvecs[i]).tolist()[0]



            tar('cvzf', outxfms, transforms)

            # save modified bvecs
            write_bvecs(self.out._path+'.bvec', bvecs_new)

            # save EddyCorrect-DWI
            local.path('EddyCorrect-DWI.nii.gz').copy(self.out._path+'.nii.gz')

            # copy bvals
            self.bvalFile.copy(self.out._path+'.bval')

            if self.debug:
                tmpdir.copy(pjoin(dirname(self.out),"eddy-debug-"+str(getpid())))
Example #21
def makeAtlases(target, trainingTable, outPrefix, fusion, threads, debug):

    with TemporaryDirectory() as tmpdir:

        tmpdir = local.path(tmpdir)

        L = len(trainingTable)

        multiDataFrame = pd.concat(
            [trainingTable,
             pd.DataFrame({'tmpdir': [tmpdir] * L, 'target': [str(target)] * L})],
            axis=1)

        logging.info('Create {} atlases: compute transforms from images to target and apply over images'.format(L))

        pool = multiprocessing.Pool(threads)  # Use all available cores, otherwise specify the number you want as an argument

        pool.map_async(train2target, multiDataFrame.iterrows())

        pool.close()
        pool.join()

        logging.info('Fuse warped labelmaps to compute output labelmaps')
        atlasimages = tmpdir // 'atlas*.nii.gz'
        # sorting is required for applying weight to corresponding labelmap
        atlasimages.sort()

        if fusion.lower() == 'wavg':

            ALPHA_DEFAULT= 0.45

            logging.info('Compute MI between warped images and target')
            pool = multiprocessing.Pool(threads)
            for img in atlasimages:
                print('MI between {} and target'.format(img))
                miFile= img+'.txt'
                pool.apply_async(func= computeMI, args= (target, img, miFile, ))

            pool.close()
            pool.join()

            mis= []
            with open(tmpdir+'/MI.txt','w') as fw:

                for img in atlasimages:
                    with open(img+'.txt') as f:
                        mi= f.read().strip()
                        fw.write(img+','+mi+'\n')
                        mis.append(float(mi))

            weights = weightsFromMIExp(mis, ALPHA_DEFAULT)

        target_header= load_nifti(target._path).header
        pool = multiprocessing.Pool(threads)  # Use all available cores, otherwise specify the number you want as an argument
        for labelname in list(trainingTable)[1:]:  # list(d) gets column names

            out = os.path.abspath(outPrefix+ f'_{labelname}.nii.gz')
            if os.path.exists(out):
                os.remove(out)
            labelmaps = tmpdir // (labelname + '*')
            labelmaps.sort()

            if fusion.lower() == 'avg':
                print(' ')
                # parallelize
                # fuseAvg(labelmaps, out, target_header)
                pool.apply_async(func= fuseAvg, args= (labelmaps, out, target_header, ))

            elif fusion.lower() == 'antsjointfusion':
                print(' ')
                # atlasimages are the warped images
                # labelmaps are the warped labels
                # parallelize
                # fuseAntsJointFusion(target, atlasimages, labelmaps, out)
                pool.apply_async(func= fuseAntsJointFusion, args= (target, atlasimages, labelmaps, out, ))

            elif fusion.lower() == 'wavg':
                print(' ')
                # parallelize
                # fuseWeightedAvg(labelmaps, weights, out, target_header)
                pool.apply_async(func= fuseWeightedAvg, args= (labelmaps, weights, out, target_header, ))

            else:
                print('Unrecognized fusion option: {}. Skipping.'.format(fusion))

        pool.close()
        pool.join()

        if debug:
            tmpdir.copy(pjoin(dirname(outPrefix), 'atlas-debug-' + str(os.getpid())))
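
# A sketch of the expected trainingTable layout (column names and paths are
# illustrative): the first column holds the training images and every
# remaining column one labelmap type, which is why list(trainingTable)[1:]
# above is used as the list of label names.
#
# import pandas as pd
# trainingTable = pd.DataFrame({
#     'image':    ['case1-t1.nii.gz', 'case2-t1.nii.gz'],
#     'labelmap': ['case1-mask.nii.gz', 'case2-mask.nii.gz'],
# })
# makeAtlases(target, trainingTable, outPrefix='sub01/atlas',
#             fusion='wavg', threads=4, debug=False)
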
Example #22
def new_logger(logpath='', cfg=None):
    """
    Return a new logging object for this process. The log file path is derived
    from (in order):

     - logpath if set
     - environment ($CRAWL_LOG)
     - cfg
     - default (/var/log/hpssic.log if writable, else /tmp/hpssic.log)
    """

    # -------------------------------------------------------------------------
    def cfg_get(func, section, option, defval):
        if cfg:
            rval = func(section, option, defval)
        else:
            rval = defval
        return rval

    # -------------------------------------------------------------------------
    def raiseError(record=None):
        raise

    envname = os.getenv('CRAWL_LOG')
    try:
        dcfg = get_config()
    except Exception:
        dcfg = None

    if logpath != '':
        final_logpath = logpath
    elif envname:
        final_logpath = envname
    elif cfg:
        try:
            final_logpath = cfg.get('crawler', 'logpath')
        except NoOptionError:
            final_logpath = U.default_logpath()
        except NoSectionError:
            final_logpath = U.default_logpath()
    elif dcfg:
        try:
            final_logpath = dcfg.get('crawler', 'logpath')
        except NoOptionError:
            final_logpath = U.default_logpath()
        except NoSectionError:
            final_logpath = U.default_logpath()
    else:
        final_logpath = U.default_logpath()

    rval = logging.getLogger('hpssic')
    rval.setLevel(logging.INFO)
    host = U.hostname()

    for h in rval.handlers:
        h.close()
        del h

    if cfg:
        maxBytes = cfg.get_size('crawler', 'logsize', 10 * 1024 * 1024)
        backupCount = cfg.get_size('crawler', 'logmax', 5)
        archdir = cfg.get_d(
            'crawler', 'archive_dir',
            U.pathjoin(U.dirname(final_logpath), 'hpss_log_archive'))
    else:
        maxBytes = 10 * 1024 * 1024
        backupCount = 5
        archdir = U.pathjoin(U.dirname(final_logpath), 'hpss_log_archive')

    fh = U.ArchiveLogfileHandler(final_logpath,
                                 maxBytes=maxBytes,
                                 backupCount=backupCount,
                                 archdir=archdir)

    strfmt = "%" + "(asctime)s [%s] " % host + '%' + "(message)s"
    fmt = logging.Formatter(strfmt, datefmt="%Y.%m%d %H:%M:%S")
    fh.setFormatter(fmt)
    fh.handleError = raiseError

    while 0 < len(rval.handlers):
        z = U.pop0(rval.handlers)
        del z
    rval.addHandler(fh)

    rval.info('-' * (55 - len(host)))

    return rval
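
# A minimal usage sketch (path is illustrative); repeated calls reuse the same
# 'hpssic' logger but replace its handler as shown above:
#
# log = new_logger(logpath='/tmp/hpssic.log')
# log.info('crawler starting')
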
Example #23
  @property
  def marathon(self):
    return self.__marathon

  @property
  def chronos(self):
    return self.__chronos

  @property
  def exhibitor(self):
    return self.__exhibitor

  @property
  def ceph(self):
    return self.__ceph


config = Configuration.read_config('%s/config.yml'%dirname(__file__))


def get_global_scheduler():
  try:
    sched_mod = '.'.join(config.pivot.scheduler.split('.')[:-1])
    sched_class = config.pivot.scheduler.split('.')[-1]
    return getattr(importlib.import_module(sched_mod), sched_class)()
  except Exception as e:
    sys.stderr.write(str(e) + '\n')
    from schedule.universal import DefaultGlobalScheduler
    return DefaultGlobalScheduler()
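
# A hypothetical config.yml fragment for the Configuration above:
# pivot.scheduler names the scheduler class by its dotted module path, which
# get_global_scheduler() splits into module and class name (falling back to
# DefaultGlobalScheduler if the import fails); the other keys are the ones
# referenced by start_server() earlier in this collection.
#
#     pivot:
#       port: 9090
#       https: false
#       n_parallel: 1
#       scheduler: schedule.universal.DefaultGlobalScheduler
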