Example #1
import os

# Session, Batch, studyparser and utils are provided by the host project.
class App(object):
    """
    Convert a DICOM directory into a BIDS dataset with dcm2niibatch.
    """
    def __init__(self, args):
        self._bidsDir = args.bids_dir
        self._dicomDir = args.dicom_dir
        self._session = Session(args.session, args.participant, self._bidsDir)
        self._parser = getattr(studyparser, args.algorithm)(self._dicomDir,
                                                            self._session)
        self._yes = args.yes
        self._codeDir = os.path.join(self._bidsDir, 'code')
        utils.make_directory_tree(self._codeDir)
        self._batch = Batch(self._codeDir, self._session)

    def run(self):
        utils.new_line()
        utils.info('Parse and group DICOM directory')
        self._parser.parse_acquisitions()

        utils.new_line()
        utils.info('Sort and set up acquisitions')
        self._parser.sort_acquisitions()

        #utils.new_line()
        #utils.ok('Acquisitions of interest:')
        #for _ in self._parser.caught: utils.info(_)

        utils.new_line()
        utils.warning('Acquisitions excluded:')
        for _ in self._parser._excluded:
            utils.info(_)

        utils.new_line()
        utils.info('Create YAML file for dcm2niibatch')
        for acq in self._parser.acquisitions:
            self._batch.add(acq)
        self._batch.write()

        utils.new_line()
        utils.ok('Batch file:')
        self._batch.show()

        if self._yes:
            launchBatch = True
        else:
            msg = "Do you want to launch dcm2niibatch ?"
            launchBatch = utils.query_yes_no(msg)

        if launchBatch:
            self._batch.launch()
            for acq in self._parser.acquisitions:
                acq.update_json()
        else:
            utils.new_line()
            utils.ok("To launch dcm2niibatch later:")
            utils.info("cd {}".format(self._codeDir))
            utils.info(self._batch.command)
        return 0
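
A minimal driver sketch for this class, assuming an argparse-style CLI; the flag names below simply mirror the attributes App.__init__ reads and are hypothetical, not the project's actual interface.

import argparse

def main():
    # Hypothetical CLI; argument names mirror what App.__init__ expects.
    parser = argparse.ArgumentParser(description='DICOM to BIDS conversion')
    parser.add_argument('bids_dir')
    parser.add_argument('dicom_dir')
    parser.add_argument('--session', required=True)
    parser.add_argument('--participant', required=True)
    parser.add_argument('--algorithm', default='DefaultParser')  # assumed default
    parser.add_argument('--yes', action='store_true',
                        help='launch dcm2niibatch without prompting')
    return App(parser.parse_args()).run()

if __name__ == '__main__':
    raise SystemExit(main())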
Example #2
    def run(self):
        #update local policy vars on an interval for stability
        if self.learner_policy.get_step() % self.update_interval == 0:
            self.pull_vars()
        #print('learner at %s, actor at %s' % (
        #        self.learner_policy.get_step(),
        #        self.local_policy.get_step()))

        #FIXME: last obs might be from different game
        #since games are 4.5k steps, not a big deal
        n_actions = self.env.action_space.n

        batch = Batch()
        state = self.last_obs  #first action in each new env is ~random
        lstm_state = self.local_policy.lstm_init_state
        done = step = 0
        while not done and step < self.steps:
            action, value, logit, lstm_state = self.local_policy.act(
                state, lstm_state)
            next_state, reward, done, _ = self.env.step(action)

            #skip a fixed number of frames, aggregating rewards?
            #FIXME: don't just skip, stack the frames
            #might mess things up if predicting above
            #using non-diff and diff frames
            #aggregate and non-aggregate must be constant
            #note the env employs frame skipping already
            #more skipping seems to lead to a better policy though
            #for _ in range(3):
            #    if done:
            #        break
            #    next_state, reward_s, done, _ = self.env.step(action)
            #    reward += reward_s

            #process observation data
            next_state = process_state(next_state)
            if isinstance(action, np.integer):
                action = to_onehot(action, n_actions)

            #add experience to batch
            batch.add((state, action, reward, value, done, next_state, logit,
                       lstm_state))

            #update
            step += 1
            state = next_state
            self.last_obs = state

        return batch.get()
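
The loop above assumes only a small surface from its helpers: Batch collects per-step transition tuples and hands them back from get(), and to_onehot expands a discrete action index. A minimal sketch under those assumptions (not the project's actual implementations):

import numpy as np

class Batch:
    """Accumulates per-step experience tuples for the learner.

    Sketch only: the real class may transpose the transitions into
    per-field arrays instead of returning a flat list.
    """
    def __init__(self):
        self._transitions = []

    def add(self, transition):
        self._transitions.append(transition)

    def get(self):
        return self._transitions

def to_onehot(index, n):
    # Encode a discrete action index as a one-hot float vector.
    vec = np.zeros(n, dtype=np.float32)
    vec[index] = 1.0
    return vec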
Example #3
File: deploy.py  Project: dockerian/pyapi
import tempfile  # Batch, BatchProcess, LOGGER and delete_directory_tree come from the host project

class Deployment(object):
    def __init__(
            self,
            package, cli_composer, deploy_status,
            use_package_path=False):
        """
        Initialize an instance of Deployment
        """
        import uuid
        if use_package_path:
            self.batch = Batch(package.cwd)
        else:
            self.batch = Batch(tempfile.mkdtemp())
        self.cli_composer = cli_composer
        self.cwd = self.batch.cwd
        self.cwd_use_package_path = use_package_path
        self.deployed = False
        self.deployment_id = '{0}'.format(uuid.uuid1())
        self.deploy_status = deploy_status
        self.package = package
        self.store = deploy_status.store  # backend store
        self.started = False

    def cleanup(self):
        """
        Cleanup Deployment BatchProcess working directory
        """
        try:
            LOGGER.info('Deleting deployment cwd={0}'.format(self.cwd))
            # Clean up working directory
            delete_directory_tree(self.cwd)
            LOGGER.info('Deleted deployment cwd.')
        except Exception:
            err_message = \
                'Failed to clean up deployment cwd "{0}".' \
                .format(self.cwd)
            LOGGER.exception(err_message)

    def deploy(self):
        """
        Start a Deployment process
        """
        if self.started:
            err = 'Deployment {0} already started'.format(self.deployment_id)
            raise Exception(err)

        self.get_deployment_batch()

        try:
            self.started = True
            self.download_package()  # preparing package

            LOGGER.info('Starting deployment ...')
            process = BatchProcess(self.batch, self.set_status)
            LOGGER.debug('Batch process: {0}'.format(process))
            self.deployed = process.execute()
        except Exception:
            err_message = 'Exception on BatchProcess execution.'
            LOGGER.exception(err_message)
            self.set_status('FAILED')
        else:
            LOGGER.info('DONE deployment - {0}'.format(self.deployment_id))
        finally:
            self.cleanup()

    def download_package(self):
        if self.cwd_use_package_path:
            self.set_status('DOWNLOADING')
            pkg_filename = self.package.file_name
            pkg_contents = self.store.get_file_contents(pkg_filename)
            LOGGER.info('Downloading package {0} to {1}...'.format(
                pkg_filename, self.package.path))
            with open(self.package.path, 'w') as package_file:
                # write the package as a tar.gz into deployment cwd
                package_file.write(pkg_contents)
        return self.package.path

    def get_deployment_batch(self):
        """
        Get a batch of commands for the deployment
        """
        pkg_path = self.package.path
        pkg_name = self.package.name

        self.batch.clear()
        # add unpacking script to batch
        LOGGER.info('Adding batch to unpack {0} from {1}'.format(
            pkg_name, pkg_path))
        self.get_package_batch()

        # add deployment script to batch
        self.batch.add('TARGET', self.cli_composer.get_target_cmd())
        self.batch.add('LOGIN', self.cli_composer.get_login_cmd())
        self.batch.add(
            'REMOVED', self.cli_composer.get_delete_cmd(pkg_name), True)
        self.batch.add('LIST', self.cli_composer.get_list_cmd())
        self.batch.add('DEPLOYED', self.cli_composer.get_push_cmd(
            pkg_name, '{0}'.format(pkg_name)))
        self.batch.add('NEWLIST', self.cli_composer.get_list_cmd())
        self.batch.add('DIR', ['ls', '-al'])

    def get_package_batch(self):
        """
        Get a batch of commands for preparing the package
        """
        dst_path = self.cwd
        src_path = self.package.path
        pkg_name = self.package.name

        # this copy command is not needed if the package path is used as cwd
        if not self.cwd_use_package_path:
            copy_cmd = [
                'cp', '-rf',
                '{0}'.format(src_path),
                '{0}'.format(dst_path)]
            self.batch.add('COPY', copy_cmd)

        view_cmd = [
            'tar', '-tvf',
            '{0}'.format(src_path)]
        # Assume any foo.tar.gz contains -
        #   - foo/foo.tar.gz (the package to deploy)
        #   - manifest.json
        unpack_cmd = [
            'tar', '-zxvf',
            '{0}'.format(src_path)]
        xtract_cmd = [
            'tar', '-zxvf',
            '{0}/{1}/{2}.tar.gz'.format(dst_path, pkg_name, pkg_name)]
        dir_cmd = [
            'ls', '-al',
            '{0}/{1}'.format(dst_path, pkg_name)]
        self.batch.add('PREVIEW', view_cmd)
        self.batch.add('UNPACK', unpack_cmd)
        self.batch.add('EXTRACT', xtract_cmd)
        self.batch.add('DIR', dir_cmd)

    def get_status(self):
        '''get status by self.deployment_id
        '''
        return self.deploy_status.get_status(self.deployment_id)

    def set_status(self, status):
        '''set status by self.deployment_id
        '''
        self.deploy_status.set_status(self.deployment_id, status)
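
This example treats Batch as an ordered queue of labeled argv commands run from one working directory, and the extra True passed with 'REMOVED' reads like a continue-on-error flag. A minimal sketch under those assumptions (not the actual dockerian/pyapi class):

class Batch(object):
    """Ordered queue of labeled shell commands, run from one cwd."""

    def __init__(self, cwd):
        self.cwd = cwd
        self.commands = []

    def add(self, label, cmd, continue_on_error=False):
        # cmd is an argv list such as ['ls', '-al']; the third
        # argument's meaning is assumed from the 'REMOVED' call above.
        self.commands.append((label, cmd, continue_on_error))

    def clear(self):
        self.commands = []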
Example #4
import csv  # Batch comes from the host project

class CSVUnifier:
    """
    Aggregates data from multiple CSVs, cleans and validates the values,
    and writes the rows to a specified output file.

    All columns in the schema must be present in the input data.
    Rows are validated before being written to an output file in batches.
    Rows are always written in the order specified in the schema.
    """

    def __init__(self, batch_size, output_file, schema, validator_map):
        write_function = csv.writer(output_file,
                                    delimiter=',',
                                    lineterminator='\n').writerows
        self.batch = Batch(batch_size, write_function)
        self.header = None
        self.header_map = {}
        self.header_written = False

        self.schema = schema
        self.validator_map = validator_map

    def process(self, rows):
        for r in rows:
            if self.header is None:
                self.make_header_map(r)
                if len(self.schema) != len(self.header_map):
                    print('All columns in the schema must be present')
                    return
                self.header = r
                if not self.header_written:
                    self.batch.add(self.schema)
                    self.header_written = True
            else:
                clean_row = self.clean(r)
                if self.valid_row(clean_row):
                    self.batch.add(clean_row)
                else:
                    print(f'Invalid row: {r}')

    def valid_row(self, row):
        for i, v in enumerate(row):
            if not self.validator_map[self.schema[i]](v):
                return False
        return True

    def clean(self, row):
        """
        Cleans, reorders, and filters values in the row
        """
        res = [''] * len(self.schema)
        for i, v in enumerate(row):
            if i in self.header_map:
                res[self.header_map[i]] = v.strip('"')
        return res

    def make_header_map(self, header):
        """
        Maps index of a column name in header to index in schema
        """
        for i, name in enumerate(header):
            if name in self.schema:
                self.header_map[i] = self.schema.index(name)

    def reset_header(self):
        self.header = None
        self.header_map = {}

    def clean_up(self):
        self.batch.flush()
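
A short usage sketch, assuming the Batch used here buffers up to batch_size rows and flushes them to the write function; the schema, validators and inputs below are made up for illustration.

import csv
import io

# Hypothetical schema and validators for illustration.
schema = ['id', 'name']
validators = {'id': str.isdigit, 'name': lambda v: bool(v)}

csv_a = io.StringIO('name,id\nalice,1\nbob,2\n')
csv_b = io.StringIO('id,name\n3,carol\n,dave\n')   # second data row is invalid

output = io.StringIO()
unifier = CSVUnifier(batch_size=100, output_file=output,
                     schema=schema, validator_map=validators)

for source in (csv_a, csv_b):
    unifier.process(csv.reader(source))
    unifier.reset_header()    # each source carries its own header
unifier.clean_up()            # flush any buffered rows

print(output.getvalue())
# id,name
# 1,alice
# 2,bob
# 3,carol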