Example #1
def buildThread(command, output):
    print("Thread Start NEW")
    success, results = context.build_cmd(command, output_format=output)
    if not success:
        error_file = os.path.join(tempfile.gettempdir(),
                                  'building_' + command['endpoint'])
        with open(error_file, 'w') as f:
            f.write(results)

    print("Thread End")
Example #2
def main():

    LOG.debug('-------------NEW RUN-------------\n')
    parser = argparse.ArgumentParser(
        description=
        'Use Flame to either build a model from or apply a model to the input file.'
    )

    parser.add_argument('-f', '--infile', help='Input file.', required=False)

    parser.add_argument('-e',
                        '--endpoint',
                        help='Endpoint model name.',
                        required=False)

    parser.add_argument('-v',
                        '--version',
                        help='Endpoint model version.',
                        required=False)

    parser.add_argument('-a',
                        '--action',
                        help='Manage action.',
                        required=False)

    parser.add_argument(
        '-c',
        '--command',
        action='store',
        choices=['predict', 'build', 'manage', 'config'],
        help='Action type: \'predict\' or \'build\' or \'manage\' or \'config\'',
        required=True)

    # parser.add_argument('-log', '--loglevel',
    #                     help='Logger level of verbosity',)

    parser.add_argument('-p',
                        '--path',
                        help='Defines the new path for the models repository.',
                        required=False)

    args = parser.parse_args()

    # init logger level and set general config
    # another way around would be to create a handler with the level
    # and append it to the global logger instance

    # if args.loglevel:
    #     numeric_level = getattr(logging, args.loglevel.upper(), None)
    #     if not isinstance(numeric_level, int):
    #         raise ValueError('Invalid log level: {}'.format(args.loglevel))
    #     logging.basicConfig(level=numeric_level)

    # make sure flame has been configured before running any command, unless this
    # command is used to configure flame
    if args.command != 'config':
        configuration_warning()

    if args.command == 'predict':

        if (args.endpoint is None) or (args.infile is None):
            print(
                'flame predict : endpoint and input file arguments are compulsory'
            )
            return

        version = utils.intver(args.version)

        command_predict = {
            'endpoint': args.endpoint,
            'version': version,
            'infile': args.infile
        }

        LOG.info(f'Starting prediction with model {args.endpoint}'
                 f' version {version} for file {args.infile}')

        success, results = context.predict_cmd(command_predict)
        # print('flame predict : ', success, results)

    elif args.command == 'build':

        if (args.endpoint is None) or (args.infile is None):
            print(
                'flame build : endpoint and input file arguments are compulsory'
            )
            return

        command_build = {'endpoint': args.endpoint, 'infile': args.infile}

        LOG.info(f'Starting building model {args.endpoint}'
                 f' with file {args.infile}')

        success, results = context.build_cmd(command_build)
        # print('flame build : ', success, results)

    elif args.command == 'manage':
        manage_cmd(args)

    elif args.command == 'config':
        config(args.path)
        change_config_status()
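
A usage sketch for the parser above, assuming the package is installed with a 'flame' console script (the actual entry point may differ):

import subprocess

# build a model from a training series
subprocess.run(['flame', '-c', 'build', '-e', 'MyModel', '-f', 'training.sdf'])

# predict with version 1 of that model
subprocess.run(['flame', '-c', 'predict', '-e', 'MyModel', '-v', '1', '-f', 'query.sdf'])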
Example #3
def buildThread(command, output):

    print("Thread Start")
    success, results = context.build_cmd(command, output_format=output)
    print("Thread End")
Example #4
def action_refresh(model=None, version=None, GUI=False):
    '''
    Rebuild one or many models making use of existing parameter files and
    locally stored training series. 
    '''

    import flame.context as context
    from flame.parameters import Parameters
    # from flame.documentation import Documentation
    import logging

    if GUI:
        token_file = os.path.join(tempfile.gettempdir(), 'refreshing_' + model)
        # update token file with content 'working'
        with open(token_file, 'w') as f:
            f.write('Analyzing and sorting models...')

    # list endpoints relevant for the arguments
    if model is not None:
        model_list = [model]
    else:
        model_root = pathlib.Path(utils.model_repository_path())
        model_list = [x.stem for x in model_root.iterdir() if x.is_dir()]

    # list versions relevant for the arguments
    task_list = []
    for imodel in model_list:

        if version is not None:
            task_list.append((imodel, version))
        else:
            model_root = pathlib.Path(utils.model_tree_path(imodel))
            itask_list = [(imodel, utils.modeldir2ver(x.stem))
                          for x in model_root.iterdir() if x.is_dir()]
            task_list += itask_list  # use "+=" and not "append" to merge the new list with the old one

    # analyze task_list and append the ensemble models at the end
    # this is needed so that the low-level models are refreshed BEFORE the
    # high-level models, eliminating the need to refresh them recursively
    LOG.info("Analyzing and sorting models...")

    # make sure the lower models are in task_list and, if not, force their inclusion
    for itask in task_list:
        param = Parameters()
        success, results = param.loadYaml(itask[0], itask[1])

        if not success:
            continue

        if param.getVal('input_type') == 'model_ensemble':
            ens_nams = param.getVal('ensemble_names')
            ens_vers = param.getVal('ensemble_versions')
            for i in range(len(ens_nams)):
                iver = 0
                inam = ens_nams[i]
                if (i < len(ens_vers)):
                    iver = ens_vers[i]
                if (inam, iver) not in task_list:
                    task_list.append((inam, iver))

    # create separate lists for regular and ensemble models
    # and add ensemble models at the end
    # this needs to be carried out after the previous step because
    # some of the lower level models could be an ensemble model
    # itself
    mol_list = []
    ens_list = []
    for itask in task_list:
        param = Parameters()
        success, results = param.loadYaml(itask[0], itask[1])

        if not success:
            mol_list.append(itask)
            continue

        if param.getVal('input_type') == 'model_ensemble':
            ens_list.append(itask)
        else:
            mol_list.append(itask)

    task_list = mol_list + ens_list

    # show all models before starting
    LOG.info(
        "Starting model refreshing task for the following models and versions")
    for itask in task_list:
        LOG.info(f'   model: {itask[0]}   version: {itask[1]}')

    LOG.info("This can take some time, please be patient...")

    source_dir = os.path.dirname(os.path.abspath(__file__))
    children_dir = os.path.join(source_dir, 'children')
    master_parameters = os.path.join(children_dir, 'parameters.yaml')
    master_documentation = os.path.join(children_dir, 'documentation.yaml')

    # now send the build command for each task
    for itask in task_list:

        destinat_path = utils.model_path(itask[0], 0)  # dev

        if itask[1] != 0:
            # move version to /dev for building
            original_path = utils.model_path(itask[0], itask[1])  # veri
            security_path = destinat_path + '_security'  # dev_sec
            shutil.move(destinat_path, security_path)  # dev --> dev_sec
            shutil.move(original_path, destinat_path)  # veri --> dev

        LOG.info(
            f'   refreshing model: {itask[0]}   version: {itask[1]} ({task_list.index(itask)+1} of {len(task_list)})...'
        )
        if GUI:
            with open(token_file, 'w') as f:
                f.write(
                    f'model: {itask[0]} version: {itask[1]} ({task_list.index(itask)+1} of {len(task_list)})'
                )

        # disable LOG output
        logging.disable(logging.ERROR)

        # update parameters
        dump_parameters = os.path.join(destinat_path, 'parameters_dump.yaml')
        success, param = action_parameters(itask[0], 0, oformat='bin')
        if success:
            param_yaml = param.dumpYAML()
            with open(dump_parameters, 'w') as f:
                for line in param_yaml:
                    f.write(line + '\n')
        else:
            LOG.info(
                f'   ERROR: unable to merge parameters for model: {itask[0]}   version: {itask[1]}'
            )
            dump_parameters = None

        original_parameters = os.path.join(destinat_path, 'parameters.yaml')
        shutil.copy(master_parameters, original_parameters)

        # update documentation
        dump_documentation = os.path.join(destinat_path,
                                          'documentation_dump.yaml')
        success, documentation = action_documentation(itask[0],
                                                      0,
                                                      doc_file=None,
                                                      oformat='bin')

        original_documentation = os.path.join(destinat_path,
                                              'documentation.yaml')
        shutil.copy(master_documentation, original_documentation)

        if success:
            documentation_yaml = documentation.dumpYAML()
            with open(dump_documentation, 'w') as f:
                for line in documentation_yaml:
                    line = line.encode("ascii", "ignore")
                    line = line.decode("ascii", "ignore")
                    f.write(line + '\n')
            s2, documentation = action_documentation(itask[0],
                                                     0,
                                                     doc_file=None,
                                                     oformat='bin')
            s3, r3 = documentation.delta(itask[0], 0, dump_documentation)
        else:
            LOG.info(
                f'   ERROR: unable to merge documentation for model: {itask[0]}   version: {itask[1]}'
            )

        # rebuild the model
        command_build = {
            'endpoint': itask[0],
            'infile': None,
            'param_file': dump_parameters,
            'incremental': False
        }

        success, results = context.build_cmd(command_build)

        # enable LOG output
        logging.disable(logging.NOTSET)

        if itask[1] != 0:
            shutil.move(destinat_path, original_path)  # dev --> veri
            shutil.move(security_path, destinat_path)  # dev_sec --> dev

        if not success:
            LOG.error(results)

    LOG.info("Model refreshing task finished")

    if GUI:
        # update token file with status 'ready'
        with open(token_file, 'w') as f:
            f.write('ready')

    return True, 'OK'
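
The directory shuffle above is the key mechanism: context.build_cmd always rebuilds whatever sits in the model's dev folder, so a published version is swapped into dev and restored afterwards. A self-contained sketch of the same swap-and-restore pattern (the paths are illustrative, not Flame's real repository layout):

import os
import shutil
import tempfile

base = tempfile.mkdtemp()
dev = os.path.join(base, 'dev')
ver = os.path.join(base, 'ver000001')
os.makedirs(dev)
os.makedirs(ver)

security = dev + '_security'
shutil.move(dev, security)   # dev --> dev_security (park the current dev copy)
shutil.move(ver, dev)        # version --> dev (the rebuild happens here)
# ... rebuild whatever now lives in 'dev' ...
shutil.move(dev, ver)        # dev --> version (return the rebuilt model)
shutil.move(security, dev)   # dev_security --> dev (restore the parked copy)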
Example #5
def action_refresh(model=None, version=None):
    '''
    Rebuild one or many models making use of existing parameter files and
    locally stored training series. 
    '''

    import flame.context as context
    from flame.parameters import Parameters
    import logging

    # list endpoints relevant for the arguments
    if model is not None:
        model_list = [model]
    else:
        model_root = pathlib.Path(utils.model_repository_path())
        model_list = [x.stem for x in model_root.iterdir() if x.is_dir()]

    # list versions relevant for the arguments
    task_list = []
    for imodel in model_list:
        if version is not None:
            task_list.append((imodel, version))
        else:
            model_root = pathlib.Path(utils.model_tree_path(imodel))
            itask_list = [(imodel, utils.modeldir2ver(x.stem))
                          for x in model_root.iterdir() if x.is_dir()]
            task_list += itask_list  # use "+=" and not "append" to merge the new list with the old one

    # analyze task_list and append the ensemble models at the end
    # this is needed so that the low-level models are refreshed BEFORE the
    # high-level models, eliminating the need to refresh them recursively
    LOG.info("Analyzing and sorting models...")

    # make sure the lower models are in task_list and, if not, force their inclusion
    for itask in task_list:
        param = Parameters()
        success, results = param.loadYaml(itask[0], itask[1])

        if not success:
            continue

        if param.getVal('input_type') == 'model_ensemble':
            ens_nams = param.getVal('ensemble_names')
            ens_vers = param.getVal('ensemble_versions')
            for i in range(len(ens_nams)):
                iver = 0
                inam = ens_nams[i]
                if (i < len(ens_vers)):
                    iver = ens_vers[i]
                if (inam, iver) not in task_list:
                    task_list.append((inam, iver))

    # create separate lists for regular and ensemble models
    # and add ensemble models at the end
    # this needs to be carried out after the previous step because
    # some of the lower level models could be an ensemble model
    # itself
    mol_list = []
    ens_list = []
    for itask in task_list:
        param = Parameters()
        success, results = param.loadYaml(itask[0], itask[1])

        if not success:
            mol_list.append(itask)
            continue

        if param.getVal('input_type') == 'model_ensemble':
            ens_list.append(itask)
        else:
            mol_list.append(itask)

    task_list = mol_list + ens_list

    # show all models before starting
    LOG.info(
        "Starting model refreshing task for the following models and versions")
    for itask in task_list:
        LOG.info(f'   model: {itask[0]}   version: {itask[1]}')

    LOG.info("This can take some time, please be patient...")

    # now send the build command for each task
    for itask in task_list:

        if itask[1] != 0:
            # move version to /dev for building
            original_path = utils.model_path(itask[0], itask[1])  # veri
            destinat_path = utils.model_path(itask[0], 0)  # dev
            security_path = destinat_path + '_security'  # dev_sec
            shutil.move(destinat_path, security_path)  # dev --> dev_sec
            shutil.move(original_path, destinat_path)  # veri --> dev

        LOG.info(
            f'   refreshing model: {itask[0]}   version: {itask[1]} ({task_list.index(itask)+1} of {len(task_list)})...'
        )

        # disable LOG output
        logging.disable(logging.ERROR)

        command_build = {
            'endpoint': itask[0],
            'infile': None,
            'param_file': None,
            'incremental': False
        }

        success, results = context.build_cmd(command_build)

        # enable LOG output
        logging.disable(logging.NOTSET)

        if itask[1] != 0:
            shutil.move(destinat_path, original_path)  # dev --> veri
            shutil.move(security_path, destinat_path)  # dev_sec --> dev

        if not success:
            LOG.error(results)

    LOG.info("Model refreshing task finished")

    return True, 'OK'
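
The logging.disable() pair brackets the noisy build so that only errors surface, but if build_cmd raised an exception logging would stay muted. A try/finally variant is safer; this is a sketch with a stand-in for the build call, not code from the Flame source:

import logging

logging.basicConfig(level=logging.INFO)

def noisy_build():
    # stand-in for context.build_cmd, which logs heavily at INFO level
    logging.getLogger(__name__).info('build chatter...')
    return True, 'OK'

logging.disable(logging.ERROR)       # silence everything below ERROR
try:
    success, results = noisy_build()
finally:
    logging.disable(logging.NOTSET)  # always restore normal logging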
Example #6
def main():

    LOG.debug('-------------NEW RUN-------------\n')
    parser = argparse.ArgumentParser(
        description=
        f'Flame version {__version__}. Use Flame to build and manage predictive models or to predict using them.'
    )

    parser.add_argument('-f', '--infile', help='Input file.', required=False)

    parser.add_argument('-e',
                        '--endpoint',
                        help='Endpoint model name.',
                        required=False)

    parser.add_argument('-s',
                        '--space',
                        help='Chemical space name.',
                        required=False)

    parser.add_argument('-v',
                        '--version',
                        help='Endpoint model version.',
                        required=False)

    parser.add_argument('-a',
                        '--action',
                        help='Manage action.',
                        required=False)

    parser.add_argument('-p',
                        '--parameters',
                        help='File with parameters for the current action.',
                        required=False)

    parser.add_argument(
        '-c',
        '--command',
        action='store',
        choices=['predict', 'search', 'build', 'sbuild', 'manage', 'config'],
        help=
        'Action type: \'predict\' or \'search\' or \'build\' or \'sbuild\' or \'manage\' or \'config\'',
        required=True)

    # parser.add_argument('-log', '--loglevel',
    #                     help='Logger level of verbosity',)

    parser.add_argument(
        '-d',
        '--directory',
        help=
        'Defines the root directory for the models and spaces repositories.',
        required=False)
    parser.add_argument('-t',
                        '--documentation_file',
                        help='File with manually filled documentation fields.',
                        required=False)

    parser.add_argument(
        '-l',
        '--label',
        help='Label for facilitating the identification of the prediction.',
        required=False)

    parser.add_argument(
        '-inc',
        '--incremental',
        help=
        'The input file must be added to the existing training series. Only for "build" command.',
        action='store_true',
        required=False)

    parser.add_argument('--smarts',
                        help='SMARTS string used as input for similarity',
                        required=False)

    args = parser.parse_args()

    # init logger level and set general config
    # another way around would be to create a handler with the level
    # and append it to the global logger instance

    # if args.loglevel:
    #     numeric_level = getattr(logging, args.loglevel.upper(), None)
    #     if not isinstance(numeric_level, int):
    #         raise ValueError('Invalid log level: {}'.format(args.loglevel))
    #     logging.basicConfig(level=numeric_level)

    if args.infile is not None:
        if not os.path.isfile(args.infile):
            LOG.error(f'Input file {args.infile} not found')
            return

    # make sure flame has been configured before running any command, unless this
    # command is used to configure flame
    if args.command != 'config':
        utils.config_test()

    if args.command == 'predict':

        if (args.endpoint is None) or (args.infile is None):
            LOG.error(
                'flame predict : endpoint and input file arguments are compulsory'
            )
            return

        version = utils.intver(args.version)

        if args.label is None:
            label = 'temp'
        else:
            label = args.label

        command_predict = {
            'endpoint': args.endpoint,
            'version': version,
            'label': label,
            'infile': args.infile
        }

        LOG.info(
            f'Starting prediction with model {args.endpoint}'
            f' version {version} for file {args.infile}, labelled as {label}')

        success, results = context.predict_cmd(command_predict)
        if not success:
            LOG.error(results)

    elif args.command == 'search':

        if (args.space is None) or (args.infile is None
                                    and args.smarts is None):
            LOG.error(
                'flame search : space and input file arguments are compulsory')
            return

        version = utils.intver(args.version)
        if args.label is None:
            label = 'temp'
        else:
            label = args.label

        command_search = {
            'space': args.space,
            'version': version,
            'infile': args.infile,
            'smarts': args.smarts,
            'runtime_param': args.parameters,
            'label': label
        }

        LOG.info(
            f'Starting search on space {args.space}'
            f' version {version} for file {args.infile}, labelled as {label}')

        success, results = context.search_cmd(command_search)

        if not success:
            LOG.error(results)

    elif args.command == 'build':

        if (args.endpoint is None):
            LOG.error('flame build : endpoint argument is compulsory')
            return

        command_build = {
            'endpoint': args.endpoint,
            'infile': args.infile,
            'param_file': args.parameters,
            'incremental': args.incremental
        }

        LOG.info(f'Starting building model {args.endpoint}'
                 f' with file {args.infile} and parameters {args.parameters}')

        success, results = context.build_cmd(command_build)

        if not success:
            LOG.error(results)

    elif args.command == 'sbuild':

        if (args.space is None):
            LOG.error('flame sbuild : space argument is compulsory')
            return

        command_build = {
            'space': args.space,
            'infile': args.infile,
            'param_file': args.parameters
        }

        LOG.info(f'Starting building space {args.space}'
                 f' with file {args.infile} and parameters {args.parameters}')

        success, results = context.sbuild_cmd(command_build)

        if not success:
            LOG.error(results)

    elif args.command == 'manage':
        success, results = context.manage_cmd(args)
        if not success:
            LOG.error(results)

    elif args.command == 'config':
        success, results = config.configure(args.directory,
                                            (args.action == 'silent'))
        if not success:
            LOG.error(f'{results}, configuration unchanged')
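
A usage sketch for the 'search' branch above, which accepts either an input file or a SMARTS pattern; as before, the 'flame' console script name is an assumption:

import subprocess

# similarity search on a chemical space driven by a SMARTS pattern
subprocess.run(['flame', '-c', 'search', '-s', 'MySpace',
                '--smarts', 'c1ccccc1', '-l', 'benzene_query'])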