Example #1
def get_history_from_nels(tracker):

    tracker_id = tracker['id']
    logger.info(f'{tracker_id}: pull history from NeLS start')

    try:

        master_api.update_import(tracker_id,
                                 {'state': 'nels-transfer-running'})

        tmpfile = "{}/{}.tgz".format(tempfile.mkdtemp(dir=tmp_dir),
                                     tracker['id'])
        print(f"TMPFILE {tmpfile}")

        ssh_info = get_ssh_credential(tracker['nels_id'])
        logger.debug(f"{tracker_id} ssh info {ssh_info}")

        cmd = f'scp -o StrictHostKeyChecking=no -o BatchMode=yes -i {ssh_info["key_file"]} "{ssh_info["username"]}@{ssh_info["hostname"]}:{tracker["source"]}" {tmpfile}'
        #        logger.debug(f"CMD: {cmd}")
        run_cmd(cmd, 'pull data')
        master_api.update_import(tracker_id, {
            'state': 'nels-transfer-ok',
            'tmpfile': tmpfile
        })
        submit_mq_job(tracker_id, "import")
    except Exception as e:
        import traceback
        traceback.print_tb(e.__traceback__)

        master_api.update_import(tracker_id, {'state': 'nels-transfer-error'})
        logger.error(
            f"tracker-id:{tracker_id} transfer from NeLS error: {e}")
Example #2
def galaxy_init(galaxy_config: dict) -> None:
    """ Initialises the galaxy setup from the 'galaxy' section of galaxy.yml
        (database_connection, file_path, id_secret)
    """

    logger.info("init from galaxy-config ")

    # Galaxy specific things:
    if 'galaxy' not in galaxy_config:
        raise RuntimeError('galaxy entry not found in galaxy config')

    if 'database_connection' not in galaxy_config['galaxy']:
        raise RuntimeError(
            'database_connection entry not found in galaxy config')
    global db
    db.connect(galaxy_config['galaxy']['database_connection'])

    if 'file_path' not in galaxy_config['galaxy']:
        raise RuntimeError('file_path entry not found in galaxy config')
    global galaxy_file_path
    galaxy_file_path = galaxy_config['galaxy']['file_path']

    if 'id_secret' not in galaxy_config['galaxy']:
        id_secret = "USING THE DEFAULT IS NOT SECURE!"
    else:
        id_secret = galaxy_config['galaxy']['id_secret']

    utils.init(id_secret)

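# Usage sketch: a minimal galaxy_config dict carrying the three keys
# galaxy_init() reads (the values are hypothetical placeholders).
galaxy_config = {
    'galaxy': {
        'database_connection': 'postgresql://galaxy:pw@localhost/galaxy',
        'file_path': '/srv/galaxy/datasets',
        'id_secret': 'change-me',
    }
}
galaxy_init(galaxy_config)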
Example #3
    def server_stop(self, id: str, timeout: int = 300):
        """ stops a server
        
        Args:
        id: the name of the server
        timeout: max time (s) to wait for the server to shotdown

        Returns:
        None
        
        Raises:
        TimeoutError: if the server is not in shutdown status within the timeout time
        """

        self.check_connection()
        logger.debug("Stopping server id{} ".format(id))

        server = self._connection.compute.get_server(id)
        self._connection.compute.stop_server(server)
        while True:
            server = self._connection.compute.get_server(id)
            if server.status.lower() == 'shutoff':
                logger.info("Server stopped id:{}".format(id))
                return

            timeout -= 1
            if timeout <= 0:
                raise TimeoutError('timeout before the VM was shut down')

            logger.debug("sleeping in server stop TO:{} status:{}".format(
                timeout, server.status))
            time.sleep(1)
Example #4
def get_configuration(config_file: str) -> Munch:

    config = readin_config_file(config_file)

    if 'use_db_settings' in config.daemon and config.daemon.use_db_settings:
        logger.info('Using the database for config/settings')
        store_config_in_db_if_empty(config.daemon.database, config)
        config = config_from_db(config.daemon.database)

    return config
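# The function expects a `daemon` section in the config; a hypothetical
# YAML equivalent of what readin_config_file() would return:
#
# daemon:
#   use_db_settings: true
#   database: postgresql://user:pw@localhost/daemon_db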
Example #5
    def patch(self):
        logger.debug("patch TOS")
        user_tos = self.get_tos()
        data = tornado.json_decode(self.request.body)

        if 'status' in data and data['status'] == 'accepted':
            logger.info("Updating TOS for {}".format(user_tos['user_id']))
            user_tos['status'] = 'accepted'
            user_tos['tos_date'] = datetime.datetime.now()
            db.update_tos(user_tos)
            return self.send_response_204()

        return self.send_response_400()
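# Client-side sketch of the PATCH this handler expects; the /tos/ route comes
# from the URL table in Example #17, the host is hypothetical:
#
# curl -X PATCH https://nga.example/tos/ -d '{"status": "accepted"}'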
Example #6
def run_daemon() -> None:
    """ Creates the ecc daemon loop that creates and destroys nodes etc.
    """

    while True:

        # get the current number of nodes and jobs
        ecc.update_nodes_status()

        nodes_total = ecc.nodes_total()
        nodes_idle = ecc.nodes_idle()
        jobs_pending = slurm_utils.jobs_pending()

        logger.debug(
            f"nodes_total: {nodes_total}, nodes_idle: {nodes_idle}, jobs_pending: {jobs_pending}"
        )

        # Below the min number of nodes needed for our setup
        if nodes_total < config.ecc.nodes_min:
            logger.info(
                "We are below the min number of nodes, creating {} nodes".
                format(config.ecc.nodes_min - nodes_total))

            ecc.create_nodes(cloud_init_file=config.ecc.cloud_init,
                             count=config.ecc.nodes_min - nodes_total)

        # There are jobs queuing, let's see what we should do

        # Got room to make some additional nodes
        elif (jobs_pending and nodes_idle == 0
              and nodes_total < int(config.ecc.nodes_max)):

            logger.info(
                "We got stuff to do, creating some additional nodes...")

            ecc.create_nodes(cloud_init_file=config.ecc.cloud_init, count=1)

        # We have idle nodes we no longer need and can delete some without going below the min cutoff
        elif jobs_pending == 0 and nodes_idle and nodes_total > config.ecc.nodes_min:

            nr_of_nodes_to_delete = nodes_total - int(config.ecc.nodes_min)

            logger.info(f"Deleting {nr_of_nodes_to_delete} idle nodes... ")
            ecc.delete_idle_nodes(nr_of_nodes_to_delete)

        else:
            logger.info("Nothing to change.")

        logger.info("Napping for {} second(s).".format(config.ecc.sleep))
        time.sleep(config.ecc.sleep)
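# The loop reads these config.ecc values; a hypothetical YAML equivalent:
#
# ecc:
#   nodes_min: 2
#   nodes_max: 8
#   sleep: 30
#   cloud_init: /etc/ecc/cloud_init.yml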
Example #7
def get_configurations(config_files: list) -> Munch:

    config = Munch()

    for config_file in config_files:
        next_config = readin_config_file(config_file)
        # Merge this file's settings into the accumulated config; later files win on key collisions
        config.update(next_config)

    if 'use_db_settings' in config.daemon and config.daemon.use_db_settings:
        logger.info('Using the database for config/settings')
        store_config_in_db_if_empty(config.daemon.database, config)
        config = config_from_db(config.daemon.database)

    return config
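# Usage sketch: later files override earlier ones on key collisions, so the
# site-specific file goes last (the file names are hypothetical).
config = get_configurations(['defaults.yml', 'site.yml'])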
Example #8
def init(args):
    global config
    if args.config:
        config = config_utils.readin_config_file(args.config[0])
        logger.init(name=program_name,
                    log_file=config.ecc.get('logfile', None))
        logger.set_log_level(args.verbose)
        logger.info(f'{program_name} (v:{version})')
        config.ecc.name_regex = config.ecc.name_template.format(r"(\d+)")
        ecc.set_config(config)
        ecc.openstack_connect(config.openstack)
        cloudflare_utils.init(config.ecc.cloudflare_apikey,
                              config.ecc.cloudflare_email)
    else:
        logger.init(name=program_name)
        logger.set_log_level(args.verbose)
        logger.info(f'{program_name} (v:{version})')
Example #9
def main():

    parser = argparse.ArgumentParser(description='consumes messages from a message queue')

    parser.add_argument('-c',
                        '--config',
                        required=True,
                        help="conductor config file")
    parser.add_argument('-l',
                        '--logfile',
                        default=None,
                        help="Logfile to write to, default is stdout")
    parser.add_argument('-v',
                        '--verbose',
                        default=4,
                        action="count",
                        help="Increase the verbosity of logging output")

    args = parser.parse_args()

    if args.logfile:
        logger.init(name='nga_runner', log_file=args.logfile, rotate_logs=True)
    else:
        logger.init(name='nga_runner')

    logger.set_log_level(args.verbose)
    logger.info(f'startup (v:{version})')

    # init() logs during startup, so the logger must be configured first
    config = init(args.config)

    api_requests.set_token(config['key'])

    global mq
    mq.connect(uri=config['mq_uri'], prefetch_count=1)

    try:
        mq.consume(route='default', callback=do_work)

    except KeyboardInterrupt:
        mq.channel.stop_consuming()

    mq.channel.close()
Example #10
def run_push_export(tracker):

    tracker_id = tracker['id']
    logger.info(f'{tracker_id}: push export start')

    try:

        instance = tracker['instance']

        master_api.update_export(tracker_id,
                                 {'state': 'nels-transfer-running'})

        history = instances[instance]['api'].get_history_export(
            export_id=tracker['export_id'])
        logger.debug(f"{tracker_id} history: {history}")
        create_time = str(tracker['create_time']).replace("-", "").replace(
            ":", "").replace(" ", "_")
        create_time = re.sub(r'\.\d+', '', create_time)
        history['name'] = history['name'].replace(" ", "_")
        dest_file = f"{tracker['destination']}/{history['name']}-{create_time}.tgz"
        logger.debug(f"{tracker_id} dest file: {dest_file}")

        ssh_info = get_ssh_credential(tracker['nels_id'])

        cmd = f'scp -o StrictHostKeyChecking=no -o BatchMode=yes -i {ssh_info["key_file"]} {tracker["tmpfile"]} "{ssh_info["username"]}@{ssh_info["hostname"]}:{dest_file}"'
        run_cmd(cmd, 'push data')
        master_api.update_export(tracker_id, {'state': 'nels-transfer-ok'})

        # remove the tmpfile before marking the export as finished
        cmd = f"rm {tracker['tmpfile']}"
        logger.debug(f"CMD: {cmd}")
        run_cmd(cmd, 'cleanup')
        master_api.update_export(tracker_id, {'state': 'finished'})
        logger.info(f'{tracker_id}: push export done')
    except Exception as e:
        import traceback
        traceback.print_tb(e.__traceback__)

        master_api.update_export(tracker_id, {'state': 'nels-transfer-error'})
        logger.error(
            f"tracker-id:{tracker_id} transfer to NeLS error: {e}")
Example #11
    def volume_create(self, size: int, name: str = None, **kwargs) -> str:
        """ Create a volume

        Args:
          size in GB
          name of the volume (set to none for UID name)

        Returns:
          id of volume (str)

        Raises:
          RuntimeError if 

        """

        volume = self._connection.create_volume(size=size, name=name)

        logger.info("Created volume id {} with the size of {}GB".format(
            volume.id, size))

        return volume.id
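# Usage sketch, assuming an initialised instance of this wrapper class
# (the `cloud` name and the values are hypothetical):
volume_id = cloud.volume_create(size=10, name='scratch-volume')
logger.info(f"new volume: {volume_id}")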
Example #12
    def post(self, instance, state_id):

        # logger.debug(f"POST VALUES: {self.request.body}")
        nels_id = int(self.get_body_argument("nelsId", default=None))
        location = self.get_body_argument("selectedFiles", default=None)

        if instance == instance_id:
            logger.debug("Direct access to state")
            state = states.get(state_id)
        else:
            logger.debug("Callback access to state")
            state = instances[instance]['api'].get_state(state_id)

        if state is None:
            return self.send_response_404()

        logger.debug(f"State info for export: {state}")

        try:
            instance_name = instances[instance]['name']
            user = state['user']
            history_id = state['history_id']
            tracking_id = self._register_export(instance_name, user,
                                                history_id, nels_id, location)

            tracking_id = utils.encrypt_value(tracking_id)

            submit_mq_job(tracking_id, "export")

            logger.info(f"Redirecting to {instances[instance]['url']}")
            self.redirect(instances[instance]['url'])

        except Exception as e:

            logger.error(f"Error during export registation: {e}")
            logger.debug(f"State info for export: {state}")
            logger.debug(f"nels_id: {nels_id}")
            logger.debug(f"location: {location}")

            self.send_response_400()
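# Client-side sketch of the form POST this handler expects; the route matches
# (r"/export/(\w+)/(\w+)/?$", Export) from the URL table in Example #17,
# host and values are hypothetical:
#
# curl -X POST https://nga.example/export/<instance-id>/<state-id>/ \
#      -d 'nelsId=1001' -d 'selectedFiles=Personal/galaxy-exports'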
Example #13
def import_history(tracker):

    tracker_id = tracker['id']
    logger.info(f'{tracker_id}: import started')

    try:

        user_id = tracker['user_id']
        user = master_api.get_user(user_id)
        logger.debug(
            f"import setup: {master_url}, {user['api_key']}, {tracker['tmpfile']}"
        )
        galaxy_instance = GalaxyInstance(master_url,
                                         key=user['api_key'],
                                         verify=certifi.where())

        logger.debug("Starting import transfer")
        galaxy_instance.histories.import_history(tracker['tmpfile'])
        logger.debug("import transfer done")
        master_api.update_import(tracker_id,
                                 {'state': 'history-import-triggered'})
        # track job!

    except Exception as e:
        import traceback
        traceback.print_tb(e.__traceback__)

        master_api.update_import(tracker_id, {'state': 'history-import-error'})
        logger.error(f"tracker-id:{tracker_id} import history error: {e}")
        return

    # clean up!
    cmd = f"rm {tracker['tmpfile']}"
    master_api.update_import(tracker_id, {'state': 'finished'})
    logger.debug(f"CMD: {cmd}")
    run_cmd(cmd, 'cleanup')
    logger.info(f'{tracker_id}: history import done')
Example #14
def run_fetch_export(tracker):

    logger.info(f'{tracker["id"]}: fetch export start')

    export_id = tracker['export_id']
    tracker_id = tracker['id']
    instance = tracker['instance']

    outfile = "{}/{}.tgz".format(tempfile.mkdtemp(dir=tmp_dir), export_id)
    master_api.update_export(tracker_id, {
        'tmpfile': outfile,
        'state': 'fetch-running'
    })

    try:

        cmd = f"curl -H 'Authorization: bearer {instances[instance]['nga_key']}' -Lo {outfile} {instances[instance]['nga_url']}/history/download/{export_id}/"
        logger.debug(f'{tracker["id"]}: fetch-cmd: {cmd}')
        run_cmd(cmd)
        logger.debug('{tracker["id"]}: fetch cmd done')
        master_api.update_export(tracker_id, {
            'tmpfile': outfile,
            'state': 'fetch-ok'
        })
        submit_mq_job(tracker_id, "export")

    except Exception as e:
        master_api.update_export(tracker_id, {
            'tmpfile': outfile,
            'state': 'fetch-error',
            'log': str(e)
        })
        logger.error(f"{tracker['id']} fetch error: {e}")

    logger.info(f'{tracker["id"]}: fetch export done')

    return
Example #15
def init(config_file: str) -> Munch:
    config = config_utils.readin_config_file(config_file)
    logger.info("init from config ")

    # set incoming and proxy keys
    api_requests.set_token(config.get('proxy_key', None))

    global master_url, nels_url, instances, master_api, tmp_dir
    master_url = config['master_url'].rstrip("/")
    nels_url = config['nels_url'].rstrip("/")
    instances = {}
    master_api = api_requests.ApiRequests(master_url, config['key'])

    global nels_storage_client_key, nels_storage_client_secret, nels_storage_url, sleep_time
    nels_storage_client_key = config['nels_storage_client_key']
    nels_storage_client_secret = config['nels_storage_client_secret']
    nels_storage_url = config['nels_storage_url'].rstrip("/")

    tmp_dir = config.get('tmp_dir', tmp_dir)
    sleep_time = config.get('sleep_time', sleep_time)

    tmp_instances = config['instances']

    for iid in tmp_instances:

        if not tmp_instances[iid].get('active'):
            continue

        instances[iid] = tmp_instances[iid]
        instance = tmp_instances[iid]
        instance['api'] = api_requests.ApiRequests(
            instance['nga_url'].rstrip("/"), instance['nga_key'])

        instances[instance['name']] = instance

    return config
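# A hypothetical YAML sketch of the config keys this init() reads:
#
# key: <incoming api key>
# proxy_key: <outgoing proxy key>
# master_url: https://usegalaxy.example/nga
# nels_url: https://nels.example
# nels_storage_client_key: <key>
# nels_storage_client_secret: <secret>
# nels_storage_url: https://nels.example/storage
# tmp_dir: /tmp/nga          # optional
# sleep_time: 30             # optional
# instances:
#   usegalaxy:
#     active: true
#     name: usegalaxy
#     nga_url: https://usegalaxy.example/nga
#     nga_key: <key>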
Example #16
def run_history_export(tracker):

    logger.info(f'{tracker["id"]}: history export start')

    instance = tracker['instance']
    try:
        info = instances[instance]['api'].get_info()
        if info['free_gb'] < 30:
            # Not enough free disk space to do this, alert sysadmin
            logger.error("Not enough free space for export, email admin.")
            master_api.update_export(tracker['id'],
                                     {'state': 'disk-space-error'})
            return
    except Exception as e:
        logger.error(f"{tracker['id']}: Fetch info error {e}")

    try:
        galaxy_instance = GalaxyInstance(instances[instance]['url'],
                                         key=instances[instance]['api_key'],
                                         verify=certifi.where())
    except Exception as e:
        logger.error(f"{tracker['id']}: Trigger export through bioblend: {e}")
        master_api.update_export(tracker['id'], {'state': 'bioblend-error'})
        return

    try:
        export_id = galaxy_instance.histories.export_history(
            tracker['history_id'], maxwait=1, gzip=True)
    except Exception as e:
        logger.error(
            f"{tracker['id']}/{tracker['instance']}: bioblend trigger export {e}"
        )
        master_api.update_export(tracker['id'], {
            'state': 'bioblend-error',
            'log': str(e)
        })
        return

    while True:

        if export_id is None or export_id == '':
            history = instances[instance]['api'].get_history_export(
                history_id=tracker['history_id'])
            logger.debug(f"history id not found !{history}")
            if history is not None and history != '':
                master_api.update_export(tracker['id'], {
                    "export_id": history['export_id'],
                    'state': 'new'
                })
                export_id = history['export_id']
            else:
                logger.error(
                    f"{tracker['id']}: No export id associated with history {tracker['history_id']}"
                )
                raise RuntimeError(
                    f"{tracker['id']}: No export id associated with history {tracker['history_id']}"
                )
        else:
            export = instances[instance]['api'].get_history_export(
                export_id=export_id)
            logger.debug(export)
            master_api.update_export(tracker['id'], {
                "export_id": export_id,
                'state': export['state']
            })

            if export['state'] in ['ok', 'error']:
                submit_mq_job(tracker['id'], 'export')
                logger.info(f'{tracker["id"]}: history export done')

                return

        logger.debug("entering sleep cycle")
        time.sleep(sleep_time)
Example #17
def main():
    # Endpoints setup

    parser = argparse.ArgumentParser(
        description='nels-galaxy-api: extending the functionality of galaxy')

    parser.add_argument('-c',
                        '--config-file',
                        required=True,
                        help="nels-galaxy-api config file")
    parser.add_argument('-l',
                        '--logfile',
                        default=None,
                        help="Logfile to write to, default is stdout")
    parser.add_argument('-v',
                        '--verbose',
                        default=4,
                        action="count",
                        help="Increase the verbosity of logging output")
    parser.add_argument('-D',
                        '--development',
                        default=False,
                        action="store_true",
                        help="run in developemt mode")

    args = parser.parse_args()

    if args.development:
        global DEV
        DEV = True

    if args.logfile:
        logger.init(name='nels-galaxy-api', log_file=args.logfile)
    else:
        logger.init(name='nels-galaxy-api')

    logger.set_log_level(args.verbose)
    logger.info(f'startup nels_galaxy_api (v:{version})')

    config = init(args.config_file)

    # Base functionality
    urls = [
        ('/', RootHandler),  # Done
        (r'/info/?$', Info),  # Done
        (r'/state/(\w+)/?$', State),  # Done

        # for the cli...
        (r'/users/?$', Users),  # Done
        (r"/user/({email_match})/histories/?$".format(
            email_match=string_utils.email_match), UserHistories),  # Done
        (r"/user/({email_match})/exports/?$".format(
            email_match=string_utils.email_match), UserExports),
        # all, brief is default #Done
        (r"/user/({email_match})/imports/?$".format(
            email_match=string_utils.email_match), UserImports),
        # all, brief is default #Done
        (r"/user/({email_match})/api-key/?$".format(
            email_match=string_utils.email_match), UserApikey),  # to test

        # for proxying into the usegalaxy tracking api, will get user email and instance from the galaxy client.
        (r"/user/exports/?$", ExportsListProxy),  # done
        (r"/user/export/(\w+)/?$", UserExport),  # done
        (r"/user/imports/?$", UserImportsList),  #
        (r"/user/import/(\w+)/?$", UserImport),  # done
        (r'/user/(\w+)/?$', User),  # Done
        (r'/history/export/request/?$',
         HistoryExportRequest),  # Register export request #Done
        (r'/history/import/request/?$', HistoryImportRequest),  #
        (r'/history/export/(\w+)?$',
         HistoryExport),  # export_id, last one per history is default # skip
        (r'/history/export/?$',
         HistoryExport),  # possible to search by history_id # skip
        (r'/history/import/(\w+)?$',
         HistoryImport),  # export_id, last one per history is default # skip
        (r'/history/import/?$',
         HistoryImport),  # possible to search by history_id # skip
        (r'/history/exports/(all)/?$',
         HistoryExportsList),  # for the local instance, all, brief is default # done
        (r'/history/exports/?$',
         HistoryExportsList),  # for the local instance, all, brief is default # done
        (r'/history/imports/(all)/?$',
         HistoryImportsList),  # for the local instance, all, brief is default # done
        (r'/history/imports/?$',
         HistoryImportsList),  # for the local instance, all, brief is default # done
        (r'/history/download/(\w+)/?$',
         HistoryDownload),  # fetching exported histories # skip
    ]

    # Terms of service server:
    if 'tos_server' in config and config['tos_server']:
        urls += [(r'/tos/?$', Tos)]

    # for the orchestrator functionality:
    if 'master' in config and config['master']:
        logger.debug("setting the master endpoints")
        urls += [
            (r'/rabbitmq_status/?$',
             RabbitMQStatus),  # check rabbitmq connection status
            (r'/export/(\w+)/requeue/?$',
             RequeueExport),  # requeue export request
            (r'/import/(\w+)/requeue/?$',
             RequeueImport),  # requeue import request
            (r"/export/(\w+)/(\w+)/?$",
             Export),  # instance-id, state-id (post) # done
            (r'/export/(\w+)/?$',
             Export),  # get or patch an export request # skip
            (r"/import/(\w+)/?$", Import),  # state-id (post)
            (r"/exports/({email_match})/?$".format(
                email_match=string_utils.email_match), ExportsList),
            # user_email # done
            (r"/exports/({email_match})/(\w+)/?$".format(
                email_match=string_utils.email_match), ExportsList),
            # user_email, instance. If user_email == all, export all entries for the instance # done
            (r"/exports/(all)/(\w+)/?$", ExportsList),  # done
            (r"/imports/({email_match})/?$".format(
                email_match=string_utils.email_match), ImportsList),
            # user_email #
            # user_email, instance. If user_email == all, import all entries for the instance
            (r'/exports/?$',
             ExportsList),  # all entries in the table, for the cli (different key?) # done
            (r'/imports/?$',
             ImportsList),  # all entries in the table, for the cli (different key?) # done
            (r'/jobs/?$',
             JobsList),  # all entries in the table, for the cli (different key?) # done

            # For testing the setup -> ONLY FOR THE MASTER
            (r'/proxy/?$',
             ProxyTest),  # an endpoint for testing the proxy connection # done

            # Might drop these two
            (r'/decrypt/(\w+)/?$', Decrypt),
            (r'/encrypt/(\w+)/?$', Encrypt)
        ]

    if DEV:
        sid = states.set({'id': 1234, 'name': 'tyt'})
        logger.info(f"TEST STATE ID: {sid}")

    logger.info(f"Running on port: {config.get('port', 8008)}")
    try:
        tornado.run_app(urls, port=config.get('port', 8008))
    except KeyboardInterrupt:
        logger.info('stopping nels_galaxy_api')
Example #18
def init(config_file: str) -> Munch:
    """ Initialises the setup from the config file and the galaxy config file,
        and sets the global variables (galaxy_url, master_url, nels_url,
        instance_id, tos_grace_period, proxy_keys, instances, no_proxy)
    """

    config = config_utils.readin_config_file(config_file)
    galaxy_config = config_utils.readin_config_file(config['galaxy_config'])

    galaxy_init(galaxy_config)

    logger.info("init from config ")

    # set incoming and proxy keys
    tornado.set_token(config.get('key', None))
    api_requests.set_token(config.get('proxy_key', None))

    global galaxy_url, master_url, nels_url, instance_id
    galaxy_url = config['galaxy_url'].rstrip("/")
    master_url = config['master_url'].rstrip("/")
    instance_id = config['id'].rstrip("/")
    nels_url = config['nels_url'].rstrip("/")

    if 'tos_server' in config and config['tos_server']:
        logger.info("Running with the tos-server")

        db.create_tos_table()
        global tos_grace_period
        tos_grace_period = config.get('grace_period', 14)

    if 'master' in config and config['master']:
        logger.info("Running with the master API")
        db.create_export_tracking_table()
        db.create_export_tracking_logs_table()
        db.create_import_tracking_table()
        db.create_import_tracking_logs_table()

        mq.connect(uri=config['mq_uri'])

        global proxy_keys, instances, no_proxy
        proxy_keys = {}
        instances = {}

        no_proxy = True

        tmp_instances = config['instances']

        for iid in tmp_instances:

            if not tmp_instances[iid].get('active'):
                continue

            instances[iid] = tmp_instances[iid]
            instance = tmp_instances[iid]

            instances[instance['name']] = instance
            instances[instance['name']]['api'] = api_requests.ApiRequests(
                instance['nga_url'].rstrip("/"), instance['nga_key'])
            if instance['proxy_key'] in proxy_keys:
                logger.warn(
                    f"Proxy key for {instance['name']} is also used for {proxy_keys[instance['proxy_key']]}"
                )

            proxy_keys[instance['proxy_key']] = instance['name']

    return config