Example #1
    def extract(self, ex_path, version, location=None, sdk_build=False):
        if os.path.exists(ex_path):
            utils.rmtree(ex_path, ignore_errors=True)

        path = location or self.save_file_path(version, sdk_build=sdk_build)

        file = self.extract_class(path, *self.extract_args)
        # Currently, Python's zipfile extraction mechanism doesn't
        # copy file permissions, resulting in a binary that doesn't
        # work. Copied from a patch here:
        # http://bugs.python.org/file34873/issue15795_cleaned.patch
        if path.endswith('.zip'):
            members = file.namelist()
            for zipinfo in members:
                minfo = file.getinfo(zipinfo)
                target = file.extract(zipinfo, ex_path)
                mode = minfo.external_attr >> 16 & 0x1FF
                os.chmod(target, mode)
        else:
            file.extractall(ex_path)

        if path.endswith('.tar.gz'):
            dir_name = utils.path_join(
                ex_path,
                os.path.basename(path).replace('.tar.gz', ''))
        else:
            dir_name = utils.path_join(
                ex_path,
                os.path.basename(path).replace('.zip', ''))

        if os.path.exists(dir_name):
            for p in os.listdir(dir_name):
                abs_file = utils.path_join(dir_name, p)
                utils.move(abs_file, ex_path)
            utils.rmtree(dir_name, ignore_errors=True)
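The permission-handling loop above is the core of the zipfile workaround. For reference, a minimal, self-contained sketch of the same technique using only the standard library (the archive and destination names are placeholders):

import os
import zipfile

def extract_zip_with_permissions(zip_path, dest_dir):
    # zipfile.ZipFile.extract() does not restore Unix file modes, but
    # the original mode is stored in the upper 16 bits of external_attr.
    with zipfile.ZipFile(zip_path) as archive:
        for info in archive.infolist():
            target = archive.extract(info, dest_dir)
            mode = (info.external_attr >> 16) & 0o777
            if mode:  # entries created on Windows may carry no mode bits
                os.chmod(target, mode)

# usage: extract_zip_with_permissions('archive.zip', 'dest/')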
Example #2
def alpha_beta(board, depth, player, alpha, beta, maxim):
    if depth == 0 or utils.lastMove(board, player):
        return utils.score(board, player), -1
    valid = utils.get_moves(board, player)
    move = -1  # fallback when there are no valid moves (matches the -1 above)
    if maxim:
        v = -99999
        for move in valid:
            (tmp_board, tot) = utils.move(move % 8, int(move / 8), player,
                                          copy.deepcopy(board))
            v = max(
                alpha_beta(tmp_board, depth - 1, utils.opponent(player), alpha,
                           beta, False)[0], v)
            alpha = max(alpha, v)
            if beta <= alpha:
                break
        return v, move
    else:  # minimizingPlayer
        v = 99999
        for move in valid:
            (tmp_board, tot) = utils.move(move % 8, int(move / 8), player,
                                          copy.deepcopy(board))
            v = min(
                alpha_beta(tmp_board, depth - 1, utils.opponent(player), alpha,
                           beta, True)[0], v)
            beta = min(beta, v)
            if beta <= alpha:
                break
        return v, move
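The snippet above depends on project-specific utils helpers for an Othello-style board. For reference, a self-contained sketch of the same alpha-beta pattern over a hard-coded game tree (the tree and its leaf scores are made up for illustration):

def alpha_beta_sketch(node, alpha, beta, maximizing):
    # Leaves are numeric scores; internal nodes are lists of children.
    if not isinstance(node, list):
        return node
    if maximizing:
        value = float('-inf')
        for child in node:
            value = max(value, alpha_beta_sketch(child, alpha, beta, False))
            alpha = max(alpha, value)
            if beta <= alpha:  # beta cutoff: the minimizer avoids this line
                break
        return value
    else:
        value = float('inf')
        for child in node:
            value = min(value, alpha_beta_sketch(child, alpha, beta, True))
            beta = min(beta, value)
            if beta <= alpha:  # alpha cutoff
                break
        return value

tree = [[3, 5], [6, [9, 8]], [1, 2]]
print(alpha_beta_sketch(tree, float('-inf'), float('inf'), True))  # -> 6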
def _create_postgres_pass_file(host, db_name, username, password):
    pgpass_path = '/root/.pgpass'
    ctx.logger.info('Creating postgresql pgpass file: {0}'.format(
        pgpass_path))
    postgresql_default_port = 5432
    pgpass_content = '{host}:{port}:{db_name}:{user}:{password}'.format(
        host=host,
        port=postgresql_default_port,
        db_name=db_name,
        user=username,
        password=password
    )
    # The .pgpass file is used by the mgmtworker in the snapshot workflow
    # and needs to be under the home directory of the user who runs the
    # snapshot (currently root)
    if os.path.isfile(pgpass_path):
        ctx.logger.debug('Deleting {0} file...'.format(pgpass_path))
        os.remove(pgpass_path)
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(pgpass_content)
        temp_file.flush()
        utils.chmod('0600', temp_file.name)
        utils.move(source=temp_file.name,
                   destination=pgpass_path,
                   rename_only=True)
        ctx.logger.debug('Postgresql pass file {0} created'.format(
            pgpass_path))
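The pattern above (write to a temporary file, chmod it to 0600, then move it into place) can be reproduced with the standard library alone. A minimal sketch, with the path and credentials as placeholder values:

import os
import tempfile

def write_pgpass(path, host, port, db_name, user, password):
    content = '{0}:{1}:{2}:{3}:{4}\n'.format(host, port, db_name, user, password)
    # Create the temp file next to the target so the final os.replace()
    # is an atomic rename on the same filesystem.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    try:
        os.write(fd, content.encode())
    finally:
        os.close(fd)
    os.chmod(tmp_path, 0o600)  # libpq ignores a group/world-readable .pgpass
    os.replace(tmp_path, path)

# usage: write_pgpass(os.path.expanduser('~/.pgpass'),
#                     'localhost', 5432, 'mydb', 'myuser', 's3cr3t')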
def _deploy_security_configuration():
    ctx.logger.info('Deploying REST Security configuration file...')

    # Generating random hash salt and secret key
    security_configuration = {
        'hash_salt': base64.b64encode(os.urandom(32)),
        'secret_key': base64.b64encode(os.urandom(32)),
        'encoding_alphabet': _random_alphanumeric(),
        'encoding_block_size': 24,
        'encoding_min_length': 5
    }

    # Pre-creating paths so permissions fix can work correctly
    # in mgmtworker
    for path in utils.MANAGER_RESOURCES_SNAPSHOT_PATHS:
        utils.mkdir(path)
    utils.chown(
        CLOUDIFY_USER, CLOUDIFY_GROUP,
        utils.MANAGER_RESOURCES_HOME)
    utils.sudo(['ls', '-la', '/opt/manager'])

    current_props = runtime_props['security_configuration']
    current_props.update(security_configuration)
    runtime_props['security_configuration'] = current_props

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        json.dump(security_configuration, f)
    rest_security_path = join(runtime_props['home_dir'], 'rest-security.conf')
    utils.move(path, rest_security_path)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, rest_security_path)
def configure_manager(manager_config_path, manager_config):
    '''Sets config defaults and creates the config file'''
    _, temp_config = tempfile.mkstemp()
    config = ConfigParser()

    config.add_section('Credentials')
    config.set('Credentials', 'subscription_id',
               manager_config['subscription_id'])
    config.set('Credentials', 'tenant_id', manager_config['tenant_id'])
    config.set('Credentials', 'client_id', manager_config['client_id'])
    config.set('Credentials', 'client_secret', manager_config['client_secret'])

    config.add_section('Azure')
    config.set('Azure', 'location', manager_config['location'])

    with open(temp_config, 'w') as temp_config_file:
        config.write(temp_config_file)

    utils.mkdir(os.path.dirname(manager_config_path), use_sudo=True)
    utils.move(temp_config, manager_config_path)

    # Install prerequisites for the azure-storage Python package
    utils.yum_install('gcc', service_name='azure-storage')
    utils.yum_install('python-devel', service_name='azure-storage')
    utils.yum_install('openssl-devel', service_name='azure-storage')
    utils.yum_install('libffi-devel', service_name='azure-storage')
    utils.yum_install('python-cffi', service_name='azure-storage')
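For reference, the ConfigParser calls above emit a plain INI file. A runnable sketch with placeholder values, showing the resulting format:

import sys
from configparser import ConfigParser  # stdlib; named ConfigParser in Python 2

config = ConfigParser()
config.add_section('Credentials')
config.set('Credentials', 'subscription_id', '<subscription-id>')
config.set('Credentials', 'client_id', '<client-id>')
config.add_section('Azure')
config.set('Azure', 'location', 'westeurope')
config.write(sys.stdout)
# [Credentials]
# subscription_id = <subscription-id>
# client_id = <client-id>
#
# [Azure]
# location = westeurope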
Example #7
def tos(n):
    n_minus1 = n - 1
    L = list(range(0, n))
    O = [L]
    P = [(0, 0, n_minus1)]
    while P:
        print("\n")
        print("L =", L)
        print("P =", P)
        (i, j, l) = P[-1]
        if j != n_minus1:
            (next_i, next_j) = (i, j + 1)
        else:
            (next_i, next_j) = (i + 1, i + 2)
        print("(i+, j+): ", "(" + repr(next_i) + " , " + repr(next_j) + ")")
        if next_i < l:
            P[-1] = (next_i, next_j, l)
            L = move(L, next_i, next_j)
            O.append(L)
            P.append((0, 0, next_i))
            print("Move to", L)
        else:
            P.pop()
            if P:
                (parent_i, parent_j, parent_l) = P[-1]
                L = move(L, parent_j, parent_i - 1)
                print("Backtrack to", L, "by taking",
                      "(" + repr(parent_j) + " ," + repr(parent_i - 1) + ")")
    return O
Example #8
def configure_riemann():
    riemann_config_path = '/etc/riemann'
    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)

    # Our Riemann configuration will (by default) try to read these
    # environment variables. If they don't exist, it will assume
    # they're found at "localhost":
    # export REST_HOST=""
    # export RABBITMQ_HOST=""

    # We inject the management_ip for both of these into Riemann's
    # systemd config. These can potentially differ if the manager and
    # rabbitmq are running on different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def run_burger_extractor(version, overwrite=False):
    burger_json = f"output/burger_{version}.json"
    if not os.path.isfile(burger_json):
        print(c.BOLD, "First you need to run Burger on ", version, c.RESET)
        return False

    os.chdir('burger-extractor')

    print("Running Burger extractor...")

    version_before = utils.get_version_before(version)
    assert version_before  # should never happen

    cmd = f"node src/index.js ../{burger_json} {version_before}"
    print(c.WARNING, ">", cmd, c.RESET)
    os.system(cmd)

    for filename in os.listdir('out'):
        if filename.endswith('.json'):
            utils.journal_write(f"../output/minecraft-data/{version}/{filename}")

    os.makedirs(f"../output/minecraft-data/{version}/", exist_ok=True)
    utils.move("out/*.json", f"../output/minecraft-data/{version}/")

    os.chdir('..')

    return True
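utils.move is given a glob pattern here ('out/*.json'). With the standard library alone, the equivalent is expanding the pattern and moving each match; a minimal sketch:

import glob
import os
import shutil

def move_glob(pattern, dest_dir):
    # shutil.move has no glob support; expand the pattern ourselves.
    os.makedirs(dest_dir, exist_ok=True)
    for path in glob.glob(pattern):
        shutil.move(path, os.path.join(dest_dir, os.path.basename(path)))

# usage: move_glob('out/*.json', '../output/minecraft-data/<version>/')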
Example #12
def explore(arm):
    """ Use this to extract positions after we move the dvrk. """
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print("pos, rot: {}, {}".format(pos, rot))
    pos[2] += 0.02
    U.move(arm, pos, rot)
    print("pos, rot: {}, {}".format(pos, rot))
    def extract(self, ex_path, version):
        if os.path.exists(ex_path):
            utils.rmtree(ex_path, ignore_errors=True)

        path = self.save_file_path(version)

        file = self.extract_class(path,
                                  *self.extract_args)
        # Currently, Python's zipfile extraction mechanism doesn't
        # copy file permissions, resulting in a binary that doesn't
        # work. Copied from a patch here:
        # http://bugs.python.org/file34873/issue15795_cleaned.patch
        if path.endswith('.zip'):
            members = file.namelist()
            for zipinfo in members:
                minfo = file.getinfo(zipinfo)
                target = file.extract(zipinfo, ex_path)
                mode = minfo.external_attr >> 16 & 0x1FF
                os.chmod(target, mode)
        else:
            file.extractall(ex_path)

        if path.endswith('.tar.gz'):
            dir_name = utils.path_join(ex_path, os.path.basename(path).replace('.tar.gz',''))
        else:
            dir_name = utils.path_join(ex_path, os.path.basename(path).replace('.zip',''))

        if os.path.exists(dir_name):
            for p in os.listdir(dir_name):
                abs_file = utils.path_join(dir_name, p)
                utils.move(abs_file, ex_path)
            utils.rmtree(dir_name, ignore_errors=True)
def _deploy_security_configuration():
    ctx.logger.info('Deploying REST Security configuration file...')

    # Generating random hash salt and secret key
    security_configuration = {
        'hash_salt': base64.b64encode(os.urandom(32)),
        'secret_key': base64.b64encode(os.urandom(32)),
        'encoding_alphabet': _random_alphanumeric(),
        'encoding_block_size': 24,
        'encoding_min_length': 5
    }

    # Pre-creating paths so permissions fix can work correctly
    # in mgmtworker
    for path in utils.MANAGER_RESOURCES_SNAPSHOT_PATHS:
        utils.mkdir(path)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, utils.MANAGER_RESOURCES_HOME)
    utils.sudo(['ls', '-la', '/opt/manager'])

    current_props = runtime_props['security_configuration']
    current_props.update(security_configuration)
    runtime_props['security_configuration'] = current_props

    for key in ['admin_username', 'admin_password']:
        security_configuration[key] = current_props[key]

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        json.dump(security_configuration, f)
    rest_security_path = join(runtime_props['home_dir'], 'rest-security.conf')
    utils.move(path, rest_security_path)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, rest_security_path)
    utils.chmod('g+r', rest_security_path)
def deploy_script(script_name):
    config_file_temp_destination = join(tempfile.gettempdir(), script_name)
    ctx.download_resource_and_render(
        join('components', 'manager-ip-setter', 'scripts', script_name),
        config_file_temp_destination)
    remote_script_path = join(MANAGER_IP_SETTER_DIR, script_name)
    utils.move(config_file_temp_destination, remote_script_path)
    utils.chmod('+x', remote_script_path)
    utils.systemd.configure(MANAGER_IP_SETTER_SERVICE_NAME)
Example #16
    def _move(source, dest):
        if not lexists(source):
            raise Error("no such file or directory " + `source`)

        if not exists(dirname(dest)):
            os.makedirs(dirname(dest))

        utils.remove_any(dest)
        utils.move(source, dest)
    def move(self, path):
        utils.can_be_removed(path)
        entry = Entry(path)
        if not self.dry:
            dist = os.path.join(self.location, entry.id)
            utils.move(entry.location, dist)
            self.db.create(entry)

        return entry
def _deploy_security_configuration():
    ctx.logger.info('Deploying REST Security configuration file...')
    security_configuration = \
        ctx.instance.runtime_properties['security_configuration']
    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        f.write(security_configuration)
    utils.move(path, join(REST_SERVICE_HOME, 'rest-security.conf'))
Example #19
def retrieve_landfire(opt_search, local_path, file_name):
    file_path = osp.join(local_path, file_name + '.tif')
    info_path = file_path + '.size'
    if osp.exists(file_path):
        if osp.getsize(file_path) == int(open(info_path).read()):
            logging.info('file {} already exists locally'.format(file_path))
            return file_path

    # submit a job to the Download Service
    requestID = server_service('initiateDownload', opt_search)
    logging.info('request ID: {}'.format(requestID))

    arg = 'downloadID={}'.format(requestID)
    stat = None
    retries = 50
    while stat != 400 and retries:
        # call Download service with request id to get status
        status = server_service('getDownloadStatus', arg)
        logging.info('status: {}'.format(status))
        stat = int(status.split(',')[0])
        retries -= 1

    if not retries and stat != 400:
        logging.error(
            'maximum number of retries, the server is not responding...')
        raise LandfireError('Failed downloading the data...')

    # once a status of 400 has been received, retrieve from the URL
    url = server_service('getData', arg)
    logging.info('download URL: {}'.format(url))
    download_url(url, local_path + '.zip')

    # send complete message back to server so it can cleanup the job
    status = server_service('setDownloadComplete', arg)
    logging.info('status: {}'.format(status))

    # unzip and save
    local_zip = local_path + '.zip'
    local_tmp = osp.join(local_path, 'tmp')
    with zipfile.ZipFile(local_zip, 'r') as f:
        f.extractall(local_tmp)
    tif_files = glob.glob(osp.join(local_tmp, '*.tif'))
    if len(tif_files) == 1:
        tif_file = tif_files[0]
        file_size = osp.getsize(tif_file)
        move(tif_file, file_path)
        with open(ensure_dir(info_path), 'w') as f:
            f.write(str(file_size))
    else:
        logging.warning('not enough or too many TIF files, skipping...')
        return None
    remove(local_zip)
    remove(local_zip + '.size')
    delete(local_tmp)
    logging.info('data correctly retrieved as {}'.format(file_path))
    return file_path
def configure_logging():
    ctx.logger.info('Configuring Management worker logging...')
    logging_config_dir = '/etc/cloudify'
    config_name = 'logging.conf'
    config_file_destination = join(logging_config_dir, config_name)
    config_file_source = join(CONFIG_PATH, config_name)
    utils.mkdir(logging_config_dir)
    config_file_temp_destination = join(tempfile.gettempdir(), config_name)
    ctx.download_resource(config_file_source, config_file_temp_destination)
    utils.move(config_file_temp_destination, config_file_destination)
def install_optional(rest_venv):
    props = ctx_properties

    dsl_parser_source_url = props['dsl_parser_module_source_url']
    rest_client_source_url = props['rest_client_module_source_url']
    plugins_common_source_url = props['plugins_common_module_source_url']
    script_plugin_source_url = props['script_plugin_module_source_url']
    agent_source_url = props['agent_module_source_url']
    pip_constraints = props['pip_constraints']

    rest_service_source_url = props['rest_service_module_source_url']

    constraints_file = utils.write_to_tempfile(pip_constraints) \
        if pip_constraints else None

    # This allows upgrading modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')

    if dsl_parser_source_url:
        utils.install_python_package(dsl_parser_source_url, rest_venv,
                                     constraints_file)
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, rest_venv,
                                     constraints_file)
    if plugins_common_source_url:
        utils.install_python_package(plugins_common_source_url, rest_venv,
                                     constraints_file)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url, rest_venv,
                                     constraints_file)
    if agent_source_url:
        utils.install_python_package(agent_source_url, rest_venv,
                                     constraints_file)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        tmp_dir = utils.untar(manager_repo, unique_tmp_dir=True)
        rest_service_dir = join(tmp_dir, 'rest-service')
        resources_dir = join(tmp_dir, 'resources/rest-service/cloudify/')

        ctx.logger.info('Installing REST Service...')
        utils.install_python_package(rest_service_dir, rest_venv,
                                     constraints_file)

        ctx.logger.info('Deploying Required Manager Resources...')
        utils.move(resources_dir, utils.MANAGER_RESOURCES_HOME)

        utils.remove(tmp_dir)

    if constraints_file:
        os.remove(constraints_file)
Example #23
def deploy_utils():
    temp_destination = join(tempfile.gettempdir(), 'utils.py')
    ctx.download_resource_and_render(
        join('components', 'utils.py'),
        temp_destination,
    )
    utils_path = join(MANAGER_IP_SETTER_DIR, 'utils.py')
    utils.move(temp_destination, utils_path)

    utils.chmod('550', utils_path)
    utils.chown('root', utils.CLOUDIFY_GROUP, utils_path)
def preconfigure_restservice():

    rest_service_home = '/opt/manager'

    ctx.logger.info('Deploying REST Security configuration file...')
    sec_config = str(ctx.target.node.properties['security'])
    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        f.write(sec_config)
    utils.move(path, os.path.join(rest_service_home, 'rest-security.conf'))

    utils.systemd.configure('restservice')
Example #26
def preconfigure_restservice():

    rest_service_home = '/opt/manager'

    ctx.logger.info('Deploying REST Security configuration file...')
    sec_config = utils.load_manager_config_prop('security')
    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        f.write(sec_config)
    utils.move(path, os.path.join(rest_service_home, 'rest-security.conf'))

    utils.systemd.configure(REST_SERVICE_NAME, render=False)
Example #28
 def execute(self, userdata):
     move('Depth', 'Command', 3.0)
     for i in range(self.DIVE_TIME):
         if self.preempt_requested():
             self.service_preempt()
             return 'preempted'
         rospy.sleep(self.TIME_SLICE)
     move('Forward', 'Command', -.75)
     for i in range(4):
         if self.preempt_requested():
             self.service_preempt()
             return 'preempted'
         rospy.sleep(self.TIME_SLICE)
     return 'succeeded'         
Example #29
 def calc(self) -> Tuple[List[Tuple[int, float]]]:
     """
     计算一目均衡图中的转折线, 基准线, 延迟线, 先行线A与先行线B
     """
     ohlcs = self._get_ohlcs_for_calc()
     turns = max_min_avg(ohlcs, 9)
     bases = max_min_avg(ohlcs, 26)
     delays = move(ohlcs, -26, value=lambda o: o[4])
     _turn_base_avgs = [(z[0][0], (z[0][1] + z[1][1]) / 2)
                        for z in zip(turns, bases)]
     antes_1 = move(_turn_base_avgs, 26)
     _max_min_avg52s = max_min_avg(ohlcs, 52)
     antes_2 = move(_max_min_avg52s, 26)
     return turns, bases, delays, antes_1, antes_2
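The move helper used by calc is not shown. From its call sites it appears to shift a series along the index axis, optionally mapping each element to a y-value first. A plausible sketch under that assumption (each output point taken to be an (index, value) pair; this is a reconstruction, not the project's actual implementation):

def move(series, offset, value=None):
    # Shift each point's index by `offset`; `value`, if given, extracts
    # the y-value from a raw element (e.g. the close price of an OHLC
    # tuple). Assumed semantics, reconstructed from the call sites above.
    if value is None:
        return [(i + offset, v) for i, v in series]
    return [(e[0] + offset, value(e)) for e in series]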
 def execute(self, userdata):
     move('Depth', 'Command', 2.0)
     for i in range(4):
         if self.preempt_requested():
             self.service_preempt()
             return 'preempted'
         rospy.sleep(1)
     forward(.75)
     for i in range(self.GO_FORWARD_TIMEOUT):
         if self.preempt_requested():
             self.service_preempt()
             return 'preempted'
         rospy.sleep(1)
     return 'succeeded'
Example #31
    def close(self):
        "Close a Logger object"
        # Don't try and log if Dir::UrgencyLog is not configured
        if self.log_file is None:
            return

        self.log_file.flush()
        self.log_file.close()

        if self.writes:
            new_filename = "%s/install-urgencies-%s" % (self.log_dir, self.timestamp)
            move(self.log_filename, new_filename)
        else:
            os.unlink(self.log_filename)
Example #32
def install_java():
    java_source_url = ctx_properties['java_rpm_source_url']

    ctx.logger.info('Installing Java...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.yum_install(java_source_url, SERVICE_NAME)

    utils.mkdir(LOG_DIR)

    # Java install log is dropped in /var/log.
    # Move it to live with the rest of the cloudify logs
    java_install_log = '/var/log/java_install.log'
    if os.path.isfile(java_install_log):
        utils.move(java_install_log, LOG_DIR)
 def _move_to_random_home_position(self):
     """ Moves to one of the home positions we have set up. 
     
     One change: I adjust the angles to be in line with what we have. The
     reason is that we'll keep referring back to this angle when we move, and
     I want it to be inside the actual ranges of yaw, pitch, and roll we're
     considering. Hence, the `better_rotation` that's here. So, we move to
     the random home position, and just fix up the rotation.
     """
     index = np.random.randint(len(self.orien))
     home_pos = self.info['pos_' + self.orien[index]]
     home_rot = self.info['rot_' + self.orien[index]]
     U.move(self.arm, home_pos, home_rot)
     #better_rotation = [self._sample_yaw(), self._sample_pitch(), self._sample_roll()]
     better_rotation = self._sample_safe_rotation(home_rot)
     U.move(self.arm, home_pos, better_rotation)
    def take_action(self, state, action):

        # randomly select the move direction according to
        # the distribution of probabilities
        move_direction = move(self.move_prob[action])

        new_x = state[0]
        new_y = state[1]

        if move_direction == 0 and new_y > 0:
            new_y -= 1
        elif move_direction == 1 and new_y < self.size - 1:
            new_y += 1
        elif move_direction == 2 and new_x > 0:
            new_x -= 1
        elif move_direction == 3 and new_x < self.size - 1:
            new_x += 1

        if new_x == 0 and new_y == 0:
            return [(new_x, new_y), 100.0]
        elif new_x == 0 and new_y == self.size - 1:
            return [(new_x, new_y), 10.0]
        elif new_x == self.size - 1 and new_y == 0:
            return [(new_x, new_y), 10.0]
        elif new_x >= self.size / 2 and new_y >= self.size / 2:
            return [(new_x, new_y), -1.0]

        return [(new_x, new_y), 0.0]
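The move helper here is expected to sample a direction index from a discrete probability distribution (self.move_prob[action] presumably being something like [0.7, 0.1, 0.1, 0.1]). A minimal sketch of such a sampler, assuming that interface:

import random

def move(probabilities):
    # Sample an index according to a discrete probability distribution.
    # (Assumed behaviour of the helper used in take_action above.)
    r = random.random()
    cumulative = 0.0
    for index, p in enumerate(probabilities):
        cumulative += p
        if r < cumulative:
            return index
    return len(probabilities) - 1  # guard against floating-point round-off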
    def restore(self, partial_id, on_collision='increment_number'):
        entry = self.get_from_db(partial_id)
        if not self.dry:
            self.db.remove_by_partial_id(partial_id)
            entry.location = utils.move(os.path.join(self.location, entry.id),
                                        entry.location, on_collision)

        return entry
	def inference_step(self,session,encoder_input,decoder_output,sample_input):
		"""
		Performs inference with beam search in the decoder.
		session: tensorflow session
		encoder_input: [1 x K]*enc_unroll
		decoder_output: true actions [dec_unroll]
		sample_input: Sample instance of current sample
		return : loss, correct_pred (True|False)
		"""
		feed_dict = {}
		end_state = sample_input._path[-1]
		for i in xrange(self._encoder_unrollings):
			feed_dict[self._test_encoder_inputs[i]] = encoder_input[i]

		# initial values for cell variables
		[st,ct] = session.run([self._test_s0,self._test_c0],feed_dict=feed_dict)
		state = sample_input._path[0]
		prev_state = sample_input._path[0]
		_map = self._maps[sample_input._map_name]
		
		loss = 0.0 	# must be averaged over predicted sequence
		n_preds = -1
		for i in xrange(self._decoder_unrollings):
			# one hot vector of current true action
			onehot_act = np.zeros((1,self._num_actions),dtype=np.float32)
			onehot_act[0,decoder_output[i]] = 1.0
			# get world vector for current position
			x,y,pose = state
			place = _map.locationByCoord[(x,y)]
			yt = get_sparse_world_context(_map, place, pose, self._map_feature_dict,self._map_objects_dict)
			# set placeholder for current roll
			feed_dict[self._test_decoder_output] = onehot_act
			feed_dict[self._test_st] = st
			feed_dict[self._test_ct] = ct
			feed_dict[self._test_yt] = yt

			output_feed = [
				self._next_st,
				self._next_ct,
				self._test_prediction,
				self._test_loss,
				]
			st,ct,prediction,step_loss = session.run(output_feed,feed_dict=feed_dict)
			loss += step_loss
			# greedy prediction
			pred_act = prediction.argmax()
			# move according to prediction
			prev_state = state
			state = move(state,pred_act,_map)
			if state == -1:
				n_preds = i+1
				break
		if state != -1:
			prev_state = state
		loss /= (n_preds if n_preds!=-1 else self._decoder_unrollings)

		return loss,int(end_state==prev_state)	# for single-sentence
Example #38
def install_consul():
    consul_binary = join(HOME_DIR, 'consul')

    utils.mkdir(dirname(consul_binary))
    utils.mkdir(CONFIG_DIR)

    consul_package = \
        utils.download_cloudify_resource(ctx_properties['consul_package_url'],
                                         SERVICE_NAME)

    temp_dir = tempfile.mkdtemp()
    try:
        with zipfile.ZipFile(consul_package) as consul_archive:
            consul_archive.extractall(temp_dir)

        utils.move(join(temp_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        utils.remove(temp_dir)
Example #40
def get_manager_config():
    """
    Extracting specific files from cloudify-manager repo, with clean-ups after
    """
    cloudify_resources_url = ctx_properties['cloudify_resources_url']

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    manager_dir = utils.untar(manager_repo, unique_tmp_dir=True)

    ctx.logger.info('Deploying Riemann manager.config...')
    config_src_path = join(manager_dir, 'plugins', 'riemann-controller',
                           'riemann_controller', 'resources', 'manager.config')
    utils.move(config_src_path,
               '{0}/conf.d/manager.config'.format(RIEMANN_CONFIG_PATH))
    utils.remove(manager_dir)
    utils.remove(manager_repo)
Example #41
def send_to_destination(destination, groups, move=False, dry_run=False, verbose=False):
    if not dry_run:
        utils.mkdir_p(destination)
    for day, pics in groups.iteritems():
        day_dir = os.path.join(destination, day)
        if not dry_run:
            utils.mkdir_p(day_dir)
        for pic in pics:
            dst_file = os.path.join(day_dir, os.path.basename(pic))
            if move:
                if verbose or dry_run:
                    print 'Moving {} to {}'.format(pic, dst_file)
                if not dry_run:
                    utils.move(pic, dst_file)
            else:
                if verbose or dry_run:
                    print 'Copying {} to {}'.format(pic, dst_file)
                if not dry_run:
                    utils.copy(pic, dst_file)
Example #42
def get_in_good_starting_position(arm, which='arm1'):
    """ 
    Only meant so we get a good starting position, to compensate for how
    commanding the dVRK to go to a position/rotation doesn't actually work that
    well, particularly for the rotations. Some human direct touch is needed.
    """
    assert which == 'arm1'
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print("(starting method) starting position and rotation:")
    print(pos, rot)
    U.move(arm, HOME_POS_ARM1, HOME_ROT_ARM1, speed='slow')
    time.sleep(2)
    print("(starting method) position and rotation after moving:")
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print(pos, rot)
    print("(Goal was: {} and {}".format(HOME_POS_ARM1, HOME_ROT_ARM1))
    R = U.rotation_matrix_3x3_axis(angle=180, axis='z')
    print("With desired rotation matrix:\n{}".format(R))
    print("Now exiting...")
    sys.exit()
def install_consul():
    consul_binary = '/opt/cloudify/consul/consul'
    consul_config_dir = '/etc/consul.d'

    utils.mkdir(dirname(consul_binary))
    utils.mkdir(consul_config_dir)

    consul_package = \
        utils.download_cloudify_resource(ctx_properties['consul_package_url'],
                                         CONSUL_SERVICE_NAME)

    temp_dir = tempfile.mkdtemp()
    try:
        with zipfile.ZipFile(consul_package) as consul_archive:
            consul_archive.extractall(temp_dir)

        utils.move(join(temp_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        utils.remove(temp_dir)
Example #45
def min_max(board, depth, player, maxim):
    if depth == 0 or utils.lastMove(board, player):
        return utils.score(board, player)
    valid = utils.get_moves(board, player)
    if maxim:
        bestValue = -99999
        for move in valid:
            (tmp_board, tot) = utils.move(move, player, copy.deepcopy(board))
            v = min_max(tmp_board, depth - 1, utils.opponent(player), False)
            bestValue = max(bestValue, v)
        # print(np.array(tmp_board).reshape((8,8)))
        return bestValue
    else:  # minimizingPlayer
        bestValue = 99999
        for move in valid:
            (tmp_board, tot) = utils.move(move, player, copy.deepcopy(board))
            v = min_max(tmp_board, depth - 1, utils.opponent(player), True)
            bestValue = min(bestValue, v)
        # print(np.array(tmp_board).reshape((8,8)))
        return bestValue
def install_optional(rest_venv):
    props = ctx_properties

    dsl_parser_source_url = props['dsl_parser_module_source_url']
    rest_client_source_url = props['rest_client_module_source_url']
    plugins_common_source_url = props['plugins_common_module_source_url']
    script_plugin_source_url = props['script_plugin_module_source_url']
    agent_source_url = props['agent_module_source_url']

    rest_service_source_url = props['rest_service_module_source_url']

    # This allows upgrading modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if dsl_parser_source_url:
        utils.install_python_package(dsl_parser_source_url, rest_venv)
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, rest_venv)
    if plugins_common_source_url:
        utils.install_python_package(plugins_common_source_url, rest_venv)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url, rest_venv)
    if agent_source_url:
        utils.install_python_package(agent_source_url, rest_venv)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        tmp_dir = utils.untar(manager_repo, unique_tmp_dir=True)
        rest_service_dir = join(tmp_dir, 'rest-service')
        resources_dir = join(tmp_dir, 'resources/rest-service/cloudify/')

        ctx.logger.info('Installing REST Service...')
        utils.install_python_package(rest_service_dir, rest_venv)

        ctx.logger.info('Deploying Required Manager Resources...')
        utils.move(resources_dir, utils.MANAGER_RESOURCES_HOME)

        utils.remove(tmp_dir)
Example #47
def test(arm1, fname, test_method):
    """ Roll out the open loop policy. No human intervention. 
    
    test_method
    -----------
        0: roll entirely open-loop, no human intervention.
        1: roll each time step, then human provides correction (saves new file).
    """
    positions = U.load_pickle_to_list(fname)
    print("loaded {} positions from {}".format(len(positions), fname))
    if test_method == 0:
        print("We're going to run entirely open-loop.")
    elif test_method == 1:
        print("NOTE! You'll have to provide corrections after each movement.")
        revised_positions = []
    arm1.close_gripper()

    for i, (pos, rot) in enumerate(positions):
        U.move(arm1, pos, rot)
        arm1.close_gripper()
        real_pos, real_rot = U.get_pos_rot_from_arm(arm1, nparrays=True)
        print("\n({}) Target position: {},{}".format(i, pos, rot))
        print("    Actual position: {},{}".format(real_pos, real_rot))

        if test_method == 1:
            string = "Now correct the position as needed, then press any key,"+ \
                    " other than ESC (which will terminate the entire program)."
            U.call_wait_key(cv2.imshow(string, d.left_image), exit=True)
            revised_pos, revised_rot = U.get_pos_rot_from_arm(arm1,
                                                              nparrays=True)
            revised_positions.append((revised_pos, revised_rot))
            print("    Revised position: {},{}".format(revised_pos,
                                                       revised_rot))
        else:
            time.sleep(2)

    if test_method == 1:
        new_fname = fname[:-2] + '_revised.p'
        print("Storing {} positions in file {}".format(len(revised_positions),
                                                       new_fname))
        U.store_pickle(new_fname, revised_positions)
Example #48
def storyline(yyyyMMdd):
    info = moves.user_storyline_daily(yyyyMMdd, trackPoints={'false'}, access_token='access_token')
    print info[0]['date']
    segments = info[0]['segments']
    # print json.dumps(segments, indent=2)
    res = ''
    for segment in segments:
        if segment['type'] == 'place':
            res = utils.place(segment, res)
        elif segment['type'] == 'move':
            res = utils.move(segment, res)
        res += '<hr>'
    return res
def _deploy_security_configuration():
    ctx.logger.info('Deploying REST Security configuration file...')

    # Generating random hash salt and secret key
    security_configuration = {
        'hash_salt': base64.b64encode(os.urandom(32)),
        'secret_key': base64.b64encode(os.urandom(32))
    }

    # Update the runtime properties with the new values. The conversion to
    # and from a JSON string is necessary due to how __getitem__ and
    # __setitem__ are implemented in ctx-py.py
    runtime_props = ctx.instance.runtime_properties
    current_props = json.loads(runtime_props['security_configuration'])
    current_props.update(security_configuration)
    runtime_props['security_configuration'] = json.dumps(current_props)

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        json.dump(security_configuration, f)
    utils.move(path, join(REST_SERVICE_HOME, 'rest-security.conf'))
	def get_end_pos(self, actions, start_pos_grid, map_name):
		"""
		actions: [1 x num_actions]*dec_unrolls | prob distributions of actions
		start_pos_grid: (xg,yg,pose)
		"""
		_map = self._maps[map_name]
		state = start_pos_grid
		prev_state = start_pos_grid
		for action_distro in actions:
			action = action_distro.argmax()
			prev_state = state
			state = move(state,action,_map)
			if state == -1:
				return prev_state
		return state
	def get_end_pos(self, actions, start_pos_grid, map_name):
		"""
		actions: [action_id]*dec_unrolls
		start_pos_grid: (xg,yg,pose)
		"""
		_map = self._maps[map_name]
		state = start_pos_grid
		prev_state = start_pos_grid
		for action in actions:
			prev_state = state
			if state == -1:
				ipdb.set_trace()

			state = move(state,action,_map)
			if state == -1:
				return prev_state
		return state
	def step_inference(self,session,encoder_inputs,decoder_output,sample_input,beam_size=10):
		"""
		Performs inference with beam search in the decoder.
		session: tensorflow session
		encoder_inputs: [1 x K]*enc_unroll
		decoder_output: true actions [dec_unroll]
		sample_input: Sample instance of current sample
		beam_size: beam size to use in beam search
		return : loss, correct_pred (True|False)
		"""
		feed_dict = {}
		n_enc_unrolls = len(encoder_inputs)
		n_dec_unrolls = len(decoder_output)
		feed_dict[self._encoder_unrollings] = n_enc_unrolls
		feed_dict[self._decoder_unrollings] = n_dec_unrolls

		end_state = sample_input._path[-1]
		for i in xrange(self._max_encoder_unrollings):
			if i < n_enc_unrolls:
				feed_dict[self._encoder_inputs[i]] = encoder_inputs[i]
			else:
				feed_dict[self._encoder_inputs[i]] = np.zeros(shape=(1,self._vocab_size),dtype=np.float32)

		# initial values for cell variables
		[st,ct] = session.run([self._test_s0,self._test_c0],feed_dict=feed_dict)
		pos_state = sample_input._path[0]
		prev_state = sample_input._path[0]
		_map = self._maps[sample_input._map_name]
		
		### DECODER
		def run_decoder_step(_st,_ct,_state,len_seq):
			# one hot vector of current true action
			onehot_act = np.zeros((1,self._num_actions),dtype=np.float32)
			if len_seq < n_dec_unrolls:
				onehot_act[0,decoder_output[len_seq]] = 1.0
			# get world vector for current position
			x,y,pose = _state
			place = _map.locationByCoord[(x,y)]
			yt = get_sparse_world_context(_map, place, pose, self._map_feature_dict, self._map_objects_dict)
			# set placeholder for current roll
			feed_dict[self._test_decoder_output] = onehot_act
			feed_dict[self._test_st] = _st
			feed_dict[self._test_ct] = _ct
			feed_dict[self._test_yt] = yt

			output_feed = [
				self._next_st,
				self._next_ct,
				self._test_prediction,
				self._test_loss,
				]
			st,ct,prediction,step_loss = session.run(output_feed,feed_dict=feed_dict)
			return [st,ct,prediction,step_loss]

		## BEAM SEARCH vars
		max_sequence=40
		nodes = {} 			# nodes[v] = parent(v)
		act_id = {} 		# act_id[node] = action id
		dist = {}
		node_loss={}		# node_loss[node] = loss of sequence so far
		terminal_nodes=[]	# [(final_prob,node)]
		Q = []				# [(log_prob,node)]
		n_nodes = 0

		#ipdb.set_trace()

		# first move
		st,ct,prediction,loss = run_decoder_step(st,ct,pos_state,0)
		for i in range(self._num_actions):
			logprob = np.log(prediction[0,i]+1e-12)
			Q.append((logprob,n_nodes))
			new_node = BeamS_Node(_id=n_nodes,
										logprob=logprob,
										loss=loss,
										parent=-1,
										pos_state=pos_state,
										dec_st=st,dec_ct=ct,
										dist=0,
										act_id=i
										)
			nodes[n_nodes]=new_node
			n_nodes+=1

		while len(Q)!=0:	# keep rolling until stop criterion
			new_Q = []
			# use all current elmts in Q
			for prob,curr_node_id in Q:
				curr_node = nodes[curr_node_id]
				# discard long sequences and PAD-ended sequences
				if any([curr_node._dist>max_sequence,
						curr_node._act_id==PAD_decode,
						curr_node._pos_state==-1,
					]):
					continue
				# check if it's terminal
				if curr_node._act_id==STOP: # if it's STOP:
					terminal_nodes.append((prob,curr_node_id))
					continue
				# get next prob dist
				pos_state = move(curr_node._pos_state,curr_node._act_id,_map)
				if pos_state==-1:	# invalid move in current map
					continue
				st,ct,new_prediction,step_loss = run_decoder_step(curr_node._dec_st,curr_node._dec_ct,pos_state,curr_node._dist)
				new_prediction = np.log(new_prediction+1.e-12)

				for i in range(self._num_actions):
					logprob = prob+new_prediction[0,i]
					new_Q.append((logprob,n_nodes))
					new_node = BeamS_Node(_id=n_nodes,
										logprob=logprob,
										loss=step_loss,
										parent=curr_node_id,
										pos_state=pos_state,
										dec_st=st,dec_ct=ct,
										dist=curr_node._dist+1,
										act_id=i
										)
					nodes[n_nodes]=new_node
					n_nodes+=1
			new_Q.sort(reverse=True)
			Q = new_Q[:beam_size]
		#END-WHILE-BEAM_SEARCH
		terminal_nodes.sort(reverse=True)
		pred_actions = []
		node=nodes[terminal_nodes[0][1]]
		pred_end_state=node._pos_state
		loss = node._loss
		idx = node._id
		while idx!=-1:
			node = nodes[idx]
			pred_actions.append(node._act_id)
			idx = node._parent
		pred_actions.reverse()
			
		loss /= len(pred_actions)

		"""
		if self.kk>1890:
			## DEBUG
			print("True seq: %s" % (','.join([actions_str[act] for act in decoder_output])))
			print("Pred seq: %s" % (','.join([actions_str[act] for act in pred_actions])))

			ipdb.set_trace()
		self.kk+=1
		"""
		return loss,int(end_state==pred_end_state)	# for single-sentence
def deploy_manager_sources():
    """Deploys all manager sources from a single archive.
    """
    archive_path = ctx_properties['manager_resources_package']
    archive_checksum_path = \
        ctx_properties['manager_resources_package_checksum_file']
    skip_checksum_validation = ctx_properties['skip_checksum_validation']
    agent_archives_path = utils.AGENT_ARCHIVES_PATH
    utils.mkdir(agent_archives_path)
    if archive_path:
        sources_agents_path = os.path.join(
            utils.CLOUDIFY_SOURCES_PATH, 'agents')
        # This will leave this several-hundred-MB archive on the
        # manager. We should find a way to clean it up after all
        # operations have completed and bootstrap succeeded, as it is
        # no longer necessary.
        utils.mkdir(utils.CLOUDIFY_SOURCES_PATH)
        resource_name = os.path.basename(archive_path)
        destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, resource_name)

        ctx.logger.info('Downloading manager resources package...')
        resources_archive_path = \
            utils.download_cloudify_resource(
                archive_path, NODE_NAME, destination=destination)
        # This would ideally go under utils.download_cloudify_resource but as
        # of now, we'll only be validating the manager resources package.

        if not skip_checksum_validation:
            ctx.logger.info('Validating checksum...')
            skip_if_failed = False
            if not archive_checksum_path:
                skip_if_failed = True
                archive_checksum_path = archive_path + '.md5'
            md5_name = os.path.basename(archive_checksum_path)
            destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, md5_name)
            resources_archive_md5_path = utils.download_cloudify_resource(
                archive_checksum_path, NODE_NAME, destination=destination)
            if not utils.validate_md5_checksum(resources_archive_path,
                                               resources_archive_md5_path):
                if skip_if_failed:
                    ctx.logger.warn('Checksum validation failed. '
                                    'Continuing as no checksum file was '
                                    'explicitly provided.')
                else:
                    ctx.abort_operation(
                        'Failed to validate checksum for {0}'.format(
                            resources_archive_path))
            else:
                ctx.logger.info('Resources Package downloaded successfully...')
        else:
            ctx.logger.info(
                'Skipping resources package checksum validation...')

        utils.untar(
            resources_archive_path,
            utils.CLOUDIFY_SOURCES_PATH,
            skip_old_files=True)

        def splitext(filename):
            # not using os.path.splitext as it would return .gz instead of
            # .tar.gz
            if filename.endswith('.tar.gz'):
                return '.tar.gz'
            elif filename.endswith('.exe'):
                return '.exe'
            else:
                ctx.abort_operation(
                    'Unknown agent format for {0}. '
                    'Must be either tar.gz or exe'.format(filename))

        def normalize_agent_name(filename):
            # this returns the normalized name of an agent, which our agent
            # installer uses to retrieve agent packages for installation.
            # e.g. Ubuntu-trusty-agent_3.4.0-m3-b392.tar.gz returns
            # ubuntu-trusty-agent
            return filename.split('_', 1)[0].lower()

        def backup_agent_resources(agents_dir):
            ctx.logger.info('Backing up agents in {0}...'.format(agents_dir))
            if not os.path.isdir(utils.AGENTS_ROLLBACK_PATH):
                utils.mkdir(utils.AGENTS_ROLLBACK_PATH)
                utils.copy(agents_dir, utils.AGENTS_ROLLBACK_PATH)

        def restore_agent_resources(agents_dir):
            ctx.logger.info('Restoring agents in {0}'.format(
                utils.AGENTS_ROLLBACK_PATH))
            if os.path.isdir(agents_dir):
                utils.remove(agents_dir)
            utils.mkdir(agents_dir)
            utils.copy(os.path.join(utils.AGENTS_ROLLBACK_PATH, 'agents', '.'),
                       agents_dir)

        manager_scripts_path = os.path.join(
            utils.MANAGER_RESOURCES_HOME, 'packages', 'scripts')
        manager_templates_path = os.path.join(
            utils.MANAGER_RESOURCES_HOME, 'packages', 'templates')
        if utils.is_upgrade:
            backup_agent_resources(agent_archives_path)
            utils.remove(agent_archives_path)
            utils.mkdir(agent_archives_path)
            utils.remove(manager_scripts_path)
            utils.remove(manager_templates_path)
            ctx.logger.info('Upgrading agents...')
        elif utils.is_rollback:
            ctx.logger.info('Restoring agents...')
            restore_agent_resources(agent_archives_path)

        for agent_file in os.listdir(sources_agents_path):

            agent_id = normalize_agent_name(agent_file)
            agent_extension = splitext(agent_file)
            utils.move(
                os.path.join(sources_agents_path, agent_file),
                os.path.join(agent_archives_path, agent_id + agent_extension))
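The nested splitext above exists because os.path.splitext only strips the final suffix, which is wrong for agent packages named *.tar.gz. A quick demonstration:

import os.path

print(os.path.splitext('Ubuntu-trusty-agent_3.4.0-m3-b392.tar.gz'))
# prints ('Ubuntu-trusty-agent_3.4.0-m3-b392.tar', '.gz'), i.e. '.gz'
# rather than '.tar.gz'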
    def make_output_dirs(self):
        self.output_err = ''
        try:
            self.progress_text = 'Removing old output directory...\n'

            output_dir = utils.path_join(self.output_dir(), self.project_name())
            if os.path.exists(output_dir):
                utils.rmtree(output_dir, ignore_errors=True)

            temp_dir = utils.path_join(TEMP_DIR, 'webexectemp')
            if os.path.exists(temp_dir):
                utils.rmtree(temp_dir, ignore_errors=True)

            self.progress_text = 'Making new directories...\n'

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            os.makedirs(temp_dir)

            self.copy_files_to_project_folder()

            json_file = utils.path_join(self.project_dir(), 'package.json')

            global_json = utils.get_data_file_path('files/global.json')

            if self.output_package_json:
                with codecs.open(json_file, 'w+', encoding='utf-8') as f:
                    f.write(self.generate_json())


            with codecs.open(global_json, 'w+', encoding='utf-8') as f:
                f.write(self.generate_json(global_json=True))

            zip_file = utils.path_join(temp_dir, self.project_name()+'.nw')

            app_nw_folder = utils.path_join(temp_dir, self.project_name()+'.nwf')

            utils.copytree(self.project_dir(), app_nw_folder,
                           ignore=shutil.ignore_patterns(output_dir))

            zip_files(zip_file, self.project_dir(), exclude_paths=[output_dir])
            for ex_setting in self.settings['export_settings'].values():
                if ex_setting.value:
                    self.progress_text = '\n'
                    name = ex_setting.display_name
                    self.progress_text = u'Making files for {}...'.format(name)
                    export_dest = utils.path_join(output_dir, ex_setting.name)
                    versions = re.findall(r'(\d+)\.(\d+)\.(\d+)', self.selected_version())[0]

                    minor = int(versions[1])
                    if minor >= 12:
                        export_dest = export_dest.replace('node-webkit', 'nwjs')

                    if os.path.exists(export_dest):
                        utils.rmtree(export_dest, ignore_errors=True)

                    # shutil will make the directory for us
                    utils.copytree(get_data_path('files/'+ex_setting.name),
                                   export_dest,
                                   ignore=shutil.ignore_patterns('place_holder.txt'))
                    utils.rmtree(get_data_path('files/'+ex_setting.name), ignore_errors=True)
                    self.progress_text += '.'

                    if 'mac' in ex_setting.name:
                        uncomp_setting = self.get_setting('uncompressed_folder')
                        uncompressed = uncomp_setting.value
                        app_path = utils.path_join(export_dest,
                                                   self.project_name()+'.app')

                        try:
                            utils.move(utils.path_join(export_dest,
                                                       'nwjs.app'),
                                       app_path)
                        except IOError:
                            utils.move(utils.path_join(export_dest,
                                                       'node-webkit.app'),
                                       app_path)

                        plist_path = utils.path_join(app_path, 'Contents', 'Info.plist')

                        # readPlist/writePlist are the Python 2 plist APIs;
                        # Python 3 replaces them with plistlib.load/dump
                        plist_dict = plistlib.readPlist(plist_path)

                        plist_dict['CFBundleDisplayName'] = self.project_name()
                        plist_dict['CFBundleName'] = self.project_name()
                        version_setting = self.get_setting('version')
                        plist_dict['CFBundleShortVersionString'] = version_setting.value
                        plist_dict['CFBundleVersion'] = version_setting.value

                        plistlib.writePlist(plist_dict, plist_path)

                        self.progress_text += '.'

                        app_nw_res = utils.path_join(app_path,
                                                     'Contents',
                                                     'Resources',
                                                     'app.nw')

                        if uncompressed:
                            utils.copytree(app_nw_folder, app_nw_res)
                        else:
                            utils.copy(zip_file, app_nw_res)
                        self.create_icns_for_app(utils.path_join(app_path,
                                                                 'Contents',
                                                                 'Resources',
                                                                 'nw.icns'))

                        self.progress_text += '.'
                    else:
                        ext = ''
                        windows = False
                        if 'windows' in ex_setting.name:
                            ext = '.exe'
                            windows = True

                        nw_path = utils.path_join(export_dest,
                                                  ex_setting.dest_files[0])

                        if windows:
                            self.replace_icon_in_exe(nw_path)

                        self.compress_nw(nw_path)

                        dest_binary_path = utils.path_join(export_dest,
                                                           self.project_name() +
                                                           ext)
                        if 'linux' in ex_setting.name:
                            self.make_desktop_file(dest_binary_path, export_dest)

                        join_files(dest_binary_path, nw_path, zip_file)

                        # 0755: owner rwx, group and other r-x, so the
                        # generated binary is directly executable
                        sevenfivefive = (stat.S_IRWXU |
                                         stat.S_IRGRP |
                                         stat.S_IXGRP |
                                         stat.S_IROTH |
                                         stat.S_IXOTH)
                        os.chmod(dest_binary_path, sevenfivefive)

                        self.progress_text += '.'

                        if os.path.exists(nw_path):
                            os.remove(nw_path)

        except Exception:
            error = u''.join([unicode(x) for x in
                              traceback.format_exception(*sys.exc_info())])
            self.logger.error(error)
            self.output_err += error
        finally:
            utils.rmtree(temp_dir, ignore_errors=True)
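
# A hedged sketch of what a helper like join_files above is assumed to do:
# append the zipped app to the nw/node-webkit binary. The result remains a
# valid executable, and the runtime still finds the app because zip archives
# are indexed from the end (the central directory). This is an illustration
# under those assumptions, not the project's actual implementation.
def join_files(dest, *sources):
    with open(dest, 'wb') as out:
        for source in sources:
            with open(source, 'rb') as f:
                # stream in 1 MiB chunks so large binaries are not read
                # into memory at once
                for chunk in iter(lambda: f.read(1024 * 1024), b''):
                    out.write(chunk)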

def install_riemann():
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
                rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')
    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)

    # Our Riemann configuration will (by default) try to read these
    # environment variables; if they don't exist, it assumes they point
    # at "localhost":
    # export MANAGEMENT_IP=""
    # export RABBITMQ_HOST=""

    # We inject the management IP for both of these into Riemann's systemd
    # config. They can differ if the manager and rabbitmq are running on
    # different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
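
# A minimal sketch of the environment injection described in the comments
# above, assuming a systemd EnvironmentFile approach; this is not the actual
# cloudify utils implementation, and the path and variable names here are
# hypothetical.
def write_riemann_env_file(management_ip, rabbitmq_host,
                           path='/etc/sysconfig/cloudify-riemann'):
    content = 'MANAGEMENT_IP={0}\nRABBITMQ_HOST={1}\n'.format(management_ip,
                                                              rabbitmq_host)
    with open(path, 'w') as f:
        f.write(content)
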
Exemple #57
0
    def execute(self, userdata):
        move('Depth', move.COMMAND, self._target)
        return super(GotoBuoyDepth, self).execute(userdata)
Exemple #58
0
    create_dir("%s/%s" % (config['directory'], target_dir))

    lib_info = call("bower info %s -j --allow-root" % target)
    if not lib_info:
        print '[ERROR] Cannot collect information about library'
        break

    versions = [
        version for version in json.loads(lib_info)['versions']
        if not check_skip(version, config['skipWords'])
    ]

    for version in versions:
        target_directory = "%s/%s/%s" % (config['directory'], target_dir, version)
        if not create_dir(target_directory) and listdir(target_directory):
            print '[DEBUG] Version %s#%s: Already exists' % (target, version)
            continue

        call("bower install %s#%s -j --allow-root --force-latest --production" % (target, version), True)

        tmp_directory = "tmp/%s" % target_dir
        if path.isdir(tmp_directory):
            move(listdir(tmp_directory), tmp_directory, target_directory)
            print '[INFO] Version %s#%s: Downloaded' % (target, version)
            rmtree(tmp_directory)
        else:
            print '[ERROR] Cannot download %s#%s' % (target, version)

# Rebuild cdn.description: list library/version directories two levels deep,
# dropping the root-path line and the trailing "N directories" summary
remove('%s/cdn.description' % config['directory'])
call('tree %(d)s -d -L 2 | grep -v %(d)s | grep -v directories > %(d)s/cdn.description' % {'d': config['directory']})
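
# A plausible sketch of the check_skip helper used in Exemple #58 (its real
# implementation is not shown in the source): treat a version as skippable
# when its tag contains any configured skip word, e.g. 'alpha' or 'rc'.
def check_skip(version, skip_words):
    return any(word in version for word in skip_words)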