Example #1
def validate_account_stake(snapshot, balances, args):
    step('Verifying user account stake')
    invalid_stake = 0
    print_count = 0
    total_accounts = len(balances)

    for account in balances:

        total = asset2int(balances[account][CSV_EOS_BALANCE])

        liquid, net, cpu = get_account_stake(snapshot, account,
                                             args.core_symbol)

        # TODO: validate F(stake) ?
        if total != liquid + cpu + net:
            print_count = print_some(
                print_count,
                '{0} => TOTAL: [{1}] L:[{2}] C:[{3}] N:[{4}]'.format(
                    account, total, liquid, cpu, net))
            invalid_stake += 1

    if invalid_stake > 0:
        print "> %d accounts with invalid stake" % (invalid_stake)
    else:
        success()

    return True
Example #2
def news_list(request):
    if request.method == 'GET':
        page_num = request.GET.get('page_num', 1)
        page_size = request.GET.get('page_size', 10)
        news_query = News.objects.filter(status=1).order_by('-create_time').all()
        paginator = Paginator(news_query, int(page_size))
        try:
            news_list = paginator.page(int(page_num))
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            news_list = paginator.page(1)
        except EmptyPage:
            news_list = []
        if not news_list:
            return success({})

        ret = []
        for news in news_list:
            news_dict = {
                'id': news.id,
                'title': news.title,
                'content': render_content_html(news.content),
                'img': news.img,
                'status': news.status,
                'create_time': news.create_time,
                'update_time': news.update_time,
            }
            ret.append(news_dict)
        return success(ret)
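A hypothetical way to exercise this view's pagination contract with Django's test client; the /news/ route is an assumption, not something shown in this listing:

# Hypothetical usage sketch; assumes the view above is routed at /news/.
from django.test import Client

client = Client()
# Normal paging: second page, five items per page.
resp = client.get('/news/', {'page_num': 2, 'page_size': 5})
# A non-integer page_num falls back to page 1 (PageNotAnInteger),
# and an out-of-range page returns the empty success({}) payload.
resp = client.get('/news/', {'page_num': 'abc', 'page_size': 5})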
Example #3
def all_pro_info(request):
    if request.method == 'GET':
        page_num = request.GET.get('page_num', 1)
        page_size = request.GET.get('page_size', 10)
        ptype = int(request.GET.get('ptype', 0) or 0)
        params = {
            'status': 1
        }
        if ptype:
            params['ptype'] = ptype
        pros = Projects.objects.filter(**params).order_by('-weight', '-create_time').all()
        paginator = Paginator(pros, int(page_size))
        try:
            projects = paginator.page(int(page_num))
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            projects = paginator.page(1)
        except EmptyPage:
            projects = []
        if not projects:
            return success({})
        ret = []
        for proj in projects:
            pro_dict = {
                'id': proj.id,
                'title': proj.title,
                'descr': proj.descr,
                'img': proj.img,
                'ptype': proj.ptype,
                'status': proj.status,
                'create_time': proj.create_time,
                'update_time': proj.update_time,
            }
            ret.append(pro_dict)
        return success(ret)
Example #4
def validate_EOS_token(snapshot, args):

    step('Verifying EOS token')

    symbol = args.core_symbol
    key = name_to_string(symbol2int(symbol), False)
    sym = str(symbol2int(symbol))

    eos_token = snapshot['tables']['eosio.token'][key]['stat'][sym]['data']

    ok = True
    if asset2int(eos_token['max_supply']) != 100000000000000:
        if ok:
            warning()
            ok = False
        print "> EOS max supply != 10000M (%s)" % eos_token['max_supply']

    tokens = 0
    code = snapshot['tables']['eosio.token']
    for s in code:
        if 'stat' in code[s]: tokens += 1

    if tokens != 1:
        if ok:
            warning()
            ok = False
        print "> more than one token found in eosio.token"

    if ok:
        success()

    return True
Example #5
def init():
    """
    Initialize plouf project for this repository, creating plouffile.
    """

    if valid_repo():
        click.confirm('A \"%s\" has been found in this repository, override it?' % plouf_files["config"], abort=True, prompt_suffix='')

    data = {
        'name': click.prompt('project name', default=utils.get_dirname()),
        'description': click.prompt('description', default=''),
        'author': click.prompt('author', default=''),
        'version': click.prompt('version', default='0.1.0')
    }

    click.echo(json.dumps(data, indent=2))
    click.confirm('Is this ok?', default=True, abort=True)
    
    try:
        with open(get_pf_path(), 'w') as pf:
            json.dump(data, pf, indent=4)
        
        utils.success('Initialized empty plouf repository.')

    except Exception as e:
        click.echo(
            click.style(str(e), fg="red"),
            err=True
        )

Example #6
def queryColumnConfig():
    data = utils.getRequestData()
    dataBase = data.get("database")
    table = data.get("table")
    columns = json.loads(data.get("columns"))
    columns = ('"' + _ + '"' for _ in columns)
    queryColumnStr = ",".join(columns)
    if dataBase == '':
        return utils.success(list())
    page = int(data.get("page"))
    limit = int(data.get("limit"))
    sql = r"select column_name, character_maximum_length, column_comment, column_comment cn_name" \
          r", case when data_type = 'int' then 'int' when data_type = 'datetime' then 'datetime' else 'string' end as data_type" \
          r", '1:1' layout, 100 column_width, if(column_comment='', 'off', 'on') creat_i18n" \
          r", if(column_name='id', 'on', 'off') is_hidden, if(column_name='id', 'off', 'on') is_query" \
          r", if(column_name='id', 'on', 'off') is_add_hidden, if(column_name='id', 'on', 'off') is_edit_hidden" \
          r", if(column_name='id', 'off', 'on') is_edit, if(column_name='id', 'off', 'on') is_require" \
          r" from information_schema.columns where table_schema =  '%s' and table_name ='%s' and column_name in (%s)" \
          % (dataBase, table, queryColumnStr)
    data = list(utils.queryData(sql))
    for d in data:
        if d['cn_name']:
            d['en_name'] = utils.translateWord(d['cn_name'])
    return utils.success(data, page, limit)
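The query above splices the database, table, and quoted column names straight into the SQL string. Since utils.queryData hides the underlying connection, here is only a sketch of a bound-parameter alternative, assuming a hypothetical MySQL DB-API cursor:

# Sketch only: bind the IN (...) values as parameters instead of
# concatenating them; `cursor`, `dataBase`, `table` are assumed here.
columns = ["id", "name", "create_time"]          # illustrative values
placeholders = ",".join(["%s"] * len(columns))
sql = ("select column_name, character_maximum_length, column_comment"
       " from information_schema.columns"
       " where table_schema = %s and table_name = %s"
       " and column_name in ({})".format(placeholders))
cursor.execute(sql, [dataBase, table] + columns)
rows = cursor.fetchall()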
Example #7
def cli(question_file, answers_dir, answer_file, test):
    """Given a set of questions and the answers directory with top 100
    documents for each question, generate the answer file
    """
    success('---NLP Project Three: Question Answer---')

    question_filepath = os.path.realpath(question_file)
    answers_dirpath = os.path.realpath(answers_dir)
    answer_filepath = os.path.realpath(answer_file)

    log('Answering: {}\n Using: {}\n Into: {}'.format(question_filepath,
                                                      answers_dirpath,
                                                      answer_filepath))

    if test:
        warn('Testing, not normal execution...')
        _test_endpoint(question_filepath, answers_dirpath, answer_filepath)
        return

    try:
        questions = get_questions(question_filepath)
        if len(questions) != 232:
            warn('devset has 232 questions (Got {})'.format(len(questions)))

        answers = {}
        for question in questions:
            answers[question.qid] = get_answers(question, answers_dirpath)
        if len(answers) != len(questions):
            warn('Got {} answers for {} questions'.format(
                len(answers), len(questions)))

        write_answers(answers, answer_filepath)
        success('Wrote answers to {}'.format(answer_filepath))
    except NotImplementedError as e:
        error('TODO: {}'.format(e))
Example #8
def get_latest_submission():
    sub = Submission.query.filter_by(user_id=g.user.id).order_by(
        desc(Submission.timestamp)).first()
    if not sub:
        return success(None)

    return success(sub.to_dict())
Example #9
def validate_system_accounts(snapshot):

    step('Verifying system accounts')

    found = []
    for name, account in snapshot['accounts'].iteritems():
        tick()
        if name in system_accounts:

            if account['privileged'] != system_accounts[name]['privileged']:
                fail()
                print "> %s account wrong privileged setting" % (name)
                return False

            # Verify resignation: both owner and active authorities must be
            # controlled by the expected system actor alone
            if name != "eosio.null" and name != "eosio.prods":
                actor = system_accounts[name]['actor']
                permission = system_accounts[name]['permission']
                if authority_controlled_by_one_actor(account['permissions']["owner"], actor, permission) != True or \
                   authority_controlled_by_one_actor(account['permissions']["active"], actor, permission) != True:
                    fail()
                    print "> %s account NOT PROPERLY RESIGNED" % (name)
                    return False

            found.append(name)

    not_found = set(system_accounts.keys()) - set(found)
    if len(not_found):
        fail()
        print "> missing system accounts %s" % (','.join(not_found))
        return False

    success()
    return True
Example #10
def order(input_dir, output, manifest, debug):
    try:
        paths = sanitize_paths(input_dir, output, manifest)
        input_dirpath, output_filepath, manifest_filepath = paths
    except ValueError as e:
        warn('Error: {}. Exiting.'.format(e.message))
        return

    graph_d3_data, manifest_data = process(input_dirpath, debug)

    if os.path.exists(output_filepath):
        warn('Overwriting the existing file at {}'.format(output_filepath))

    if os.path.exists(manifest_filepath):
        warn('Overwriting the existing file at {}'.format(manifest_filepath))

    with open(output_filepath, 'w') as output_file:
        output_file.truncate()
        output_file.write(json.dumps(graph_d3_data, indent=2))

    with open(manifest_filepath, 'w') as manifest_file:
        manifest_file.truncate()
        manifest_file.write(json.dumps(manifest_data, indent=2))

    success('Wrote partial ordering graph of {} to {}'.format(input_dirpath, output_filepath))
    success('Created manifest file at {}'.format(manifest_filepath))
Example #11
def copy_sources():
    """
    Creates a copy of the NGAS sources in the target host.
    """

    # We still don't open the git repository to the world, so for the time
    # being we always make a tarball from our repository and copy it over
    # ssh to the remote host, where we expand it back

    nsd = ngas_source_dir()

    # Because this could be happening in parallel in various machines
    # we generate a tmpfile locally, but the target file is the same
    local_file = tempfile.mktemp(".tar.gz")
    create_sources_tarball(local_file)

    # transfer the tar file if not local
    if not is_localhost():
        target_tarfile = '/tmp/ngas_tmp.tar'
        put(local_file, target_tarfile)
    else:
        target_tarfile = local_file

    # unpack the tar file into the ngas_src_dir
    # (mind the "p", to preserve permissions)
    run('mkdir -p {0}'.format(nsd))
    with cd(nsd):
        run('tar xpf {0}'.format(target_tarfile))
        if not is_localhost():
            run('rm {0}'.format(target_tarfile))

    # Cleaning up now
    local('rm {0}'.format(local_file))

    success("NGAS sources copied")
Example #12
def validate_genesis(snapshot, genesis):
    step('Validating genesis')
    ddiff = DictDiffer(snapshot['genesis_state'], genesis)
    added = ddiff.added()
    removed = ddiff.removed()
    changed = ddiff.changed()

    if len(added) != 0 or len(removed) != 0 or len(changed) != 0:
        fail()
        if len(added) != 0:
            print "> Snapshot has '%s' and your genesis dont" % (
                ','.join(added))
        if len(removed) != 0:
            print "> Your genesis has %s and snapshot dont" % (
                ','.join(removed))
        if len(changed) != 0:
            print "> Your genesis and snapshot have different '%s'" % (
                ','.join(changed))

        print "# Snapshot genesis"
        print json.dumps(snapshot['genesis_state'], indent=2, sort_keys=True)
        print
        print "# Your genesis"
        print json.dumps(genesis, indent=2, sort_keys=True)
        return False

    success()
    return True
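DictDiffer is not defined anywhere in this listing; the widely circulated recipe it appears to follow can be sketched roughly as:

# Minimal sketch of the DictDiffer recipe assumed above: reports keys
# added, removed, or changed (by value) between two dicts.
class DictDiffer(object):
    def __init__(self, current, past):
        self.current, self.past = current, past
        self.current_keys = set(current.keys())
        self.past_keys = set(past.keys())
        self.intersect = self.current_keys & self.past_keys

    def added(self):
        return self.current_keys - self.intersect

    def removed(self):
        return self.past_keys - self.intersect

    def changed(self):
        return set(k for k in self.intersect if self.past[k] != self.current[k])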
Example #13
async def query_dns(domain, nameservers, results, limit, pbar=None):
    random.shuffle(nameservers)
    response = None
    for ns in nameservers[:5] + ['8.8.8.8']:
        async with limit:
            try:
                asyncresolver.nameservers = [ns]
                asyncresolver.timeout = 5
                asyncresolver.lifetime = 5

                response = None
                with trio.move_on_after(5):
                    response = await asyncresolver.resolve(domain, 'A')

                if response:
                    success(f"Found: {domain}")
                    if results is not None:
                        results.append((domain, [ip.to_text() for ip in response]))
                    break
            except resolver.NXDOMAIN:
                break
            except (resolver.NoAnswer, name.EmptyLabel, resolver.NoNameservers) as e:
                # warning(f"Exception for {domain}: {e}, {type(e)}")
                continue
            except dns.exception.Timeout:
                # warning(f"DNS Timeout for domain={domain}, nameserver={ns}")
                # await trio.sleep(1)
                pass

    if pbar:
        pbar.update()
    return response
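query_dns takes a shared trio capacity limiter and is meant to be spawned in bulk; a minimal driver sketch under that assumption (brute_force, the wordlist, and the nameserver list are all hypothetical):

# Minimal driver sketch for query_dns above. A shared CapacityLimiter
# bounds the number of in-flight lookups across all spawned tasks.
import trio

async def brute_force(base_domain, words, nameservers):
    results = []
    limit = trio.CapacityLimiter(50)    # at most 50 concurrent queries
    async with trio.open_nursery() as nursery:
        for word in words:
            nursery.start_soon(query_dns, f"{word}.{base_domain}",
                               nameservers, results, limit)
    return results

# trio.run(brute_force, "example.com", ["www", "mail", "dev"], ["8.8.4.4"])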
Example #14
def run_all_tests(bsqs, alpha, filtered, verbose):
  n = len(bsqs)
  ibsqs = dict(zip(range(n), bsqs))
  for test in [frequency_test, serial_test, run_test, fourier_transform_test,
               cumsum_test, backward_cumsum_test]:
    print "Running {}".format(test.__name__)
    failed = set()
    for i, bsq in ibsqs.iteritems():
      p_value, reason = test(bsq)
      if p_value < alpha:
        print error("  {0} has failed (bit position={1}, p-value={2})".format(
                                                   test.__name__, i, p_value))
        print "  Reason:\n  {}".format(reason.replace("\n", "\n  "))
        failed.add(i)
      elif verbose:
        print success("  {0} has passed (bit position={1}, p-value={2})".format(
                                                     test.__name__, i, p_value))
    if filtered:
      ibsqs = dict([ (i, bsq) for i, bsq in ibsqs.iteritems() if i not in failed ])
      print "Keeping the following positions for further testing: ", ibsqs.keys()
  if filtered:
    print "Random bits (%d):" % len(ibsqs), ibsqs.keys()
  if len(ibsqs) > 1:
    print "Running correlation_test"
    for bsq1i, bsq2i in itertools.combinations(ibsqs.keys(), 2):
      p_value, reason = phi_coefficient_test(bsqs[bsq1i], bsqs[bsq2i])
      if p_value < alpha:
        print error("  correlation_test has failed (bit positions=({0},{1})," \
                    " p-value={2})".format(bsq1i, bsq2i, p_value))
        print "  Reason:\n  {}".format(reason.replace("\n", "\n  "))
      elif verbose:
        print success("  correlation_test  has passed (bit positions=" \
                      "({0},{1}), p-value={2})".format(bsq1i, bsq2i, p_value))
  else:
    print "Tokens are only one bit long. Not running the correlation tests."
Example #15
def create_final_image(state):
    """Create docker image from container"""

    puts(blue("Building image"))

    # First need to cleanup container before we stop and commit it.
    # We execute most of the commands via ssh, until we actually remove ssh
    # itself and forcefully remove unnecessary system-level folders
    execute(cleanup_container)
    cont = state.container
    cont.exec_run('yum --assume-yes remove fipscheck fipscheck-lib openssh-server')
    cont.exec_run('rm -rf /var/log')
    cont.exec_run('rm -rf /var/lib/yum')

    conf = {'Cmd': ["/usr/bin/su", "-", "APP", "-c", "/home/APP/APP_rt/bin/ngamsServer -cfg /home/APP/APP/cfg/ngamsServer.conf -autoOnline -force -v 4"]}
    image_repo = docker_image_repository()

    try:
        cont.stop()
        cont.commit(repository=image_repo, tag='latest', conf=conf)
        success("Created Docker image %s:latest" % (image_repo,))
    except Exception as e:
        failure("Failed to build final image: %s" % (str(e)))
        raise
    finally:
        # Cleanup the docker environment from all our temporary stuff
        cont.remove()
Example #16
def run(mx, my):
    utils.debug('run called')
    x, y = 0, 0
    trees = 0
    ok = 0
    c = 0

    try:
        while True:
            if c == 0:
                c += 1
                continue
            c += 1
            x += mx
            y += my
            level = _map[y]
            location = x % len(level)
            if level[location] == "#":
                #tree = level[location]
                utils.warn(level, end='\n', text="TREE")
                trees += 1
            else:
                utils.success(level, end='\n', text=" OK ")
                ok += 1
    except IndexError:
        pass
    return trees, ok
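A hypothetical invocation, assuming `_map` has been loaded elsewhere as a list of equal-width pattern rows that repeat horizontally (the classic right-3, down-1 slope):

# Hypothetical setup and call; rows wrap horizontally via x % len(level).
_map = [
    "..##.......",
    "#...#...#..",
    ".#....#..#.",
]
trees, ok = run(3, 1)
print('{0} trees, {1} clear squares'.format(trees, ok))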
Example #17
def install_sysv_init_script(nsd, nuser, cfgfile):
    """
    Install the APP init script for an operational deployment.
    The init script follows the old System V init conventions.
    On a systemd-enabled system we use the update-rc.d tool to enable the
    script (instead of the System V chkconfig tool, which we use otherwise).
    The script is prepared to deal with both tools.
    """

    # Different distros place it in different directories
    # The init script is prepared for both
    opt_file = '/etc/sysconfig/{0}'.format(APP_name())
    if get_linux_flavor() in ('Ubuntu', 'Debian'):
        opt_file = '/etc/default/{0}'.format(APP_name())

    # Script file installation
    sudo('cp {0}/fabfile/init/sysv/{1}-server /etc/init.d/'.format(
        nsd, APP_name()))
    sudo('chmod 755 /etc/init.d/{0}-server'.format(APP_name()))

    # Options file installation and edition
    sudo('cp {0}/fabfile/init/sysv/APP-server.options {1}'.format(
        nsd, opt_file))
    sudo('chmod 644 %s' % (opt_file, ))

    # Enabling init file on boot
    if check_command('update-rc.d'):
        sudo('update-rc.d {0}-server defaults'.format(APP_name()))
    else:
        sudo('chkconfig --add {0}-server'.format(APP_name()))

    success("{0} init script installed".format(APP_name()))
Example #18
def install_user_profile():
    """
    Put the activation of the virtualenv into the login profile of the user
    unless the NGAS_DONT_MODIFY_BASHPROFILE environment variable is defined

    NOTE: This will be executed for the user running NGAS.
    """
    if run('echo $NGAS_DONT_MODIFY_BASHPROFILE') or \
       'NGAS_NO_BASH_PROFILE' in env:
        return

    nid = ngas_install_dir()
    nrd = ngas_root_dir()
    with cd("~"):
        if not exists(".bash_profile_orig"):
            run('cp .bash_profile .bash_profile_orig', warn_only=True)
        else:
            run('cp .bash_profile_orig .bash_profile')

        script = ('if [ -f "{0}/bin/activate" ]'.format(nid), 'then',
                  '   source "{0}/bin/activate"'.format(nid), 'fi',
                  'export NGAS_PREFIX="{0}"'.format(nrd))

        run("echo '{0}' >> .bash_profile".format('\n'.join(script)))

    success("~/.bash_profile edited for automatic virtualenv sourcing")
Example #19
def prepare_ngas_data_dir():
    """Creates a new NGAS root directory"""

    info('Preparing NGAS root directory')
    nrd = ngas_root_dir()
    tgt_cfg = os.path.join(nrd, 'cfg', 'ngamsServer.conf')
    with cd(ngas_source_dir()):

        cmd = ['./prepare_ngas_root.sh']
        if ngas_overwrite_root():
            cmd.append('-f')
        cmd.append(nrd)
        res = run(' '.join(cmd), quiet=True)
        if res.succeeded:
            success("NGAS data directory ready")
            return tgt_cfg

    # Deal with the errors here
    if res.return_code == 2:
        error = (nrd +
                 " already exists. Specify NGAS_OVERWRITE_ROOT to overwrite, "
                 "or a different NGAS_ROOT_DIR location")
    else:
        error = res
    abort(error)
Example #20
def upload(directory):
    """Upload a directory to S3.

    DIRECTORY: Directory to upload. Required.
    """
    if not AWS_BUCKET:
        utils.error('AWS_BUCKET environment variable not set. Exiting.')
        return

    conn = S3Connection()
    bucket = get_or_create_bucket(conn, AWS_BUCKET)

    files = list(utils.get_files(directory))
    total_size = 0

    utils.info('Found', len(files), 'files to upload to s3://' + AWS_BUCKET)

    for path in files:
        filesize = os.path.getsize(path)
        total_size += filesize

        utils.info('Uploading', path, '-', sizeof_fmt(filesize))

        k = Key(bucket)
        k.key = path
        k.set_contents_from_filename(path)

    utils.success('Done. Uploaded', sizeof_fmt(total_size))
Example #21
def create_health_check():
    utils.status('Creating health check for load balancer')
    health_check = boto.ec2.elb.HealthCheck(
        interval=10,
        healthy_threshold=2,
        unhealthy_threshold=3,
        target='HTTP:80/health')
    utils.success('Finished creating health check for load balancer')
    return health_check
Example #22
def convert_py(input_dir, author, reference):
    input_dirpath = os.path.realpath(os.path.join('..', input_dir))
    num_files = p2_convert_py(
        path_to_dir=input_dirpath,
        author=author,
        primary_reference=reference
    )

    success('Converted {} *.py programs in {} to *.p2'.format(num_files, input_dirpath))
Example #23
def provision_logging_instance():
    if is_logger_instance_running():
        return

    utils.status("Deploying logger instance")

    logger_reservation = env.connections.ec2.run_instances(
        image_id=env.logging_ami_id,
        min_count=1,
        max_count=1,
        key_name='%s-%s' % (env.project, env.environment),
        security_groups=['%s' % env.environment],
        user_data=utils.get_logging_user_data(env=env),
        instance_type=env.instance_type,
        instance_profile_name='%s-ec2-%s' % (env.project, env.environment)
    )

    for instance in logger_reservation.instances:

        utils.status("Waiting on logging instance to spin up...")
        instance_status = instance.update()
        while instance_status != 'running':
            time.sleep(5)
            instance_status = instance.update()
            print('Instance status: %s' % instance_status)

        utils.status("Naming instance")
        instance.add_tag('Name', '%s-logger' % env.project)

        utils.status('Getting logging volume')
        volume = get_logging_volume(instance)

        utils.status('Waiting on volume to be available')
        while volume.status != 'available':
            time.sleep(5)
            volume.update()
            print('Volume status: %s' % volume.status)

        utils.status('Attaching volume')
        volume.attach(instance.id, '/dev/sda2')

    utils.status('Linking logs URL')
    route53_zone = env.connections.route53.get_zone(env.zone)
    while not instance.dns_name:
        time.sleep(1)
        instance.update()

    for url in env.logging_urls:
        route53_zone.update_cname(
            name=url,
            value=instance.dns_name,
            ttl=60
        )
    utils.success('Finished provisioning logging instance')
Example #24
def setup_container():
    """Create and prepare a docker container and let Fabric point at it"""

    from docker.client import DockerClient

    image = 'centos:centos7'
    container_name = 'APP_installation_target'
    cli = DockerClient.from_env(version='auto', timeout=10)

    # Create and start a container from the CentOS 7 base image
    cont = cli.containers.run(image=image, name=container_name, remove=False, detach=True, tty=True)
    success("Created container %s from %s" % (container_name, image))

    # Find out container IP, prepare container for APP installation
    try:
        host_ip = cli.api.inspect_container(cont.id)['NetworkSettings']['IPAddress']

        info("Updating and installing OpenSSH server in container")
        cont.exec_run('yum -y update')
        cont.exec_run('yum -y install openssh-server sudo')
        cont.exec_run('yum clean all')

        info('Configuring OpenSSH to allow connections to container')
        add_public_ssh_key(cont)
        cont.exec_run('sed -i "s/#PermitRootLogin yes/PermitRootLogin yes/" /etc/ssh/sshd_config')
        cont.exec_run('sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config')
        cont.exec_run('ssh-keygen -A')
        cont.exec_run('chown root.root /root/.ssh/authorized_keys')
        cont.exec_run('chmod 600 /root/.ssh/authorized_keys')
        cont.exec_run('chmod 700 /root/.ssh')

        info('Starting OpenSSH daemon in container')
        cont.exec_run('/usr/sbin/sshd -D', detach=True)
    except:
        failure("Error while preparing container for APP installation, cleaning up...")
        cont.stop()
        cont.remove()
        raise

    # From now on we connect to root@host_ip using our SSH key
    env.hosts = host_ip
    env.user = '******'
    if 'key_filename' not in env and 'key' not in env:
        env.key_filename = os.path.expanduser("~/.ssh/id_rsa")

    # Make sure we can connect via SSH to the newly started container
    # We disable the known hosts check since docker containers created at
    # different times might end up having the same IP assigned to them, and the
    # ssh known hosts check will fail
    with settings(disable_known_hosts=True):
        execute(check_ssh)

    success('Container successfully setup! APP installation will start now')
    return DockerContainerState(cli, cont)
Example #25
def push_config_to_s3():
    utils.status('Pushing %(environment)s config to S3' % env)
    bucket = env.connections.s3.get_bucket(env.s3_bootstrap_bucket)
    for (dirpath, dirname, filenames) in os.walk(env.bootstrap_folder):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            key_name = os.path.join(env.environment, filepath)
            key = bucket.new_key(key_name)
            contents = get_bootstrap_file(filepath)
            key.set_contents_from_string(contents)
            key.set_acl('authenticated-read')
    utils.success('Finished pushing deploy script to S3')
Example #26
def create_load_balancer():
    load_balancer = env.connections.elb.create_load_balancer(
        name=env.load_balancer_name,
        zones=env.zones,
        security_groups=utils.security_groups(),
        complex_listeners=[('80', '80', 'http', 'http')]
    )
    utils.success('Finished creating load balancer')

    health_check = create_health_check()
    load_balancer.configure_health_check(health_check=health_check)
    return load_balancer
Example #27
def create_filter_file():
    qn_filter = create_question_filter()
    ans_filter = create_answer_filter()
    with open(FILTER_FILEPATH, 'w') as output_file:
        output_file.truncate()
        output_file.write(
            json.dumps({
                'questions': qn_filter,
                'answers': ans_filter
            },
                       indent=2))
    success('Wrote custom filters to {}'.format(FILTER_FILEPATH))
Example #28
def pull_so_recent(output_dir, tag, count):
    output_dirpath = os.path.realpath(os.path.join('..', output_dir))
    current_time = datetime.utcnow()
    num_snippets = pull_snippets(
        num_snippets=count,
        start_time=(current_time - timedelta(weeks=1)),
        end_time=current_time,
        extra_tags=list(tag),
        save_to_dir=output_dirpath
    )

    success('Pulled {} snippets from StackOverflow into {}'.format(num_snippets, output_dirpath))
Example #29
def upload(file, org, domain, type):
    """Upload a file in Nexus."""
    utils.error("upload not implemented yet")
    """Upload a given file into Nexus"""
    if file is None:
        utils.error('ERROR: you must give a filename')

    print(os.path.abspath(file))
    if not os.path.isfile(file):
        utils.error("ERROR: File doesn't exist: " + file)
    else:
        utils.success('File found.')
Example #30
def load_erc20_snapshot(args):
    step('Loading ETH snapshot')
    with open(args.csv_balance, 'rb') as csvfile:
        balances = {}
        for row in csv.reader(csvfile, delimiter=','):
            tick()
            row = [unicode(i) for i in row]
            row[CSV_EOS_BALANCE] = '{0} {1}'.format(row[CSV_EOS_BALANCE],
                                                    args.core_symbol)
            balances[row[CSV_EOS_ACCOUNT]] = row
    success()
    return balances
Example #31
def main():
    """main method"""
    try:
        utils.clear()
        print(color('    ┌──────────────────────────────────────────┐', fg='#b61042'))
        print(color('    │       ', fg='#b61042') +
              color(f'π-hole 5 list tool  v{__version__}', '#FFF') + color('         │', fg='#b61042'))
        print(color('    └──────────────────────────────────────────┘', fg='#b61042'))
        utils.info('    https://github.com/jessedp/pihole5-list-tool\n')
        utils.danger('    Do not hit ENTER or Y if a step seems to hang!')
        utils.danger("    Use CTRL+C if you're sure it's hung and report it.\n")

        db_file = ''
        use_docker = False
        docker = utils.find_docker()

        if docker[0] is True:
            utils.success(f'Found Running Docker config: {docker[1]}')
            use_docker = inquirer.confirm('Use Docker-ized config?', 'n')
            if use_docker:
                db_file = docker[1]

        if not use_docker:
            db_file = inquirer.ask_db()

        list_type = inquirer.ask_list_type()

        if list_type == constants.BLACKLIST:
            process_blacklists(db_file)

        if list_type == constants.WHITELIST:
            process_whitelists(db_file)

        if inquirer.confirm('Update Gravity for immediate effect?'):
            print()
            if use_docker:
                os.system('docker exec pihole bash "/usr/local/bin/pihole" "-g"')
            else:
                os.system('pihole -g')
        else:
            if use_docker:
                utils.info('Update Gravity through the web interface or by running:\n\t' +
                           '# docker exec pihole bash "/usr/local/bin/pihole" "-g"')

            else:
                utils.info(
                    'Update Gravity through the web interface or by running:\n\t# pihole -g')

            utils.info('\n\tBye!')

    except (KeyboardInterrupt, KeyError):
        sys.exit(0)
Example #32
def process_blocklists(db_file):
    """ prompt for and process blocklists """
    source = inquirer.ask_blocklist()

    import_list = []

    if source in blockLists:
        url_source = blockLists[source]
        resp = requests.get(url_source["url"])
        import_list = utils.process_lines(resp.text, url_source["comment"])

    if source == constants.FILE:
        fname = inquirer.ask_import_file()
        with open(fname) as import_file:
            import_list = utils.process_lines(import_file.read(), f"File: {fname}")

    if source == constants.PASTE:
        import_list = inquirer.ask_paste()
        import_list = utils.process_lines(import_list, "Pasted content")

    if len(import_list) == 0:
        utils.die("No valid urls found, try again")

    if not inquirer.confirm(
            f"Add {len(import_list)} block lists to {db_file}?"):
        utils.warn("Nothing changed. Bye!")
        sys.exit(0)

    conn = sqlite3.connect(db_file)
    sqldb = conn.cursor()
    added = 0
    exists = 0
    for item in import_list:
        sqldb.execute("SELECT COUNT(*) FROM adlist WHERE address = ?",
                      (item["url"], ))

        cnt = sqldb.fetchone()

        if cnt[0] > 0:
            exists += 1
        else:
            added += 1
            vals = (item["url"], item["comment"])
            sqldb.execute(
                "INSERT OR IGNORE INTO adlist (address, comment) VALUES (?,?)",
                vals)
            conn.commit()

    sqldb.close()
    conn.close()

    utils.success(f"{added} block lists added! {exists} already existed.")
Example #33
def build_ngas():
    """
    Builds and installs NGAS into the target virtualenv.
    """
    with cd(ngas_source_dir()):
        extra_pkgs = extra_python_packages()
        if extra_pkgs:
            virtualenv('pip install %s' % ' '.join(extra_pkgs))
        no_client = ngas_no_client()
        develop = ngas_develop()
        no_doc_dependencies = ngas_doc_dependencies()
        build_cmd = ngas_build_cmd(no_client, develop, no_doc_dependencies)
        virtualenv(build_cmd)
    success("NGAS built and installed")
Example #34
def queryColumns():
    data = utils.getRequestData()
    dataBase = data.get("database")
    if dataBase == '':
        return utils.success(list())
    table = data.get("table")
    page = int(data.get("page"))
    limit = int(data.get("limit"))
    sql = r"select column_name, case when is_nullable = 'NO' then '否' else '是' end as is_nullable" \
          r", data_type, character_maximum_length, case when ifnull(extra,'') = '' then '否' else '是' end as extra" \
          r", column_comment" \
          r" from information_schema.columns where table_schema =  '%s' and table_name ='%s' " \
          % (dataBase, table)
    return utils.success(list(utils.queryData(sql)), page, limit)
Example #35
def process(sources, output, force):
    """Download sources and process the file to the output directory.

    \b
    SOURCES: Source JSON file or directory of files. Required.
    OUTPUT: Destination directory for generated data. Required.
    """
    for path in utils.get_files(sources):
        pathparts = utils.get_path_parts(path)
        pathparts[0] = output.strip(os.sep)
        pathparts[-1] = pathparts[-1].replace('.json', '.geojson')

        outdir = os.sep.join(pathparts[:-1])
        outfile = os.sep.join(pathparts)

        source = utils.read_json(path)
        urlfile = urlparse(source['url']).path.split('/')[-1]

        if not hasattr(adapters, source['filetype']):
            utils.error('Unknown filetype', source['filetype'], '\n')
            continue

        if os.path.isfile(outfile) and not force:
            utils.error('Skipping', path, 'since generated file exists.',
                        'Use --force to regenerate.', '\n')
            continue

        utils.info('Downloading', source['url'])

        try:
            fp = utils.download(source['url'])
        except IOError:
            utils.error('Failed to download', source['url'], '\n')
            continue

        utils.info('Reading', urlfile)

        try:
            geojson = getattr(adapters, source['filetype']).read(fp, source['properties'])
        except IOError:
            utils.error('Failed to read', urlfile)
            continue
        finally:
            os.remove(fp.name)

        utils.make_sure_path_exists(outdir)
        utils.write_json(outfile, geojson)

        utils.success('Done. Processed to', outfile, '\n')
Example #36
def build_APP():
    """
    Builds and installs APP into the target virtualenv.
    """
    with cd(APP_source_dir()):
        extra_pkgs = extra_python_packages()
        if extra_pkgs:
            virtualenv('pip install %s' % ' '.join(extra_pkgs))
        develop = False
        no_doc_dependencies = APP_doc_dependencies()
        build_cmd = APP_build_cmd(False, develop, no_doc_dependencies)
        print build_cmd
        if build_cmd != '':
            virtualenv(build_cmd)
    success("{0} built and installed".format(APP))
Example #37
def validate_memory(snapshot):

    step('Verifying 64Gb max ram size')

    table = snapshot['tables']['eosio']['eosio']['global']
    params = table[table.keys()[0]]['data']

    max_ram_size = int(params["max_ram_size"]) >> 30
    if max_ram_size != 64:
        warning()
        print "> Max ram size != 64Gb : (%d)" % max_ram_size
    else:
        success()

    return True
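The `>> 30` is a bytes-to-GiB conversion (2^30 bytes per GiB), so the check passes exactly when max_ram_size is 64 GiB:

# 64 GiB expressed in bytes, shifted back down to whole GiB:
assert (64 * 2**30) >> 30 == 64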
Example #38
def get_week_rank():
    now = datetime.now()
    zero_time = get_day_zero_time(now)
    start = zero_time - timedelta(days=zero_time.weekday())  # start of the current week
    start_str = '{0:%Y-%m-%d %H:%M:%S}'.format(start)
    now_str = '{0:%Y-%m-%d %H:%M:%S}'.format(now)

    sql = """
    select u.id, u.username, u.avatar_url, count(*) from 
        submissions s left join users u on u.id = s.user_id 
            where s.timestamp > '{}' and s.timestamp < '{}' and s.result = 0 group by s.user_id;
    """.format(start_str, now_str)

    cursor = db.session.execute(sql)
    result_set = cursor.fetchall()

    result = []
    for row in result_set:
        result.append({
            'id': row[0],
            'username': row[1],
            'avatar_url': row[2],
            'count': row[3]
        })

    if len(result) < 3:
        for i in range(3 - len(result)):
            result.append({
                'id': None,
                'username': '******',
                'avatar_url': None,
                'count': 0
            })
    return success(result)
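start_str and now_str are interpolated into the SQL by hand; a bound-parameter sketch of the same query, assuming the same Flask-SQLAlchemy db session as above:

# Sketch: the same weekly-rank query with bound parameters.
from sqlalchemy import text

sql = text("""
select u.id, u.username, u.avatar_url, count(*) from
    submissions s left join users u on u.id = s.user_id
        where s.timestamp > :start and s.timestamp < :now
        and s.result = 0 group by s.user_id
""")
cursor = db.session.execute(sql, {'start': start_str, 'now': now_str})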
Example #39
def create_scaling_down_alarm(scale_down_policy, autoscaling_group):
    utils.status('Creating scaling down alarm...')
    name = '%s-%s-scale-down-alarm' % (env.project, env.environment)
    scale_down_alarm = boto.ec2.cloudwatch.MetricAlarm(
        name=name,
        namespace=env.cw_namespace,
        metric=env.cw_metric,
        statistic=env.cw_statistic,
        comparison=env.cw_comparison_lt,
        threshold=env.cw_threshold_down,
        period=env.cw_period,
        evaluation_periods=env.cw_evaluation_periods,
        alarm_actions=[scale_down_policy.policy_arn],
        dimensions={'AutoScalingGroupName': autoscaling_group.name})
    env.connections.cloudwatch.create_alarm(scale_down_alarm)
    utils.success('Finished creating scaling down alarm.')
Example #40
def create_scaling_up_policy(autoscaling_group):
    utils.status('Creating scaling up policy...')
    name = '%s-%s-scale-up' % (env.project, env.environment)
    scale_up_policy = boto.ec2.autoscale.ScalingPolicy(
        name=name,
        adjustment_type='ChangeInCapacity',
        as_name=autoscaling_group.name,
        scaling_adjustment=env.asg_adjustment_up,
        cooldown=env.asg_default_cooldown
    )
    env.connections.autoscale.create_scaling_policy(scale_up_policy)

    # We need to hit the API for the created policy to get its new ARN
    scale_up_policy = env.connections.autoscale.get_all_policies(
        as_group=autoscaling_group.name,
        policy_names=[name])[0]
    utils.success('Finished creating scaling up policy.')
    return scale_up_policy
Example #41
def get(self):
    try:
        players = [utils.to_dict(player) for player in NFL_Player_2015_M.query.all()]
    except:
        abort(400, response={
            'status': 400,
            'message': 'Players not found'
        })
    else:
        return utils.success(players)
Example #42
def get(self, pos):
    m = model_map[pos]
    try:
        # use a distinct loop variable so the model `m` isn't shadowed
        totals = [utils.to_dict(row) for row in m.query.filter_by(is_season_totals=True).all()]
    except:
        abort(400, response={
            'status': 400,
            'message': 'Season totals not found'
        })
    else:
        return utils.success(totals)
Example #43
def tag(load_balancer, tags):
    """
    We fall back to using the AWS CLI tool here because boto doesn't
    support adding tags to load balancers yet.

    As soon as https://github.com/boto/boto/issues/2549 is merged we're good
    to change this to use boto
    """
    utils.status('Tagging load balancer')
    tags = make_tags(tags=tags)
    local('aws elb add-tags '
          '--load-balancer-names {lb_name} '
          '--tags {tags} '
          '--region={region} '
          '--profile={profile_name}'.format(lb_name=load_balancer.name,
                                            tags=tags,
                                            region=env.region,
                                            profile_name=env.profile_name)
          )

    utils.success('Finished tagging load balancer')
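make_tags is not shown in this listing; `aws elb add-tags` expects space-separated Key=...,Value=... pairs, so a hypothetical version might look like:

# Hypothetical make_tags: renders {'env': 'qa', 'app': 'web'} as
# "Key=app,Value=web Key=env,Value=qa" for the aws elb add-tags CLI.
def make_tags(tags):
    return ' '.join('Key={0},Value={1}'.format(key, value)
                    for key, value in sorted(tags.items()))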
Example #44
def classify(f1, f2, encoding, verbose):
  # Read tokens
  print "Reading tokens"
  tks1 = f1.read().splitlines()
  tks2 = f2.read().splitlines()
  reader = read_binaries if encoding else read_characters
  dtks1, dtks2, cs = reader(tks1, tks2, encoding)
  print "Size of samples:", len(dtks1), "and", len(dtks2)

  # Build features from both sets
  print "Building features"
  feature_builder = build_binary_features if encoding else build_character_features
  f1,f_type1 = feature_builder(dtks1, cs)
  f2,f_type2 = feature_builder(dtks2, cs)
  assert len(f_type1) == len(f_type2)
  X = np.concatenate((f1, f2))
  print X.shape[1], "features have been generated"
  print "Dropping empty features"
  masked_features = mask_features(X)
  X = np.delete(X, masked_features, 1)
  f_type = np.delete(np.array(f_type1), masked_features)
  print X.shape[1], "features have been kept"
  target = np.concatenate([np.zeros(len(f1)), np.ones(len(f2))])

  # Running Chi2
  #print u"Running features selection via \u03c7\u00b2"
  #c2, pval = chi2(X, target)
  #print list(sorted(pval))
  #for i, pv in enumerate(pval):
  #  if pv < 0.001:
  #    print pv, f_type[i]

  # Cross validate (learn & test)
  print "Cross-validating the model"
  logistic = linear_model.LogisticRegression()
  scores = cross_validation.cross_val_score(logistic, X, target, cv=5)
  acc = scores.mean()
  if acc > 0.9:
    print(success("Accuracy: %0.2f (+/- %0.2f)" % (acc, scores.std() * 2)))
  else:
    print(error("Accuracy: %0.2f (+/- %0.2f)" % (acc, scores.std() * 2)))

  logistic.fit(X, target)
  ordered_coef = sorted(enumerate(logistic.coef_[0]), key=operator.itemgetter(1))
  if verbose:
    for i, c in ordered_coef:
      print c, f_type[i]
  else:
    for i, c in ordered_coef[:5]:
      print c, f_type[i]
    print "..."
    for i, c in ordered_coef[-5:]:
      print c, f_type[i]
Example #45
def get(self, player_id):
    try:
        # .first() so to_dict receives a single row rather than a list
        row = NFL_Player_2015_M.query.filter(NFL_Player_2015_M.id == player_id).first()
        player = utils.to_dict(row)
        game_model = pos_map[player['position']]
        games = game_model.query.filter(game_model.player_name == player['name']).all()
    except:
        abort(400, response={
            'status': 400,
            'message': 'Player not found'
        })
    else:
        return utils.success(games)
Example #46
def assign_elastic_ip_addresses(autoscaling_group):
    utils.status("Waiting on the new load balancer to get instances")
    while not autoscaling_group.instances:
        time.sleep(1)
        autoscaling_group = get(asg_type='QA')
    addresses = env.connections.ec2.get_all_addresses()

    free_addresses = list(filter(lambda x: x.instance_id is None, addresses))
    utils.success("Got the following addresses: %s" % addresses)

    for index, instance in enumerate(autoscaling_group.instances):
        utils.status("Waiting on instances to spin up...")
        instance_obj = ec2.get(instance_id=instance.instance_id)
        while instance_obj.state != 'running':
            time.sleep(1)
            instance_obj = ec2.get(instance_id=instance.instance_id)
            print('Instance status: %s' % instance_obj.state)
        address = free_addresses.pop(0)  # take the next unassigned address
        env.connections.ec2.associate_address(
            instance.instance_id, address.public_ip)
        utils.status(
            "Assigned %s to %s" % (address.public_ip, instance.instance_id))
Example #47
def run_all_tests(csqs, alpha, verbose):
  for test in [global_freq_test, ]:
    print "Running {}".format(test.__name__)
    p_value, reason = test(csqs, verbose)
    if p_value < alpha:
      print error("  {0} has failed (p-value={1})".format(test.__name__, p_value))
      print "  Reason:\n  {}".format(reason.replace("\n","\n  "))
    elif verbose:
      print success("  {0} has passed (p-value={1})".format(test.__name__, p_value))
  for test in [ freq_test, serial_test_nonoverlap, ]:
    for i,csq in enumerate(csqs):
      print "Running {} at position {}".format(test.__name__, i)
      lcs = alphabet(csq)
      if verbose:
        print "  Local Character Set:", "".join(sorted(lcs))
      validate_charset(lcs, verbose)
      p_value, reason = test(csq, lcs, verbose)
      if p_value < alpha:
        print error("  {0} has failed (character position={1}, p-value={2})".format(
                                                            test.__name__, i, p_value))
        print "  Reason:\n  {}".format(reason.replace("\n", "\n  "))
      elif verbose:
        print success("  {0} has passed (character position={1}, p-value={2})".format(
                                                            test.__name__, i, p_value))
Example #48
def get_project_info(request):
    if request.method == 'GET':
        pid = request.GET.get('pid', 1)
        proj = Projects.objects.filter(id=int(pid), status=1).first()
        if not proj:
            return success({})
        pro_dict = {
            'id': proj.id,
            'title': proj.title,
            'descr': render_content_html(proj.descr),
            'img': proj.img,
            'ptype': proj.ptype,
            'status': proj.status,
            'create_time': proj.create_time,
            'update_time': proj.update_time,
        }
        return success(pro_dict)
Example #49
def author_list(request):
    if request.method == 'GET':
        authors = Author.objects.filter(status=1).all()
        ret = []
        for author in authors:
            author_dict = {
                'id': author.id,
                'nickname': author.nickname,
                'english_name': author.english_name,
                'avatar': author.avatar,
                'descr': render_content_html(author.descr),
                'author_type': author.author_type,
                'author_title': '设计师' if author.author_type == 0 else '客户主管',
                'author_eng_title': 'Design' if author.author_type == 0 else 'Client Master',
            }
            ret.append(author_dict)
        return success(ret)
Example #50
def banner_list(request):
    if request.method == 'GET':
        params = {
            'status': 1
        }
        banners = Banners.objects.filter(**params).order_by('-weight', '-create_time').all()

        ret = []
        for b in banners:
            b_dict = {
                'id': b.id,
                'img': b.img,
                'link': b.link or '#',
                'status': b.status,
            }
            ret.append(b_dict)
        return success(ret)
Example #51
def classify(f1, f2, verbose):
  # Read tokens
  print "Reading tokens"
  tks1 = f1.read().splitlines()
  tks2 = f2.read().splitlines()
  cs = list(alphabet(tks1 + tks2))
  print "Alphabet contains", len(cs), "characters:", "".join(sorted(cs))

  # Build features from both sets
  print "Building features"
  f1,f_type1 = build_features(tks1, cs)
  f2,f_type2 = build_features(tks2, cs)
  assert len(f_type1) == len(f_type2)
  print len(f_type1), "features have been generated"
  target = [0,] * len(f1) + [1,] * len(f2)

  #print f_type1
  #print f1[:2]
  #print f2[:2]
  #print target[:2]

  # Cross validate (learn & test)
  print "Cross-validating the model" 
  X = f1 + f2
  logistic = linear_model.LogisticRegression()
  scores = cross_validation.cross_val_score(logistic, X, np.array(target), cv=5)  
  acc = scores.mean()
  if acc > 0.9:
    print(success("Accuracy: %0.2f (+/- %0.2f)" % (acc, scores.std() * 2)))
  else:
    print(error("Accuracy: %0.2f (+/- %0.2f)" % (acc, scores.std() * 2)))

  logistic.fit(X, target)
  ordered_coef = sorted(enumerate(logistic.coef_[0]), key=operator.itemgetter(1))
  if verbose:
    for i, c in ordered_coef:
      print c, f_type1[i]
  else:
    for i, c in ordered_coef[:5]:
      print c, f_type1[i]
    print "..."
    for i, c in ordered_coef[-5:]:
      print c, f_type1[i]
Example #52
                properties['source_url'] = source['url']
                properties['feature_count'] = len(geojson['features'])
                
                geojson['properties'] = properties
    
                utils.make_sure_path_exists(outdir)
                utils.write_json(outfile, geojson)

                utils.info("Generating label points")
                label_geojson = geoutils.get_label_points(geojson)
                label_pathparts = list(pathparts)
                label_pathparts[-1] = label_pathparts[-1].replace('.geojson', '.labels.geojson')
                label_path = os.sep.join(label_pathparts)
                utils.write_json(label_path, label_geojson)

                utils.success('Done. Processed to', outfile, '\n')
    
            properties['path'] = "/".join(pathparts[path_parts_to_skip:])
            catalog_entry = {
                'type': 'Feature',
                'properties': properties,
                'geometry': geoutils.get_union(geojson)
            }
            catalog_features.append(catalog_entry)
        except Exception, e:
            traceback.print_exc(e)
            failures.append(path)
            utils.error(str(e))
            utils.error("Error processing file " + path + "\n")
            success = False
Example #53
    def run(self):
        testModuleDirs = filter(os.path.isdir, os.listdir('.'))
        unitTestPassed = True
        # module tests
        for testModuleDir in testModuleDirs:

            isTesting = False
            for segment in self.segments:
                if re.match(segment + "_*", testModuleDir):
                    isTesting = True
                    break

            if not isTesting:
                continue

            testModuleFilter = "^" + self.filterRegex.split("/")[0]
            # skip if filter regex not match
            if not re.match(testModuleFilter, testModuleDir):
                continue
            print ""
            sys.stdout.flush()
            print "<",testModuleDir
            sys.stdout.flush()
            testDirs = next(os.walk(testModuleDir))[1]

            moduleTestsPassed = True
            # test cases
            for testDir in sorted(testDirs):
                if not re.match(self.filterRegex, testModuleDir+"/"+testDir):
                    continue
                if "ignore" in testDir:
                    print "  +", utils.info(testDir), utils.warning("IGNORE")
                    sys.stdout.flush()
                    continue

                print "  +", utils.info(testDir), ":",
                sys.stdout.flush()
                testPartFiles = sorted(os.listdir(testModuleDir + '/' + testDir))
                # print testPartFiles,
                singleTestPassed = True
                # test parts
                for testPartFile in sorted(testPartFiles):
                    # print "   +", testPartFile,
                    testPartPassed = self.runSingleTestPart(testModuleDir + '/' + testDir + '/' + testPartFile)
                    if(testPartPassed):
                        print testPartFile,
                        sys.stdout.flush()
                        pass
                    else:
                        singleTestPassed = False
                        print utils.failure(testPartFile) ,
                        sys.stdout.flush()
                        # don't break; we need to clean up after the test

                if(singleTestPassed):
                    print utils.success("OK")
                    sys.stdout.flush()
                else:
                    print utils.failure("FAIL")
                    sys.stdout.flush()
                    moduleTestsPassed = False
                    if(self.stopOnFail):
                        break

            if(moduleTestsPassed):
                print utils.success("> module "+ testModuleDir +" OK")
                sys.stdout.flush()
            else:
                print utils.failure("> module "+ testModuleDir +" FAIL")
                sys.stdout.flush()
                unitTestPassed = False
                if(self.stopOnFail):
                  break

        if(unitTestPassed):
            print utils.success("Tests OK")
            sys.stdout.flush()
            return SUCCESS
        else:
            print utils.failure("Tests FAIL")
            sys.stdout.flush()
            return ERROR_TEST_FAIL
Example #54
def delete_launch_config(autoscaling_group):
    utils.status('Deleting launch config')
    launch_config = env.connections.autoscale.get_all_launch_configurations(
        names=[autoscaling_group.launch_config_name])[0]
    launch_config.delete()
    utils.success('Launch config deleted')
Example #55
# published by the Free Software Foundation. You should have received
# a copy of the GNU General Public License along with this program.
# If not, see <http://www.gnu.org/licenses/>.

import json
from os import path

from jsonschema import validate, ValidationError
from jsonschema.exceptions import SchemaError

from utils import error, success

DB_FILE = path.join(path.dirname(path.abspath(__file__)), "../data.json")
SCHEMA_FILE = path.join(path.dirname(path.abspath(__file__)), 'schema.json')

with open(SCHEMA_FILE, 'r') as schema_obj:
    SCHEMA = json.load(schema_obj)

has_errors = False
with open(DB_FILE, 'r') as db_obj:
    try:
        validate(json.load(db_obj), SCHEMA)
    except (ValidationError, ValueError, SchemaError) as exc:
        # bind the exception to `exc` so the imported error() helper
        # isn't shadowed and remains callable here
        has_errors = True
        error("Invalid database")
        error("{}".format(exc))
    else:
        success("The database is valid")
exit(int(has_errors))