Example #1
def compile_by_domain_and_scope(domain, scope):
    common.print_verbose("Compiling " + scope)
    compile.compile(
        "domains/" + domain + "/src/" + scope + "/java",
        "target/domains/" + scope + "/" + domain,
        dependencies.classpath_for(domain, scope),
    )
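Nearly every example in this listing funnels its logging through a print_verbose helper (sometimes namespaced as common.print_verbose) that only prints when a global verbosity threshold is met. The helper itself never appears in the listing; a minimal sketch of that pattern, using hypothetical names, could look like this:

# Minimal sketch of a verbosity-gated logging helper (hypothetical;
# the real print_verbose/set_verbose_level used in these examples are not shown).
_verbose_level = 0

def set_verbose_level(level):
    global _verbose_level
    _verbose_level = level

def get_verbose_level():
    return _verbose_level

def print_verbose(message, level=0):
    # Print only when the message's level is within the configured verbosity.
    if level <= _verbose_level:
        print(message)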
Example #2
def add_to_classpath(cp, to_add):
    if cp == "":
        cp = ":".join(to_add)
    else:
        cp = cp + ":" + ":".join(to_add)
    common.print_verbose("New classpath: " + cp)
    return cp
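For instance, assuming to_add is a list of classpath entries (the jar names below are made up), the helper joins them with ':' , the Unix classpath separator:

cp = add_to_classpath("", ["a.jar", "b.jar"])   # -> "a.jar:b.jar"
cp = add_to_classpath(cp, ["c.jar"])            # -> "a.jar:b.jar:c.jar"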
Example #3
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)

    print_verbose("Args: %s" % str(args), 1)

    # Adapted from
    # https://nbviewer.jupyter.org/github/BVLC/caffe/blob/master/examples/00-classification.ipynb
    np.set_printoptions(threshold=np.nan)

    caffe.set_mode_cpu()
    net = caffe.Net(args.proto, args.model, caffe.TEST)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_mean('data', VGG_MEAN_PIXEL)  # mean pixel
    transformer.set_raw_scale(
        'data', 255
    )  # the reference model operates on images in [0,255] range instead of [0,1]
    transformer.set_channel_swap(
        'data',
        (2, 1,
         0))  # the reference model has channels in BGR order instead of RGB

    # Read image names
    with open(args.list) as f:
        allnames = f.read().splitlines()

    for sub in xrange(0, len(allnames), CAFFE_BATCH_SIZE):
        fnames = allnames[sub:sub + CAFFE_BATCH_SIZE]

        # Reshape input data
        print net.blobs['data'].data.shape
        net.blobs['data'].reshape(len(fnames), *net.blobs['data'].shape[1:])
        print net.blobs['data'].data.shape

        # Preprocess images
        for idx, fname in enumerate(fnames):
            fpath = os.path.join(args.input, fname)
            print "Processing image %s ..." % fpath
            img = transformer.preprocess('data', caffe.io.load_image(fpath))
            net.blobs['data'].data[idx] = img

        # Extract features
        print "Extracting features ..."
        out = net.forward()

        # Write extracted features
        for idx, fname in enumerate(fnames):
            path = os.path.join(args.output, os.path.dirname(fname))
            if not os.path.exists(path):
                os.makedirs(path)
            fpath = os.path.join(args.output, fname + ".feat")
            print "Writing features to %s ..." % fpath
            np.savetxt(fpath, net.blobs['fc7'].data[idx])

    print "Done!"
Example #4
def resize_image(page):

    # Global params
    global gb_idx_pageid
    global gb_idx_img_url
    global gb_idx_artist
    global gb_idx_realheight
    global gb_idx_realwidth
    global gb_density
    global gb_orig_dir
    global gb_dest_dir

    # Parse values
    pageid = page[gb_idx_pageid]
    img_url = page[gb_idx_img_url]
    artist = page[gb_idx_artist]
    realheight = float(page[gb_idx_realheight])
    realwidth = float(page[gb_idx_realwidth])

    # Parse paths
    orig_path, dest_path = parse_entry_paths(gb_orig_dir, gb_dest_dir, pageid, img_url, artist)

    # Parse dimensions
    pixelheight, pixelwidth = parse_entry_sizes(gb_density, realheight, realwidth)

    # Call convert to resize image
    convert_resize(orig_path, dest_path, pixelheight, pixelwidth)

    # Call convert to update density metadata
    convert_density(dest_path, realheight, realwidth)

    print_verbose('Done processing pageid %s' % pageid, 0)
Example #5
    def get_vm_list(self, filter_params=None):
        filter_params = filter_params or {}  # avoid a shared mutable default argument
        p_instance = filter_params.get("instance", "")
        self.header_keys = ['region', 'name', 'type']
        self.header_titles = ['Region', 'Name', 'Instance Type']
        vms = []
        for avm in self.client.virtual_machines.list_all():
            vm = AzureVM(avm)
            if p_instance > "":
                if p_instance != vm.name:
                    continue
                else:
                    dm = self.client.virtual_machines.get(
                        vm.resource_group, vm.name, expand="instanceView")
                    vm.set_extra_info(dm.__dict__)
                    for n in dm.network_profile.network_interfaces:
                        name = " ".join(n.id.split('/')[-1:])
                        sub = "".join(n.id.split('/')[4])
                        ips = self.networkClient.network_interfaces.get(
                            sub, name)
                        vm.set_extra_info(ips.__dict__)
                        if vm.public_ip_address is None:
                            ip_reference = ips.ip_configurations[
                                0].public_ip_address.id.split('/')
                            public_ip = self.networkClient.public_ip_addresses.get(
                                ip_reference[4], ip_reference[8])
                            vm.public_ip_address = public_ip.ip_address

            if self.verbose:
                print_verbose(vm.__dict__, p_type="vm")
            vms.append(vm.__dict__)
        return vms
Example #6
def download_image(img_path, img_url):

    # Fetch URL
    url = urllib2.urlopen(img_url)
    meta = url.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print_verbose("Downloading image %s (%s)" % (url.geturl(),
        size(file_size, system=alternative)), 0)

    # Set progress bar
    widgets = ['Progress: ', Percentage(), ' ', Bar(),
            ' ', AdaptiveETA(), ' ', AdaptiveTransferSpeed()]
    pbar = ProgressBar(widgets=widgets, maxval=file_size).start()

    # Download
    f = open(img_path, 'wb')
    file_size_dl = 0
    block_sz = 1024 * 8

    while True:
        buff = url.read(block_sz)
        if not buff:
            break

        file_size_dl += len(buff)
        f.write(buff)
        pbar.update(file_size_dl)

    # Done
    f.close()
    pbar.finish()
    return url.getcode()
Example #7
  def __init__(self, dependencies_file_contents):
    self.dependencies_file_contents = dependencies_file_contents
    self.dependencies = dict()
    dependencies_yaml = yaml.load(self.dependencies_file_contents, Loader=yaml.SafeLoader)
    if not dependencies_yaml:
      common.print_verbose("No dependencies found")
      return

    for stage_name_from_yaml, stage_contents in dependencies_yaml.items():
      # Create an empty dependency list for the stage if one isn't present yet
      if stage_name_from_yaml not in self.dependencies:
        self.dependencies[stage_name_from_yaml] = list()

      # Stage is defined but no dependencies are listed for it
      stage_dependencies_from_yaml = stage_contents if stage_contents else dict()

      for dep_name, dep_attributes in stage_dependencies_from_yaml.items():
        dep_version = dep_attributes["version"]
        dep_installer = dep_attributes["installer"]
        dep_details = dict(dep_attributes)
        del dep_details["version"]
        del dep_details["installer"]
        self.dependencies[stage_name_from_yaml].append(
          Dependency(name=dep_name, version=dep_version, installer=dep_installer, details=dep_details))

    return
Example #8
  def run(self):
    common.print_verbose("Running " + self.name + " action")

    self.deps = self.load_dependencies(self.dependencies_file_location)

    for dep in self.deps.dependencies_for("default"):
      dep.install()

    return 0, ""
Example #9
def convert_resize(orig_path, dest_path, pixelheight, pixelwidth):
    # Prepare command
    cmd = ["convert", orig_path, "-resize",
                  "%dx%d^" % (pixelwidth, pixelheight), dest_path]
    if get_verbose_level() >= 5:
        cmd.insert(1, "-verbose")

    # Run
    print_verbose("Running command: " + list2cmdline(cmd), 3)
    return check_call(cmd)
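Note that the "^" suffix in the geometry string ("%dx%d^") tells ImageMagick's convert to treat the given width and height as minimum values, so the image is resized to fill the box while preserving its aspect ratio.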
Example #10
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)

    print_verbose("Args: %s" % str(args), 1)

    # Resize images
    resize_from_csv(args.csv, args.original, args.resized, args.density)
Example #11
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)

    print_verbose("Args: %s" % str(args), 1)

    # Download images
    download_from_csv(args.csv, args.directory)
Example #12
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)

    print_verbose("Args: %s" % str(args), 1)

    # Crawl URL
    crawl(args.url)
Example #13
  def run(self):
    common.print_verbose("Running " + self.name + " action")

    with open(self.actions_file_location) as f:
      actions_file_contents = f.read()
      for stage in yaml.load(actions_file_contents, Loader=yaml.SafeLoader).items():
        for an_action in stage[1]:
          common.stop_if_failed(*self.action_for(an_action).run())
    return 0, ""
Example #14
  def run(self):
    common.print_verbose("Running " + self.name + " action")

    files_to_copy = glob.glob(MetaModel(common.meta_model()).template_for_action(self.name) + "/*", recursive=False)
    command_to_run = ['/bin/cp', "-R", *files_to_copy, '.']

    common.run_command(command_to_run)
    common.print_raw("Initialized new Java 9 application.")

    return 0, ""
Example #15
def parse_class(filename):
    if filename.startswith(VG_PREFIX):
        cl = VG_CLASS
    elif filename.startswith(NVG_PREFIX):
        cl = NVG_CLASS
    else:
        raise ValueError('Invalid class prefix. '
                         'Valid prefixes are "%s" and "%s"' % (VG_PREFIX, NVG_PREFIX))

    print_verbose('File %s class: %d' % (filename, cl), 4)
    return cl
Example #16
def agg_pred_dist_far(pred, classes):
    print_verbose('Aggregating by using the farthest point class ...', 0)

    arr_pos = pred[pred >= 0]
    max_pos = np.max(arr_pos) if (arr_pos.size > 0) else 0

    arr_neg = pred[pred <= 0]
    max_neg = np.abs(np.min(arr_neg)) if (arr_neg.size > 0) else 0

    cl = classes[1] if (max_pos > max_neg) else classes[0]
    return cl
Example #17
def agg_pred_dist_mediangroup(pred, classes):
    print_verbose('Aggregating by comparing distance groups medians ...', 0)

    arr_pos = pred[pred >= 0]
    med_pos = np.median(arr_pos) if (arr_pos.size > 0) else 0

    arr_neg = pred[pred <= 0]
    med_neg = np.abs(np.median(arr_neg)) if (arr_neg.size > 0) else 0

    cl = classes[1] if (med_pos > med_neg) else classes[0]
    return cl
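As a concrete illustration of the two distance-aggregation rules above (with a made-up prediction array and classes = [0, 1]):

import numpy as np
pred = np.array([-0.2, 0.9, -1.5, 0.3])
# Farthest point: the largest positive distance is 0.9, the farthest negative
# is |-1.5| = 1.5, so agg_pred_dist_far returns classes[0].
# Median groups: the median of the positives is 0.6, |median of the negatives|
# is 0.85, so agg_pred_dist_mediangroup also returns classes[0].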
Example #18
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)
    set_n_cores(args.cores)

    print_verbose("Args: %s" % str(args), 1)

    # Extract patches
    patch_extract(args.image, args.window, args.step, args.dir)
Example #19
def setup(topo, defaults):
    common.null_to_string(topo.addressing)
    addrs = setup_pools(defaults.addressing + topo.addressing, defaults)

    common.print_verbose("Addressing\n=================")
    common.print_verbose(addrs.to_yaml())

    validate_pools(addrs)
    common.exit_on_error()

    topo.pools = create_pool_generators(addrs)
    topo.addressing = addrs
    common.exit_on_error()
Example #20
def setup(topo, defaults=None):
    defaults = defaults or {}  # avoid a shared mutable default argument
    addrs = topo.get('addressing', {})
    addrs = common.merge_defaults(addrs, defaults.get('addressing'))
    addrs = setup_pools(addrs, defaults)
    topo['addressing'] = addrs

    common.print_verbose("Addressing\n=================")
    common.print_verbose(yaml.dump(addrs))

    validate_pools(addrs)
    common.exit_on_error()

    topo['pools'] = create_pool_generators(addrs)
    common.exit_on_error()
Example #21
  def run(self):
    common.print_verbose("Running " + self.name + " action")

    cp_string = ""
    if self.classpath != "":
      cp_string = " -cp " + self.classpath + " "

    common.run_command_in_shell('rm -rf ' + self.target_dir)
    common.run_command_in_shell('mkdir -p ' + self.target_dir)
    common.run_command_in_shell('find ' + self.source_dir + \
                                ' -type f -name "*.java" -print | xargs javac ' + \
                                cp_string + \
                                ' -d ' + self.target_dir + ' -sourcepath ' + self.source_dir)
    return 0, ""
Example #22
 def run(self):
   common.print_verbose("Running " + self.name + " action")
   exit_code = 0
   for test_dir in glob.iglob('**/test', recursive=True):
     original_working_directory = os.getcwd()
 
     run_directory = os.path.join(original_working_directory, str(test_dir))
     common.print_info("Running tests in " + str(run_directory))
     common.print_verbose("Changing directory to " + str(run_directory))
     os.chdir(run_directory)
 
     tests = []
     for filename in glob.iglob('**/*.py', recursive=True):
         tests.append(filename)
     command = ['/usr/local/bin/python3', '-m', 'unittest']
     command.extend(tests)
     subprocess_exit_code, output = common.run_command(command)
     if subprocess_exit_code != common.SUCCESS:
       exit_code = common.FAILED
     common.print_verbose(output)
     common.continue_if_failed(subprocess_exit_code, output)
 
     common.print_verbose("Changing directory to " + str(original_working_directory))
     os.chdir(original_working_directory) 
   
   return exit_code, ""
Example #23
def calc_dist(clf_model, data):

    # Read model
    with open(clf_model, "rb") as f:
        model = pickle.load(f)
    print_verbose(
        "Model [%0.2f%%]: %s" %
        (model.best_score_ * 100, str(model.best_estimator_)), 4)

    # Calculate distances
    dist = model.decision_function(data)
    dist = dist.reshape(-1, 1)

    return dist
Example #24
    def get_vm_list(self, filter_params=None):
        filter_params = filter_params or {}  # avoid a shared mutable default argument
        self.header_keys = [
            'region', 'name', 'type', 'public_ips', 'private_ips', 'state'
        ]
        self.header_titles = [
            'Region', 'Name', 'Instance Type', 'Public Ips', 'Private Ips',
            'state'
        ]
        p_region = filter_params.get("region", "")
        p_instance = filter_params.get("instance", "")
        info = False
        if p_instance:
            info = True
        ec2_regions = []
        available_ec2_regions = boto3.session.Session().get_available_regions(
            self.instance_type)
        if p_region > "":
            if p_region in available_ec2_regions:
                ec2_regions = [p_region]
            else:
                msg = str(p_region) + " is not a valid region for ec2."
                exit_message(msg, 1, self.isJson)
        else:
            ec2_regions = available_ec2_regions

        ec2_hosts = []
        for region in ec2_regions:
            try:
                msg = "Searching " + region + "..."
                message(msg, "info", self.isJson)
                ec2 = get_client('ec2', region=region)
                hosts = ec2.describe_instances()
                for host in hosts['Reservations']:
                    for h in host['Instances']:
                        if p_instance > "":
                            if p_instance != h['InstanceId']:
                                continue
                        vm = AwsVM(h)
                        vm.region = region
                        if p_instance:
                            vm.set_extra_info(h)
                        if self.verbose:
                            print_verbose(vm.__dict__)
                        ec2_hosts.append(vm.__dict__)
            except Exception:
                # Skip regions that cannot be queried (e.g., missing permissions)
                pass

        return ec2_hosts
Example #25
def run_tests_for(domain, scope):
    common.print_info_no_eol("Running " + scope + " for " + domain + "...")
    classpath = dependencies.classpath_for(domain, scope)
    test_classes_as_string = test_classes_for(domain, scope)

    if test_classes_as_string.strip() != "":
        run_tests_command = "java -cp " + classpath + " org.junit.runner.JUnitCore " + test_classes_as_string
        common.print_verbose("Running tests with:")
        common.print_verbose(run_tests_command)
        (exit_code, output) = common.run_command(run_tests_command)
        if exit_code == 0:
            common.print_info(" PASSED.")
        else:
            common.print_info(" FAILED.")
    else:
        common.print_info(" No tests found.")
Example #26
def direct_dependencies(domain, scope):
    global dependency_versions

    dependency_file = "domains/" + domain + "/src/" + scope + "/java/dependencies.csv"
    dependencies_to_return = {}

    if os.path.isfile(dependency_file):
        with open(dependency_file, "r") as f:
            deps = f.read().split("\n")
        deps.remove("groupId,artifactId")
        for dep in deps:
            if not dep:  # skip blank lines such as a trailing newline
                continue
            common.print_verbose("Found dependency: " + dep + " with version " + dependency_versions[dep])
            dependencies_to_return[dep] = dependency_versions[dep]

    common.print_verbose(dependencies_to_return)
    return dependencies_to_return
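The code above implies a dependencies.csv that starts with a groupId,artifactId header followed by one dependency per line. A sample file, with made-up coordinates, might look like:

groupId,artifactId
org.junit,junit
com.google.guava,guava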
Example #27
def main(log_level="info", meta_model_name="power-daps/python3", actions_to_run=None):
  actions_to_run = actions_to_run or ["default"]  # avoid a shared mutable default argument
  common.set_log_level(log_level)
  meta_model = MetaModel(meta_model_name)
  common.set_meta_model(meta_model_name)

  valid_actions = meta_model.actions()

  common.print_verbose('Actions to run ' + str(actions_to_run))
  common.print_verbose('Valid actions ' + str([va.name for va in valid_actions]))

  for action_to_run in actions_to_run:
    if action_to_run not in [va.name for va in valid_actions]:
      common.print_error("Action '" + action_to_run + "' not found.")
      continue
    for valid_action in valid_actions:
      if valid_action.name == action_to_run:
        common.stop_if_failed(*valid_action.run())
Example #28
    def get_rds_list(self, filter_params=None):
        filter_params = filter_params or {}  # avoid a shared mutable default argument
        self.header_keys = [
            'region', 'instance', 'status', 'db_class', 'engine_version'
        ]
        self.header_titles = [
            'Region', 'Instance', 'Status', 'Class', 'Version'
        ]
        p_region = filter_params.get("region", "")
        p_instance = filter_params.get("instance", "")
        rds_regions = []
        available_rds_regions = boto3.session.Session().get_available_regions(
            "rds")
        if p_region > "":
            if p_region in available_rds_regions:
                rds_regions = [p_region]
            else:
                msg = str(p_region) + " is not a valid region for rds."
                exit_message(msg, 1, self.isJson)
        else:
            rds_regions = available_rds_regions

        # get all of the postgres db instances
        pg_list = []
        for region in rds_regions:
            msg = "Searching " + region + "..."
            message(msg, "info", self.isJson)
            rds = get_client('rds', region=region)
            dbs = rds.describe_db_instances()
            for db in dbs['DBInstances']:
                if db['Engine'] in ["postgres"]:
                    if p_instance > "":
                        if p_instance != db['DBInstanceIdentifier']:
                            continue
                    rds_instance = AwsRDS(db)  # avoid shadowing the rds client created above
                    rds_instance.region = region
                    if p_instance:
                        extras_args = {}
                        extras_args['ec2'] = get_client('ec2', region=region)
                        rds_instance.set_extra_info(db, extras_args)
                    if self.verbose:
                        print_verbose(rds_instance.__dict__, p_type="pg")
                    pg_list.append(rds_instance.__dict__)
        return pg_list
Example #29
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)

    print_verbose("Args: %s" % str(args), 1)

    # Crawl URL
    result = crawl(args.url)

    # Extract data
    raw_data = extract_data(result)

    # Sort (optional)
    raw_data.sort()

    # Save to CSV
    gen_csv(args.csv, raw_data)
Example #30
def convert_density(filepath, realheight, realwidth):
    # Get image dimensions in pixels
    pixelheight, pixelwidth = identify_size(filepath)

    # Calculate densities
    # Densities are calculated in CM due to precision errors in ImageMagick
    dens_height = 1.0 * pixelheight / realheight / 2.54
    dens_width = 1.0 * pixelwidth / realwidth / 2.54

    # Prepare command
    cmd = ["convert", "-units", "PixelsPerCentimeter", filepath,
                    "-density", "%.5fx%.5f" % (dens_width, dens_height),
                    filepath]
    if get_verbose_level() >= 5:
        cmd.insert(1, "-verbose")

    # Run
    print_verbose("Running command: " + list2cmdline(cmd), 3)
    return check_call(cmd)
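As a quick arithmetic check of the density formula above: an image 1000 pixels wide of a painting 20 inches wide yields 1000 / 20 / 2.54 ≈ 19.685 pixels per centimeter.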
Example #31
 def get_db_list(self, filter_params=None):
     filter_params = filter_params or {}  # avoid a shared mutable default argument
     p_instance = filter_params.get("instance", "")
     self.header_keys = [
         'region', 'instance', 'status', 'db_class', 'engine_version'
     ]
     self.header_titles = [
         'Region', 'Instance', 'Status', 'Class', 'Version'
     ]
     db_list = []
     for pg in self.client.servers.list():
         db = AzurePG(pg)
         if p_instance > "":
             if p_instance != db.instance:
                 continue
         if self.verbose:
             db.set_extra_info(pg)
             print_verbose(db.__dict__, p_type="pg")
         db_list.append(db.__dict__)
     return db_list
Example #32
def gen_data(dirname, gtruth=True):
    files = list_files(dirname)
    full_paths = [os.path.join(dirname, x) for x in files]
    print_verbose('Dir %s full file path: %s' % (dirname, str(full_paths)), 4)

    data = apply_multicore_function(read_data, full_paths)
    labels = apply_multicore_function(parse_label, files)
    if gtruth:
        classes = apply_multicore_function(parse_class, files)

    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=str)
    if gtruth:
        classes = np.asarray(classes, dtype=np.uint8)

    if gtruth:
        return data, labels, classes
    else:
        return data, labels
Example #33
  def load_actions_from_dir(self, dir):
    common.print_verbose("Looking for actions in " + dir)
    if os.path.isdir(dir) is not True:
      common.exit_with_error_message("Meta-model '" + self.name() + "' not found in '" + dir + "'")

    elif os.path.isdir(os.path.join(dir, "actions")) is not True:
      common.exit_with_error_message("Meta-model '" + self.name() + "' found but no actions found")

    elif len(self.actions_found_in(dir + "/actions")) == 0:
      common.exit_with_error_message("No actions found in '" + dir + "/actions'")

    if dir not in sys.path:
      sys.path.insert(0, dir)

    actions = []

    for action in self.actions_found_in(dir + "/actions"):
      action_module = importlib.import_module("actions." + action + "_action")
      actions.append(action_module.action())
    
    return actions
Example #34
def delete_many_parallel(groups):
    common.print_info(
        "Deleting ResourceGroup in parallel started. May take couple of minutes"
    )

    # Start processes and redirect output to tmp files
    processes = []
    for group in groups:
        group_name = group["name"]
        p = common.process_start(f"az group delete --name {group_name} --yes")
        common.print_info(
            f"Deleting ResourceGroup {group_name} started. PID={p.pid}")
        processes.append(p)

    # Wait for processes and print their outputs
    common.print_info("Waiting to finish...")
    for p in processes:
        p.wait()
        common.print_verbose(f"{p.pid} finished. Return status={p.returncode}")
        for line in p.stdout.readlines():
            common.print_info(line)
        for line in p.stderr.readlines():
            common.print_error(line)
    common.print_info("Deleting ResourceGroups (parallel) finished")
Example #35
def resize_from_csv(csvfile, orig_dir, dest_dir, density):

    # Global params
    global gb_idx_pageid
    global gb_idx_img_url
    global gb_idx_artist
    global gb_idx_realheight
    global gb_idx_realwidth
    global gb_density
    global gb_orig_dir
    global gb_dest_dir

    # Define writer
    reader = csv.reader(csvfile, quoting=csv.QUOTE_ALL, strict=True)

    # Field names
    field_names = reader.next()

    # Indices
    gb_idx_pageid = field_names.index('PageID')
    gb_idx_img_url = field_names.index('ImageURL')
    gb_idx_artist = field_names.index('Artist')
    gb_idx_realheight = field_names.index('RealHeightInches')
    gb_idx_realwidth = field_names.index('RealWidthInches')

    # Prepare multicore execution
    gb_orig_dir = orig_dir
    gb_dest_dir = dest_dir
    gb_density = density

    pool = Pool()
    pool.map(resize_image, reader)
    pool.close()
    pool.join()

    print_verbose('Completed!', 0)
Example #36
def crawl(url_param):

    # Fix eventual full URL
    url_param = unquote_plus(basename(url_param))

    # Generate query
    params = {
            'action'        : 'query',
            'prop'          : 'imageinfo|revisions',
            'iiprop'        : 'url|sha1|size',
            'rvprop'        : 'content',
            'rawcontinue'   : '' }

    url_type = get_url_type(url_param)

    if url_type == 'category':
        params['generator'] = 'categorymembers'
        params['gcmtitle']  = url_param
        params['gcmlimit']  = 'max'
    elif url_type == 'file':
        params['titles']    = url_param
    else:
        params['generator'] = 'images'
        params['titles']    = url_param
        params['gimlimit']  = 'max'


    # Call API
    site = wiki.Wiki(API_URL)
    request = api.APIRequest(site, params)

    print_verbose("Site: %s" % str(site), 2)
    print_verbose("Query: ", 2)
    pprint_verbose(params, 2)

    result = request.query(querycontinue=True)
    print_verbose("Result: ", 4)
    pprint_verbose(result, 4)

    # Check result
    if 'error' in result:
        raise Error(result['error'])

    if 'warnings' in result:
        sys.stderr.write(str(result['warnings']) + "\n")
        return None

    if '-1' in result['query']['pages']:
        sys.stderr.write(str(result['query']['pages']['-1']) + "\n")
        return None

    return result['query']['pages']
Example #37
    def fetch(self):
        common.print_info_no_eol( "   " + self.group_id + "," + self.artifact_id + ": " + self.version + "... ")

        common.print_verbose('')
        common.print_verbose('URL: ' + self.remotelocation("jar"))
        common.print_verbose('Local: ' + self.locallocation("jar"))

        if not self.islatest():
            self.forcefetch("jar")
            self.forcefetch("pom")
            self.forcefetch("pom.asc")
            self.forcefetch("jar.asc")
            common.print_info("downloaded")
        else:
            common.print_info("exists")
Example #38
def extract_data(result):

    img_list = []

    # For each page
    for page in result.values():

        # Get ID
        page_id = page['pageid']
        print_verbose("Extracting info from %s ..." % page_id, 1)

        try:
            # Image info
            img_info = page['imageinfo'][0]
            img_desc_url = img_info['descriptionurl']
            img_url = img_info['url']
            img_sha1 = img_info['sha1']
            img_height = img_info['height']
            img_width = img_info['width']

            # Content
            img_content = page['revisions'][0]['*']
            (paint_id, artist, dim) = extract_image_content(img_content)

            print_verbose("URL is %s" % img_desc_url, 1)

            # Internal object
            img_page = ImagePage(page_id, img_desc_url, img_url, img_sha1,
                    img_height, img_width,
                    paint_id, artist, dim)

            print_verbose(img_page, 3)
            img_list.append(img_page)
        except Exception as e:
            sys.stderr.write("Error processing PageID %s\n" % page_id)
            sys.stderr.write("-- %s\n" % e)


    return img_list
Example #39
def generate_model(data, classes, args):

    # Define the parameters
    tuned_parameters = {'C': C_RANGE, 'class_weight': CLASS_WEIGHTS}

    # Define the classifier
    clf = linear_model.LogisticRegression(max_iter=SCORE_MAX_ITER,
                                          n_jobs=args.cores)

    print_verbose("Classifier: %s" % str(clf), 5)
    print_verbose("Parameters: %s" % str(tuned_parameters), 5)

    # Generate the K-fold development
    skf = cross_validation.StratifiedKFold(classes,
                                           n_folds=SCORE_K_FOLD,
                                           shuffle=True)
    print_verbose("KFold: %s" % str(skf), 5)

    gscv = grid_search.GridSearchCV(clf,
                                    tuned_parameters,
                                    cv=skf,
                                    scoring='mean_squared_error',
                                    n_jobs=1,
                                    verbose=get_verbose_level())

    # Search
    print_verbose("GridSearch: %s" % str(gscv), 5)
    gscv.fit(data, classes)

    # Print scores
    print_verbose("GridSearch scores:", 5)
    for params, mean_score, scores in gscv.grid_scores_:
        print_verbose(
            "%0.6f (+/-%0.06f) for %r" %
            (mean_score, scores.std() / 2, params), 5)

    # Print best score
    print_verbose("GridSearch best score:", 0)
    print_verbose("%0.6f for %r" % (gscv.best_score_, gscv.best_params_), 0)

    return gscv
Example #40
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)
    set_n_cores(args.cores)

    print_verbose("Args: %s" % str(args), 1)

    # Prepare data
    data, labels, classes = gen_data(args.dir)
    print_verbose('Data: %s' % str(data), 5)
    print_verbose('Labels: %s' % str(labels), 4)
    print_verbose('Classes: %s' % str(classes), 4)

    print_verbose('Data shape: %s' % str(data.shape), 2)
    print_verbose('Labels shape: %s' % str(labels.shape), 2)
    print_verbose('Classes shape: %s' % str(classes.shape), 2)

    print_verbose('Data bytes: %s' % str(data.nbytes), 2)

    # Calculate distances
    dist = calc_dist(args.model, data)

    # Generate score model
    model = generate_model(dist, classes, args)
    print_verbose('Model: %s' % str(model), 0)

    # Export
    print_verbose('Saving model to %s' % args.score, 0)
    with open(args.score, "wb") as f:
        pickle.dump(model, f)

    print_verbose('Done!', 0)
Example #41
def eval_perf(classification):
    y_true = []
    y_pred = []

    for (key, value) in classification.iteritems():
        y_true.append(parse_class(key))
        y_pred.append(value)

        print_verbose("Classification pair: %s" % str((key, value)), 4)
        print_verbose("True classes: %s" % str(y_true), 5)
        print_verbose("Predicted classes: %s" % str(y_pred), 5)

    # Print results
    print_verbose("True classes: %s" % str(y_true), 2)
    print_verbose("Predicted classes: %s" % str(y_pred), 2)

    # Print metrics
    print_verbose("Confusion Matrix:", 0)
    print_verbose(metrics.confusion_matrix(y_true, y_pred), 0)
    print_verbose("Classification Report:", 0)
    print_verbose(metrics.classification_report(y_true, y_pred), 0)
Example #42
def main(argv):

    # Parse arguments
    args = parse_args(argv)
    set_verbose_level(args.verbose)
    set_n_cores(args.cores)

    print_verbose("Args: %s" % str(args), 1)

    # Prepare data
    data, labels = gen_data(args.dir, False)

    print_verbose('Data: %s' % str(data), 5)
    print_verbose('Labels: %s' % str(labels), 4)

    print_verbose('Data shape: %s' % str(data.shape), 2)
    print_verbose('Labels shape: %s' % str(labels.shape), 2)

    classification = classify(data, labels, args)
    print_verbose('Final classification: %s' % str(classification), 0)

    # Evaluate performance
    if args.gtruth:
        eval_perf(classification)
Example #43
def classify(data, labels, args):

    classification = {}

    # Read model
    with open(args.model, "rb") as f:
        model = pickle.load(f)
    print_verbose(
        "Model [%0.2f%%]: %s" %
        (model.best_score_ * 100, str(model.best_estimator_)), 4)

    # Classify each label
    lolo = cross_validation.LeaveOneLabelOut(labels)
    print_verbose("LeaveOneOut: %s" % str(lolo), 5)

    for train_index, test_index in lolo:
        print_verbose("Test index: %s" % str(test_index), 5)
        print_verbose("Classifying label: %s" % str(labels[test_index[0]]), 4)

        # Classify
        if args.aggregation == 'mode':
            pred = model.predict(data[test_index])
        else:
            pred = model.decision_function(data[test_index])
        print_verbose("Patch prediction: %s" % str(pred), 4)

        # Aggregate
        if args.aggregation == 'mode':
            res = agg_pred_mode(pred)
        elif args.aggregation == 'sum':
            res = agg_pred_dist_sumall(pred, model.best_estimator_.classes_)
        elif args.aggregation == 'far':
            res = agg_pred_dist_far(pred, model.best_estimator_.classes_)
        elif args.aggregation == 'mean':
            res = agg_pred_dist_meangroup(pred, model.best_estimator_.classes_)
        elif args.aggregation == 'median':
            res = agg_pred_dist_mediangroup(pred,
                                            model.best_estimator_.classes_)
        else:
            raise ValueError("Unknown aggregation: %s" % args.aggregation)
        print_verbose("Aggregate result: %s" % str(res), 4)

        # Append to final result
        classification[labels[test_index[0]]] = res
        print_verbose("Classification: %s" % str(classification), 5)

    return classification
Example #44
 def run(self):
   common.print_verbose("Running " + self.name + " action")
   common.stop_if_failed(*common.run_command(["/bin/rm", "-rf", "dist/dap"]))
   return common.run_command([self.pyinstaller(),
                       "--noconfirm", "--log-level=WARN",
                       common.power_daps_dir() + "dap.spec"])
Example #45
 def run(self):
   common.print_verbose("Running " + self.name + " action")
   return 0, ""
Example #46
 def run(self):
   common.print_verbose("Running " + self.name + " action")
   common.print_raw("blueee!!!")
   return 0, ""