Code example #1
def main():
    # Init
    options = parse_inputs()
    c = color_codes()

    # Data loading (or preparation)
    d_path = options['val_dir']
    gt_names = sorted(filter(
        lambda xi: not os.path.isdir(xi) and re.search(options['lab_tag'], xi),
        os.listdir(d_path)),
                      key=find_number)
    cases_pre = [str(find_number(r)) for r in gt_names]
    gt_names = [
        gt for c, gt in zip(cases_pre, gt_names)
        if find_file('Z{:}.jpg'.format(c), d_path)
    ]
    cases = [c for c in cases_pre if find_file('Z{:}.jpg'.format(c), d_path)]

    print('%s[%s] %s<Tree detection pipeline>%s' %
          (c['c'], time.strftime("%H:%M:%S"), c['y'], c['nc']))
    ''' <Detection task> '''
    net_name = 'tree-detection.nDEM.unet'
    train(cases, gt_names, net_name, 'nDEM')
    net_name = 'tree-detection.DEM.unet'
    train(cases, gt_names, net_name, 'DEM')

    eval(cases, gt_names)
Code example #2
    def test_utils(self):
        app.config['ENV'] = 'TESTING'
        # allowing files types
        self.assertTrue(utils.allowed_file('filename.jpg'))
        self.assertFalse(utils.allowed_file('filename.gif'))
        self.assertTrue(utils.allowed_file('filename.png'))
        self.assertFalse(utils.allowed_file('filename.pdf'))

        # search file function
        self.assertTrue(utils.find_file('1.jpg'))
        self.assertTrue(utils.find_file('2.jpg'))
        self.assertFalse(utils.find_file('3.jpg'))

        # download function
        file = utils._download(
            'http://www.reportingday.com/wp-content/uploads/2018/06/Cat-Sleeping-Pics.jpg'
        )
        self.assertTrue(file['success'])
        # expected name assumed from the URL basename
        self.assertEqual(file['name'], 'Cat-Sleeping-Pics.jpg')

        # remove function
        self.assertTrue(utils.remove_file(file['name']))
        self.assertFalse(utils.remove_file('randomanme.jpg'))

        # predict function
        self.assertEqual(utils.make_prediction('1.jpg'), 'dog')
        self.assertEqual(utils.make_prediction('2.jpg'), 'cat')
        self.assertRaises(FileNotFoundError, utils.make_prediction, 'asd.jpg')
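
The allowed_file helper exercised by this test is not shown on this page. A minimal sketch consistent with the assertions above (jpg and png accepted, gif and pdf rejected) could look like the following; the exact extension whitelist is an assumption, not the project's code:

import os

# Hypothetical whitelist inferred from the assertions above.
ALLOWED_EXTENSIONS = {'.jpg', '.jpeg', '.png'}

def allowed_file(filename):
    # Accept a file when its lowercased extension is whitelisted.
    _, ext = os.path.splitext(filename)
    return ext.lower() in ALLOWED_EXTENSIONS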
Code example #3
File: main.py Project: spuronlee/deeprl_network
def evaluate_fn(agent_dir, output_dir, seeds, port, demo):
    agent = agent_dir.split('/')[-1]
    if not check_dir(agent_dir):
        logging.error('Evaluation: %s does not exist!' % agent)
        return
    if not demo:
        # load config file for env
        config_dir = find_file(agent_dir + '/data/')
    else:
        config_dir = find_file(agent_dir + '/')
    if not config_dir:
        return
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env = init_env(config['ENV_CONFIG'], port=port)
    env.init_test_seeds(seeds)

    # load model for agent
    model = init_agent(env, config['MODEL_CONFIG'], 0, 0)
    if model is None:
        return
    if not demo:
        model_dir = agent_dir + '/model/'
    else:
        model_dir = agent_dir + '/'
    if not model.load(model_dir):
        return
    # collect evaluation data
    evaluator = Evaluator(env, model, output_dir, gui=demo)
    evaluator.run()
Code example #4
    def find_mds(self, model_dir):
        """returns the path to the mds that was used to generate this
        model output.  While the text file that the R model produces
        has an absolute path to the data this function assumes that
        the mds file is in the session folder that this model output
        folder is in.  That is it looks for an mds with the same
        file name in the parent folder of the model folder.
        """
        model_text = os.path.join(model_dir,
                                  utils.find_file(model_dir, "_output.txt"))
        #  assumed to be one level up from model folder.
        session_folder = os.path.split(model_dir)[0]

        f = open(model_text, 'rb')
        lines = f.read().splitlines()

        #  grab the line after "Data:"
        try:
            originalMDS = [
                lines[i + 1] for i in range(len(lines))
                if lines[i].startswith("Data:")
            ][0].strip()

            fname = os.path.split(originalMDS)[1]
            mds_in_root = os.path.join(session_folder, fname)
            if os.path.exists(originalMDS):
                return originalMDS
            elif os.path.exists(mds_in_root):
                return mds_in_root
            else:
                raise RuntimeError(
                    'Valid input MDS file not found in Model text output.')
        except IndexError:
            raise RuntimeError(
                'Valid input MDS file not found in Model text output.')
Code example #5
File: main.py Project: Dogordog/deeprl_dist
def evaluate_fn(agent_dir, output_dir, seeds, port):
    agent = agent_dir.split('/')[-1]
    if not check_dir(agent_dir):
        logging.error('Evaluation: %s does not exist!' % agent)
        return
    # load config file for env
    config_dir = find_file(agent_dir + '/data/')
    if not config_dir:
        return
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env, greedy_policy = init_env(config['ENV_CONFIG'],
                                  port=port,
                                  naive_policy=True)
    env.init_test_seeds(seeds)

    # load model for agent
    if agent != 'greedy':
        # init centralized or multi agent
        model = init_agent(env, config['MODEL_CONFIG'], 0, 0)
        if model is None:
            return
        if not model.load(agent_dir + '/model/'):
            return
    else:
        model = greedy_policy
    # collect evaluation data
    evaluator = Evaluator(env, model, output_dir)
    evaluator.run()
Code example #6
File: merge.py Project: paulschreiber/people
def retire(abbr, existing, new, retirement=None):
    if not retirement:
        retirement = click.prompt("Enter retirement date YYYY-MM-DD")
    person, num = retire_person(existing, retirement)
    fname = find_file(existing["id"])
    dump_obj(person, filename=fname)
    move_file(fname)
Code example #7
File: cardiff.py Project: tremble/edeploy
def analyze_data(global_params,
                 pattern,
                 ignore_list,
                 detail,
                 rampup_value=0,
                 max_rampup_value=0,
                 current_dir=""):
    if rampup_value > 0:
        pattern = pattern + "*.hw"

    # Extracting regex and path
    path = os.path.dirname(pattern)
    if not path:
        path = "."
    else:
        pattern = os.path.basename(pattern)

    if not os.path.isdir(path):
        print "Error: the path %s doesn't exists !" % path
        sys.exit(2)

    health_data_file = utils.find_file(path, pattern)
    if len(health_data_file) == 0:
        print "No log file found with pattern %s!" % pattern
        sys.exit(1)
    else:
        if rampup_value == 0:
            print "### %d files Selected with pattern '%s' ###" % (
                len(health_data_file), pattern)
        else:
            print "########## Rampup: %d / %d hosts #########" % (
                rampup_value, max_rampup_value)

    # Extract data from the hw files
    bench_values = []
    for health in health_data_file:
        bench_values.append(eval(open(health).read()))

    if (rampup_value > 0):
        unique_id = 'uuid'
    else:
        unique_id = 'serial'

    # Extracting the host list from the data to get
    # the initial list of hosts. We have here a single group with all the servers
    systems_groups = []
    systems_groups.append(utils.get_hosts_list(bench_values, unique_id))

    # Let's create groups of similar servers
    if (rampup_value == 0):
        group_systems(global_params, bench_values, unique_id, systems_groups,
                      ignore_list)
        compare_sets.print_systems_groups(systems_groups)

    # It's time to compare performance in each group
    compare_performance(bench_values, unique_id, systems_groups, detail,
                        rampup_value, current_dir)
    print "##########################################"
    print
    return bench_values
Code example #8
def evaluate_fn(agent_dir, output_dir, seeds, port, demo, policy_type):
    agent = agent_dir.split('/')[-1]
    if not check_dir(agent_dir):
        logging.error('Evaluation: %s does not exist!' % agent)
        return
    # load config file for env
    config_dir = find_file(agent_dir + '/data/')
    if not config_dir:
        return
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env, greedy_policy = init_env(config['ENV_CONFIG'],
                                  port=port,
                                  naive_policy=True)
    logging.info(
        'Evaluation: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
        (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))
    env.init_test_seeds(seeds)

    # load model for agent
    if agent != 'greedy':
        # init centralized or multi agent
        if agent == 'a2c':
            model = A2C(env.n_s, env.n_a, 0, config['MODEL_CONFIG'])
        elif agent == 'ia2c':
            model = IA2C(env.n_s_ls, env.n_a_ls, env.n_w_ls, 0,
                         config['MODEL_CONFIG'])
        elif agent == 'ma2c':
            model = MA2C(env.n_s_ls, env.n_a_ls, env.n_w_ls, env.n_f_ls, 0,
                         config['MODEL_CONFIG'])
        elif agent == 'iqld':
            model = IQL(env.n_s_ls,
                        env.n_a_ls,
                        env.n_w_ls,
                        0,
                        config['MODEL_CONFIG'],
                        seed=0,
                        model_type='dqn')
        else:
            model = IQL(env.n_s_ls,
                        env.n_a_ls,
                        env.n_w_ls,
                        0,
                        config['MODEL_CONFIG'],
                        seed=0,
                        model_type='lr')
        if not model.load(agent_dir + '/model/'):
            return
    else:
        model = greedy_policy
    env.agent = agent
    # collect evaluation data
    evaluator = Evaluator(env,
                          model,
                          output_dir,
                          demo=demo,
                          policy_type=policy_type)
    evaluator.run()
Code example #9
    def compute(self):
        self.inputs = map_ports(self, self.port_map)
        self.inputs.update(self.parse_inputs())

        self.inputs["model_dir"] = self.inputs["model_workspace"]

        for model_output in ['prob', 'bin', 'resid', 'mess', 'MoD']:
            try:
                self.inputs[model_output + "_map"] = os.path.join(
                    self.inputs["model_dir"],
                    utils.find_file(self.inputs["model_dir"],
                                    "_" + model_output + "_map.tif"))
            except:
                self.inputs[model_output + "_map"] = ""

        try:
            self.inputs["mds"] = self.find_mds(self.inputs["model_dir"])
        except RuntimeError:
            self.inputs["mds"] = ""

        self.inputs["model_tag"] = os.path.split(self.inputs["model_dir"])[1]

        utils.set_sheet_location(self)

        if utils.check_if_model_finished(self.inputs["model_dir"]):
            self.local_displayAndWait(self.inputs)
Code example #10
File: cardiff.py Project: Prophidys/edeploy
def analyze_data(global_params, pattern, ignore_list, detail, rampup_value=0,
                 max_rampup_value=0, current_dir=""):
    if rampup_value > 0:
        pattern = pattern + "*.hw"

    # Extracting regex and path
    path = os.path.dirname(pattern)
    if not path:
        path = "."
    else:
        pattern = os.path.basename(pattern)

    if not os.path.isdir(path):
        print "Error: the path %s doesn't exists !" % path
        sys.exit(2)

    health_data_file = utils.find_file(path, pattern)
    if len(health_data_file) == 0:
        print "No log file found with pattern %s!" % pattern
        sys.exit(1)
    else:
        if rampup_value == 0:
            print "### %d files Selected with pattern '%s' ###" % \
                (len(health_data_file), pattern)
        else:
            print "########## Rampup: %d / %d hosts #########" % \
                (rampup_value, max_rampup_value)

    # Extract data from the hw files
    bench_values = []
    for health in health_data_file:
        bench_values.append(eval(open(health).read()))

    if rampup_value > 0:
        unique_id = 'uuid'
    else:
        unique_id = 'serial'

    # Extracting the host list from the data to get
    # the initial list of hosts. We have here a single group
    # with all the servers
    systems_groups = []
    systems_groups.append(utils.get_hosts_list(bench_values, unique_id))

    # Let's create groups of similar servers
    if rampup_value == 0:
        group_systems(global_params, bench_values, unique_id, systems_groups,
                      ignore_list)
        compare_sets.print_systems_groups(systems_groups)

    # It's time to compare performance in each group
    compare_performance(bench_values, unique_id, systems_groups, detail,
                        rampup_value, current_dir)
    print "##########################################"
    print
    return bench_values
Code example #11
def sample(args):
    # load ascii text and covert to lowercase
    with codecs.open(os.path.join(args.data_dir, 'input.txt'),
                     "r",
                     encoding='utf-8') as f:
        raw_text = f.read()
    # create mapping of unique chars to integers, and a reverse mapping
    chars = sorted(list(set(raw_text)))
    char_to_int = dict((c, i) for i, c in enumerate(chars))
    int_to_char = dict((i, c) for i, c in enumerate(chars))
    # summarize the loaded data
    n_chars = len(raw_text)
    n_vocab = len(chars)
    print("Total Characters: ", n_chars)
    print("Total Vocab: ", n_vocab)
    # prepare the dataset of input to output pairs encoded as integers
    seq_length = 100
    dataX = []
    dataY = []
    for i in range(0, n_chars - seq_length, 1):
        seq_in = raw_text[i:i + seq_length]
        seq_out = raw_text[i + seq_length]
        dataX.append([char_to_int[char] for char in seq_in])
        dataY.append(char_to_int[seq_out])
    n_patterns = len(dataX)
    print("Total Patterns: "), n_patterns
    X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
    # normalize
    X = X / float(n_vocab)
    # one hot encode the output variable
    y = np_utils.to_categorical(dataY)
    # load the network weights
    model = make_model(X, y)
    iteration = 10
    filename = find_file(
        'snapshot_' + MODEL_NAME + '_{:02d}'.format(iteration) + '*.hdf5',
        'models')
    model.load_weights(filename)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # pick a random seed
    start = numpy.random.randint(0, len(dataX) - 1)
    pattern = dataX[start]
    print("Seed:")
    print("\"", ''.join([int_to_char[value] for value in pattern]), "\"")
    # generate characters
    for i in range(1000):
        x = numpy.reshape(pattern, (1, len(pattern), 1))
        x = x / float(n_vocab)
        prediction = model.predict(x, verbose=0)
        index = numpy.argmax(prediction)
        result = int_to_char[index]
        seq_in = [int_to_char[value] for value in pattern]
        sys.stdout.write(result)
        pattern.append(index)
        pattern = pattern[1:len(pattern)]
    print("\nDone.")
Code example #12
File: merge.py Project: paulschreiber/people
def interactive_merge(abbr, old, new, name_match, role_match, retirement):
    """
    returns True iff a merge was done
    """
    oldfname = find_file(old["id"])
    newfname = "incoming/{}/legislature/{}".format(abbr, get_new_filename(new))
    click.secho(" {} {}".format(oldfname, newfname), fg="yellow")

    # simulate difference
    changes = compute_merge(old, new, keep_both_ids=False)

    if not changes:
        click.secho(" perfect match, removing " + newfname, fg="green")
        os.remove(newfname)
        return True

    for change in changes:
        if change.key_name == "name" or change.key_name == "roles":
            click.secho("    " + str(change), fg="red", bold=True)
        else:
            click.echo("    " + str(change))

    ch = "~"
    if name_match and role_match:
        choices = "m"
        # automatically pick merge
        ch = "m"
        # there is one very specific case that this fails in, if someone is beaten
        # by someone with the exact same name, that'll need to be caught manually
    elif name_match:
        choices = "m"
        text = "(m)erge?"
    elif role_match:
        choices = "mr"
        text = f"(m)erge? (r)etire {old['name']}"

    while ch not in (choices + "sa"):
        click.secho(text + " (s)kip? (a)bort?", bold=True)
        ch = click.getchar()

    if ch == "a":
        raise SystemExit(-1)
    elif ch == "m":
        merged = merge_people(old, new, keep_both_ids=False)
        dump_obj(merged, filename=oldfname)
        click.secho(" merged.", fg="green")
        os.remove(newfname)
    elif ch == "r":
        copy_new_incoming(abbr, new, "legislature")
        retire(abbr, old, new, retirement)
    elif ch == "s":
        return False

    return True
Code example #13
File: GPYedit_conf.py Project: Eclipse000/GPYedit
        def __init__(this):
                """
                Read the configuration file and set up the
                preferences so that they are ready to be accessed
                by the main application.

                """
                if not os.path.exists(CONFIG_FILE):
                        config_file_location = find_file(CONFIG_FILE, os.environ["HOME"])
                else:
                        config_file_location = os.getcwd() + os.sep + CONFIG_FILE

                if config_file_location is None:
                        return  # Configuration not found; use default settings
                else:
                        this.process_options(open(config_file_location, "r").readlines())
Code example #14
def train_test_net(net_name, dem_name='nDEM', ratio=10, verbose=1):
    """

    :param net_name:
    :return:
    """
    # Init
    options = parse_inputs()

    # Data loading (or preparation)
    d_path = options['val_dir']
    gt_names = sorted(filter(
        lambda xi: not os.path.isdir(xi) and re.search(options['lab_tag'], xi),
        os.listdir(d_path)),
                      key=find_number)
    cases_pre = [str(find_number(r)) for r in gt_names]
    gt_names = [
        gt for c, gt in zip(cases_pre, gt_names)
        if find_file('Z{:}.jpg'.format(c), d_path)
    ]
    cases = [c for c in cases_pre if find_file('Z{:}.jpg'.format(c), d_path)]

    train(cases, gt_names, net_name, dem_name, ratio, verbose)
Code example #15
File: main.py Project: murtazarang/deeprl_dist
def evaluate_fn(agent_dir, output_dir, seeds, port):
    agent = agent_dir.split('/')[-1]
    if not check_dir(agent_dir):
        logging.error('Evaluation: %s does not exist!' % agent)
        return
    # load config file for env
    config_dir = find_file(agent_dir)
    if not config_dir:
        return
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env, greedy_policy = init_env(config['ENV_CONFIG'],
                                  port=port,
                                  naive_policy=True)
    env.init_test_seeds(seeds)

    # load model for agent
    if agent != 'greedy':
        # init centralized or multi agent
        if env.agent == 'ia2c':
            model = IA2C(env.n_s_ls, env.n_a, env.neighbor_mask,
                         env.distance_mask, env.coop_gamma, 0,
                         config['MODEL_CONFIG'])
        elif env.agent == 'ia2c_fp':
            model = IA2C_FP(env.n_s_ls, env.n_a, env.neighbor_mask,
                            env.distance_mask, env.coop_gamma, 0,
                            config['MODEL_CONFIG'])
        elif env.agent == 'ma2c_nc':
            model = MA2C_NC(env.n_s, env.n_a, env.neighbor_mask,
                            env.distance_mask, env.coop_gamma, 0,
                            config['MODEL_CONFIG'])
        else:
            return
        if not model.load(agent_dir + '/'):
            return
    else:
        model = greedy_policy
    env.agent = agent
    # collect evaluation data
    evaluator = Evaluator(env, model, output_dir)
    evaluator.run()
Code example #16
File: index.py Project: iamtheef/Flask-CNN-API
def predict():
    try:
        is_link = request.get_json()['isLink']
        name = request.get_json()['input'].replace(' ', '_')
        if is_link:
            file = _download(name)
            if file['success']:
                return call_prediction(file['name'])
            else:
                logger('Faulty link', request)
                return error.return_response(message='Link is not correct',
                                             status=400)
        elif find_file(name):
            return call_prediction(name)
        else:
            logger('File not found', request)
            return error.return_response(message='File not found', status=404)
    except:
        return error.return_response(message='Something was wrong', status=500)
Code example #17
File: manual_data.py Project: resistbot/people
def update_from_csv(filename, fields, other_identifiers):
    with open(filename) as f:
        for line in csv.DictReader(f):
            yaml_filename = find_file(line["id"])
            with open(yaml_filename) as yf:
                person = load_yaml(yf)

            for field in fields:
                person[field] = line[field]

            if other_identifiers and "other_identifiers" not in person:
                person["other_identifiers"] = []
            for scheme in other_identifiers:
                # TODO: check for duplicates among what was already there
                for id in line[scheme].split(";"):
                    if id:
                        person["other_identifiers"].append({
                            "scheme": scheme,
                            "identifier": id
                        })
            dump_obj(person, filename=yaml_filename)
Code example #18
File: test_utils.py Project: resistbot/people
def test_find_file_good():
    filename = find_file("a2e4a1b2-f0fd-4c35-9e0c-bb009778792f")
    assert "Pam-Snyder" in filename
Code example #19
def main():
    if (args.resynthesize_static):
        if (args.load_file != None):
            filepath = utils.find_file(args.load_file)
            spectrum, freqs, _ = utils.spectrogram(filepath)
            freqs, amps, freq_idx = utils.get_harmonics(spectrum,
                                                        freqs,
                                                        plot=args.plot)
            noise_mean, noise_std = utils.fit_noise(spectrum,
                                                    freqs,
                                                    plot=args.plot)
            if (args.save_file != None):
                save_path = DEST_DIR + args.save_file
            else:
                save_path = DEST_DIR + args.load_file[:-4] + '_resynthesized.csv'
            noise_path = save_path[:-4] + '_noise.csv'
            utils.write_peaks(freqs, amps, save_path, norm=True)
            utils.write_noise(noise_mean, noise_std, noise_path)
        else:
            if (args.load_dir != None):
                filepath = utils.find_file(args.load_dir,
                                           all_files=True,
                                           extension='.wav')
                for file in filepath:
                    try:
                        spectrum, freqs, _ = utils.spectrogram(file)
                        freqs, amps, freq_idx = utils.get_harmonics(
                            spectrum, freqs, plot=args.plot)
                        save_path = DEST_DIR + os.path.basename(
                            file) + '_resynthesized.csv'
                        utils.write_peaks(freqs, amps, save_path, norm=True)
                    except KeyboardInterrupt:
                        raise KeyboardInterrupt("Ctrl-c pressed!")
                    except Exception as e:
                        print(e)
            else:
                raise Exception("--load_file or --load_dir argument missing!")
    if (args._fit_noise):
        if (args.load_file != None):
            filepath = utils.find_file(args.load_file)
            S, freqs, sr = utils.spectrogram(filepath)
            attack, sustain, release = utils.get_adsr(S,
                                                      freqs,
                                                      sr,
                                                      filename=filepath,
                                                      plot=False)
            S_sustain = S[:, sustain[0]:sustain[1]]
            #utils.visualize_S(S_sustain,sr)
            noise_mean, noise_std = utils.fit_noise(S_sustain, freqs,
                                                    args.plot)
        else:
            raise Exception("--load_file argument missing!")
    if (args.spectro):
        if (args.load_file != None):
            filepath = utils.find_file(args.load_file)
            spectrum, freqs, _ = utils.spectrogram(filepath)
        else:
            raise Exception("--load_file argument missing!")
        if (args.save_file != None):
            utils.write_spectro(spectrum, freqs,
                                SPECTRUM_CSV_DEST + args.save_file)
    if (args.harm):
        if (args.load_file != None):
            filepath = utils.find_file(args.load_file)
            S, freqs, sr = utils.spectrogram(filepath)
            #print("S: ",S.shape,"S_: ", S_.shape)
            peaks, amps, _ = utils.get_harmonics(S, freqs)
            if (args.save_file != None):
                utils.write_peaks(peaks, amps,
                                  SPECTRUM_CSV_DEST + args.save_file)
        else:
            raise Exception("--load_file argument missing!")
    if (args.analyze):
        if (args.load_file != None):
            filepath = utils.find_file(args.load_file)
            S, freqs, sr = utils.spectrogram(filepath)
            attack, sustain, release = utils.get_adsr(S,
                                                      freqs,
                                                      sr,
                                                      filename=filepath,
                                                      plot=False)  #args.plot)
            # Take a look at sustain part
            S_sustain = S[:, sustain[0]:sustain[1]]
            #utils.plot_freqs(S_sustain,freqs)
            utils.fit_freqs(S_sustain, freqs, plot=args.plot)
        else:
            raise Exception("--load_file argument missing!")
    if (args.adsr):
        if (args.load_file != None):
            filepath = utils.find_file(args.load_file)
            S, freqs, sr = utils.spectrogram(filepath)
            attack, sustain, release = utils.get_adsr(S,
                                                      freqs,
                                                      sr,
                                                      filename=filepath,
                                                      plot=args.plot)
            print("attack: ", attack)
            print("sustain: ", sustain)
            print("release: ", release)
        else:
            if (args.load_dir != None):
                filepath = utils.find_file(args.load_dir,
                                           all_files=True,
                                           extension='.wav')
                for file in filepath:
                    try:
                        S, freqs, sr = utils.spectrogram(file)
                        attack, sustain, release = utils.get_adsr(
                            S, freqs, sr, filename=file, plot=args.plot)
                        print('Attack: ', attack)
                        print('Sustain: ', sustain)
                        print('Release: ', release)
                    except KeyboardInterrupt:
                        raise KeyboardInterrupt("Ctrl+c pressed!")
                    except Exception as e:
                        print(e)
            else:
                raise Exception("--load_file or --load_dir argument missing!")
    if (args.visualize):
        if (args.load_file != None):
            filepath = utils.find_file(args.load_file)
            utils.visualize(filepath)
Code example #20
File: compile.py Project: jmcruz1983/UBS
 def _find_file(self, dir=None, filename=None):
     return find_file(os.path.join(self.project_path,
                                   dir),
                       filename)
Code example #21
File: nyfix.py Project: paulschreiber/people
import csv
from utils import find_file, load_yaml, dump_obj

with open("nyleg.csv") as f:
    for row in csv.DictReader(f):
        os_id = row["osid"]
        fname = find_file(os_id)
        with open(fname) as lf:
            obj = load_yaml(lf)
            for cd in obj["contact_details"]:
                if cd["note"] == "Capitol Office":
                    cd["voice"] = row["Capitol Phone"].replace("(",
                                                               "").replace(
                                                                   ") ", "-")
                if cd["note"] == "District Office":
                    cd["voice"] = row["District Phone"].replace("(",
                                                                "").replace(
                                                                    ") ", "-")
            obj["email"] = row["email"]
            if row["twitter"] and "ids" not in obj:
                obj["ids"] = {"twitter": row["twitter"].replace("@", "")}
        dump_obj(obj, filename=fname)
Code example #22
def evaluate_fn(agent_dir, output_dir, seeds, port, demo):
    agent = agent_dir.split('/')[-1]
    doubleQ = True
    if agent == 'ddqn':
        doubleQ = False
        agent = 'dqn'
    if not check_dir(agent_dir):
        logging.error('Evaluation: %s does not exist!' % agent)
        return
    # load config file for env
    config_dir = find_file(agent_dir + '/data/')
    if not config_dir:
        return
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env, greedy_policy = init_env(config['ENV_CONFIG'],
                                  port=port,
                                  naive_policy=True)
    logging.info(
        'Evaluation: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
        (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))
    env.init_test_seeds(seeds)

    # load model for agent
    if agent != 'greedy':
        # init centralized or multi agent
        if agent == 'a2c':
            model = A2C(env.n_s, env.n_a, 0, config['MODEL_CONFIG'])
        elif agent == 'ia2c':
            model = IA2C(env.n_s_ls, env.n_a_ls, env.n_w_ls, 0,
                         config['MODEL_CONFIG'])
        elif agent == 'ma2c':
            model = MA2C(env.n_s_ls, env.n_a_ls, env.n_w_ls, env.n_f_ls, 0,
                         config['MODEL_CONFIG'])
        elif agent == 'codql':
            print('This is codql')
            model = MFQ(nb_agent=len(env.n_s_ls),
                        a_dim=env.n_a_ls[0],
                        s_dim=env.n_s_ls[0],
                        s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                        s_dim_wait=env.n_w_ls[0],
                        config=config['MODEL_CONFIG'])
        elif agent == 'dqn':
            model = DQN(nb_agent=len(env.n_s_ls),
                        a_dim=env.n_a_ls[0],
                        s_dim=env.n_s_ls[0],
                        s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                        s_dim_wait=env.n_w_ls[0],
                        config=config['MODEL_CONFIG'],
                        doubleQ=doubleQ)  #doubleQ=False denotes dqn else ddqn
        elif agent == 'ddpg':
            model = DDPGEN(nb_agent=len(env.n_s_ls),
                           share_params=True,
                           a_dim=env.n_a_ls[0],
                           s_dim=env.n_s_ls[0],
                           s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                           s_dim_wait=env.n_w_ls[0])
        elif agent == 'iqld':
            model = IQL(env.n_s_ls,
                        env.n_a_ls,
                        env.n_w_ls,
                        0,
                        config['MODEL_CONFIG'],
                        seed=0,
                        model_type='dqn')
        else:
            model = IQL(env.n_s_ls,
                        env.n_a_ls,
                        env.n_w_ls,
                        0,
                        config['MODEL_CONFIG'],
                        seed=0,
                        model_type='lr')
        if not model.load(agent_dir + '/model/'):
            return
    else:
        model = greedy_policy
    env.agent = agent
    # collect evaluation data
    evaluator = Evaluator(env, model, output_dir, demo=demo)
    evaluator.run()
Code example #23
File: test_utils.py Project: resistbot/people
def test_find_file_missing():
    with pytest.raises(FileNotFoundError):
        find_file("77777777-ffff-0000-9000-bbbbbbbbbbbb")
Code example #24
 def _find_firefox_bin(self):
     return find_file(self.path, "geckodriver").pop(0)
Code example #25
def main(argv):
    pattern = ''
    ignore_list = ''
    detail = {'category': '', 'group': '', 'item': ''}
    try:
        opts, args = getopt.getopt(
            argv[1:], "hp:l:g:c:i:I:",
            ['pattern', 'log-level', 'group', 'category', 'item', "ignore"])
    except getopt.GetoptError:
        print "Error: One of the options passed to the cmdline was not supported"
        print "Please fix your command line or read the help (-h option)"
        sys.exit(2)

    utils.print_level = int(utils.Levels.SUMMARY)

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print_help()
            sys.exit(0)
        elif opt in ("-p", "--pattern"):
            pattern = arg
            pattern = pattern.replace('\\', '')
        elif opt in ("-l", "--log-level"):
            if "list" in arg:
                print_help()
                sys.exit(2)
            utils.print_level = 0
            if utils.Levels.message[utils.Levels.INFO] in arg:
                utils.print_level |= int(utils.Levels.INFO)
            if utils.Levels.message[utils.Levels.WARNING] in arg:
                utils.print_level |= int(utils.Levels.WARNING)
            if utils.Levels.message[utils.Levels.ERROR] in arg:
                utils.print_level |= int(utils.Levels.ERROR)
            if utils.Levels.message[utils.Levels.SUMMARY] in arg:
                utils.print_level |= int(utils.Levels.SUMMARY)
            if utils.Levels.message[utils.Levels.DETAIL] in arg:
                utils.print_level |= int(utils.Levels.DETAIL)
            if utils.print_level == 0:
                print "Error: The log level specified is not part of the supported list !"
                print "Please check the usage of this tool and retry."
                sys.exit(2)
        elif opt in ("-g", "--group"):
            detail['group'] = arg
        elif opt in ("-c", "--category"):
            detail['category'] = arg
        elif opt in ("-i", "--item"):
            detail['item'] = arg
        elif opt in ("-I", "--ignore"):
            ignore_list = arg

    if ((utils.print_level & utils.Levels.DETAIL) == utils.Levels.DETAIL):
        if (len(detail['group']) == 0) or (len(detail['category'])
                                           == 0) or (len(detail['item']) == 0):
            print "Error: The DETAIL output requires group, category & item options to be set"
            sys.exit(2)

    if not pattern:
        print "Error: Pattern option is mandatory"
        print_help()
        sys.exit(2)

    # Extracting regex and path
    path = os.path.dirname(pattern)
    if not path:
        path = "."
    else:
        pattern = os.path.basename(pattern)

    if not os.path.isdir(path):
        print "Error: the path %s doesn't exists !" % path
        sys.exit(2)

    health_data_file = utils.find_file(path, pattern)
    if len(health_data_file) == 0:
        print "No log file found with pattern %s!" % pattern
        sys.exit(1)
    else:
        print "%d files Selected with pattern '%s'" % (len(health_data_file),
                                                       pattern)

    # Extract data from the hw files
    bench_values = []
    for health in health_data_file:
        bench_values.append(eval(open(health).read()))

    # Extracting the host list from the data to get
    # the initial list of hosts. We have here a single group with all the servers
    systems_groups = []
    systems_groups.append(utils.get_hosts_list(bench_values))

    # Let's create groups of similar servers
    group_systems(bench_values, systems_groups, ignore_list)
    compare_sets.print_systems_groups(systems_groups)

    # It's time to compare performance in each group
    compare_performance(bench_values, systems_groups, detail)
Code example #26
File: 1_data_split.py Project: sea-lab-wm/bee-tool
        set2 = set(bug["id"] for bug in valid_sets[(k + 1) % num_sets])
        if set1 & set2:
            raise Exception(f"Validation elements in common in fold: {k}")

        set1 = set(bug["id"] for bug in test_sets[k])
        set2 = set(bug["id"] for bug in test_sets[(k + 1) % num_sets])
        if set1 & set2:
            raise Exception(f"Testing elements in common in fold: {k}")


if __name__ == '__main__':
    data_path = "data_prep3"
    output_path = "data_split3"
    num_folds = 10

    for file in utils.find_file("*.json.prep", data_path):
        sys_name = os.path.basename(file).replace(".json.prep", "")
        all_bugs = utils.read_json_line_by_line(
            os.path.join(data_path, os.path.basename(file)))

        print(sys_name, len(all_bugs))
        train_sets, valid_sets, test_sets = random_split_bugs(
            all_bugs, num_folds)

        check_sets(valid_sets, test_sets)
        print_stats(train_sets, valid_sets, test_sets)

        sys_output_path = os.path.join(output_path, sys_name)
        Path(sys_output_path).mkdir(parents=True, exist_ok=True)
        write_sets(sys_output_path, train_sets, valid_sets, test_sets)
Code example #27
def train(cases, gt_names, net_name, dem_name, ratio=10, verbose=1):
    # Init
    options = parse_inputs()
    d_path = options['val_dir']
    c = color_codes()
    n_folds = len(gt_names)
    cases = [
        c_i for c_i in cases
        if find_file('Z{:}.jpg'.format(c_i + dem_name), d_path)
    ]

    print('{:}[{:}]{:} Loading all mosaics and DEMs{:}'.format(
        c['c'], time.strftime("%H:%M:%S"), c['g'], c['nc']))
    for c_i in cases:
        dem_file = os.path.join(d_path, 'Z{:}.jpg'.format(c_i + dem_name))
        dem = cv2.imread(dem_file)
        mosaic_file = os.path.join(d_path, 'Z{:}.jpg'.format(c_i))
        mosaic = cv2.imread(mosaic_file)
        print(dem_file, dem.shape, mosaic_file, mosaic.shape)

    print('{:}[{:}]{:} Ground truth'.format(c['c'], time.strftime("%H:%M:%S"),
                                            c['nc']))
    y = [(np.mean(cv2.imread(os.path.join(d_path, im)), axis=-1) < 50).astype(
        np.uint8) for im in gt_names]

    print('{:}[{:}]{:} DEM'.format(c['c'], time.strftime("%H:%M:%S"), c['nc']))
    dems = [
        cv2.imread(os.path.join(d_path, 'Z{:}.jpg'.format(c_i + dem_name)))
        for c_i in cases
    ]
    print('{:}[{:}]{:} Mosaics'.format(c['c'], time.strftime("%H:%M:%S"),
                                       c['nc']))
    mosaics = [
        cv2.imread(os.path.join(d_path, 'Z{:}.jpg'.format(c_i)))
        for c_i in cases
    ]

    print('{:}[{:}]{:} Normalising data'.format(c['c'],
                                                time.strftime("%H:%M:%S"),
                                                c['nc']))
    x = [
        np.moveaxis(
            np.concatenate([mosaic, np.expand_dims(dem[..., 0], -1)], -1), -1,
            0) for mosaic, dem in zip(mosaics, dems)
    ]

    mean_x = [np.mean(xi.reshape((len(xi), -1)), axis=-1) for xi in x]
    std_x = [np.std(xi.reshape((len(xi), -1)), axis=-1) for xi in x]

    norm_x = [(xi - meani.reshape((-1, 1, 1))) / stdi.reshape((-1, 1, 1))
              for xi, meani, stdi in zip(x, mean_x, std_x)]

    print('%s[%s] %sStarting cross-validation (leave-one-mosaic-out)'
          ' - %d mosaics%s' %
          (c['c'], time.strftime("%H:%M:%S"), c['g'], n_folds, c['nc']))
    training_start = time.time()
    for i, case in enumerate(cases):
        if verbose > 0:
            print('%s[%s]%s Starting training for mosaic %s %s(%d/%d)%s' %
                  (c['c'], time.strftime("%H:%M:%S"), c['g'], case, c['c'],
                   i + 1, len(cases), c['nc']))
        test_x = norm_x[i]

        train_y = y[:i] + y[i + 1:]
        train_x = norm_x[:i] + norm_x[i + 1:]

        val_split = 0.1
        batch_size = 32
        # patch_size = (256, 256)
        patch_size = (64, 64)
        # overlap = (64, 64)
        overlap = (32, 32)
        num_workers = 1

        model_name = '{:}.d{:}.unc.mosaic{:}.mdl'.format(net_name, ratio, case)
        net = Unet2D(n_inputs=len(norm_x[0]))

        try:
            net.load_model(os.path.join(d_path, model_name))
        except IOError:

            # Dataloader creation
            if verbose > 0:
                n_params = sum(p.numel() for p in net.parameters()
                               if p.requires_grad)
                print('%sStarting training with a Unet 2D%s (%d parameters)' %
                      (c['c'], c['nc'], n_params))

            if val_split > 0:
                n_samples = len(train_x)

                n_t_samples = int(n_samples * (1 - val_split))

                d_train = train_x[:n_t_samples]
                d_val = train_x[n_t_samples:]

                l_train = train_y[:n_t_samples]
                l_val = train_y[n_t_samples:]

                print('Training dataset (with validation)')
                # train_dataset = Cropping2DDataset(
                #     d_train, l_train, patch_size=patch_size, overlap=overlap,
                #     filtered=True
                # )
                train_dataset = CroppingDown2DDataset(d_train,
                                                      l_train,
                                                      patch_size=patch_size,
                                                      overlap=overlap,
                                                      filtered=True)

                print('Validation dataset (with validation)')
                # val_dataset = Cropping2DDataset(
                #     d_val, l_val, patch_size=patch_size, overlap=overlap,
                #     filtered=True
                # )
                val_dataset = CroppingDown2DDataset(d_val,
                                                    l_val,
                                                    patch_size=patch_size,
                                                    overlap=overlap,
                                                    filtered=True)
            else:
                print('Training dataset')
                train_dataset = Cropping2DDataset(train_x,
                                                  train_y,
                                                  patch_size=patch_size,
                                                  overlap=overlap,
                                                  filtered=True)

                print('Validation dataset')
                val_dataset = Cropping2DDataset(train_x,
                                                train_y,
                                                patch_size=patch_size,
                                                overlap=overlap)

            train_dataloader = DataLoader(train_dataset,
                                          batch_size,
                                          True,
                                          num_workers=num_workers)
            val_dataloader = DataLoader(val_dataset,
                                        batch_size,
                                        num_workers=num_workers)

            epochs = parse_inputs()['epochs']
            patience = parse_inputs()['patience']

            net.fit(
                train_dataloader,
                val_dataloader,
                epochs=epochs,
                patience=patience,
            )

            net.save_model(os.path.join(d_path, model_name))

        if verbose > 0:
            print('%s[%s]%s Starting testing with mosaic %s %s(%d/%d)%s' %
                  (c['c'], time.strftime("%H:%M:%S"), c['g'], case, c['c'],
                   i + 1, len(cases), c['nc']))

        downtest_x = imresize(
            test_x, (test_x.shape[0], ) +
            tuple([length // ratio for length in test_x.shape[1:]]))
        yi, unci = net.test([downtest_x], patch_size=None)

        upyi = imresize(yi[0], test_x.shape[1:])

        upunci = imresize(unci[0], test_x.shape[1:])

        cv2.imwrite(
            os.path.join(
                d_path,
                'pred.ds{:}.{:}_trees{:}.jpg'.format(ratio, dem_name, case)),
            (yi[0] * 255).astype(np.uint8))
        cv2.imwrite(
            os.path.join(
                d_path,
                'pred.d{:}.{:}_trees{:}.jpg'.format(ratio, dem_name, case)),
            (upyi * 255).astype(np.uint8))
        cv2.imwrite(
            os.path.join(
                d_path,
                'unc.ds{:}.{:}_trees{:}.jpg'.format(ratio, dem_name, case)),
            (unci[0] * 255).astype(np.uint8))
        cv2.imwrite(
            os.path.join(
                d_path,
                'unc.d{:}.{:}_trees{:}.jpg'.format(ratio, dem_name, case)),
            (upunci * 255).astype(np.uint8))

    if verbose > 0:
        time_str = time.strftime('%H hours %M minutes %S seconds',
                                 time.gmtime(time.time() - training_start))
        print('%sTraining finished%s (total time %s)\n' %
              (c['r'], c['nc'], time_str))
Code example #28
        value.extend(dic[key])


if __name__ == '__main__':
    data_path = "data_split3"
    output_path = "data_split_ft_all3"
    num_folds = 10

    for k in range(num_folds):
        print(k)
        for _, dirs, _ in os.walk(data_path):
            fold_ft_sentences = {}
            fold_sentences = {}

            for sys_name in dirs:
                files = utils.find_file(f"*_{k}.json",
                                        os.path.join(data_path, sys_name))

                Path(os.path.join(output_path, sys_name)).mkdir(parents=True,
                                                                exist_ok=True)
                for file in files:
                    bugs = utils.read_json_line_by_line(file)
                    ft_sentences, sentences = get_bug_sentences(bugs, sys_name)

                    fold = os.path.basename(file)
                    if fold not in fold_ft_sentences:
                        fold_ft_sentences[fold] = {
                            "all": [],
                            "ob": [],
                            "eb": [],
                            "s2r": []
                        }
Code example #29
File: mayors.py Project: resistbot/people
def get_mayor_details(csv_fname):
    with open(csv_fname) as f:
        data = csv.DictReader(f)

        mayors_by_state = defaultdict(list)
        municipalities_by_state = defaultdict(list)

        for line in data:
            state = line["Postal Code"].lower()
            if state == "dc":
                continue
            # if state != state_to_import:
            #     continue
            city = line["City"].strip()
            given_name = line["Given Name"].strip()
            family_name = line["Family Name"].strip()
            name = f"{given_name} {family_name}"
            email = line["Email"].strip()
            source = line["Source"].strip()
            phone = reformat_phone_number(
                f"{line['Voice']} {line['Phone Extension']}")
            address = line["Address"].strip()
            zipcode = line["Zip Code"].strip()
            if not line["Term End"]:
                term_end = "2022-01-01"  # temporary term end date for the unknowns
            else:
                term_end = datetime.datetime.strptime(
                    line["Term End"], "%m/%d/%Y").strftime("%Y-%m-%d")

            if term_end < "2020-09-24":
                click.secho(f"skipping retired {name}, {term_end}",
                            fg="yellow")
                continue

            full_address = f"{address};{city}, {state.upper()} {zipcode}"

            contact = OrderedDict({"note": "Primary Office"})
            if full_address:
                contact["address"] = full_address
            if phone:
                contact["voice"] = phone

            jid = city_to_jurisdiction(city, state)

            existing, retired = get_existing_mayor(state, name)
            if existing:
                pid = existing["id"]
            else:
                pid = ocd_uuid("person")

            if retired:
                os.remove(find_file(existing["id"]))

            mayors_by_state[state].append(
                OrderedDict({
                    "id": pid,
                    "name": name,
                    "given_name": given_name,
                    "family_name": family_name,
                    "roles": [{
                        "jurisdiction": jid,
                        "type": "mayor",
                        "end_date": term_end
                    }],
                    "contact_details": [contact],
                    "sources": [{"url": source}] if source else [],
                    "links": [{"url": source}] if source else [],
                    "email": email,
                }))

            municipalities_by_state[state].append(
                OrderedDict({
                    "name": city,
                    "id": jid
                }))

    return mayors_by_state, municipalities_by_state
Code example #30
File: test_check.py Project: xluffy-fork/edeploy
def load_samples(bench_values):
    for health in utils.find_file('tools/cardiff/sample', '*.hw_'):
        bench_values.append(eval(open(health).read()))
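
The edeploy examples (#7, #10, #25, #30, #33) all rely on a utils.find_file(path, pattern) variant that returns the list of files whose names match a shell-style pattern. A minimal sketch under that assumption; the recursive walk is a guess, not the project's implementation:

import fnmatch
import os

def find_file(path, pattern):
    # Collect every file under path whose basename matches the
    # shell-style pattern (e.g. '*.hw_').
    matches = []
    for root, _, files in os.walk(path):
        for name in fnmatch.filter(files, pattern):
            matches.append(os.path.join(root, name))
    return matches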
Code example #31
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
from tfy_db_credentials import tfy_db_url
# from database import SessionMaker

app = Flask(__name__)
app.secret_key = 'development key'
app.jinja_env.add_extension('jinja2.ext.loopcontrols')

############## MAIL ####################
mail = Mail()
app.config["MAIL_SERVER"] = "smtp.gmail.com"
app.config["MAIL_PORT"] = 465
app.config["MAIL_USE_SSL"] = True

with open(utils.find_file(), 'r') as fp:
    d = json.load(fp)
    app.config["MAIL_USERNAME"] = d['email']
    app.config["MAIL_PASSWORD"] = d['password']

mail.init_app(app)
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
############## MAIL ####################


@login_manager.user_loader
def load_user(user_id):
    engine = create_engine(tfy_db_url, convert_unicode=True)
    SessionMaker = sessionmaker(bind=engine)
    session = SessionMaker()
Code example #32
 def _find_chrome_bin(self):
     return find_file(self.path, "chromedriver").pop(0)
Code example #33
File: cardiff.py Project: dsavineau/edeploy
def main(argv):
    pattern = ''
    ignore_list = ''
    detail = {'category': '', 'group': '', 'item': ''}
    try:
        opts, args = getopt.getopt(argv[1:], "hp:l:g:c:i:I:", ['pattern', 'log-level', 'group', 'category', 'item', "ignore"])
    except getopt.GetoptError:
        print "Error: One of the options passed to the cmdline was not supported"
        print "Please fix your command line or read the help (-h option)"
        sys.exit(2)

    utils.print_level = int(utils.Levels.SUMMARY)

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print_help()
            sys.exit(0)
        elif opt in ("-p", "--pattern"):
            pattern = arg
            pattern = pattern.replace('\\', '')
        elif opt in ("-l", "--log-level"):
            if "list" in arg:
                print_help()
                sys.exit(2)
            utils.print_level = 0
            if utils.Levels.message[utils.Levels.INFO] in arg:
                utils.print_level |= int(utils.Levels.INFO)
            if utils.Levels.message[utils.Levels.WARNING] in arg:
                utils.print_level |= int(utils.Levels.WARNING)
            if utils.Levels.message[utils.Levels.ERROR] in arg:
                utils.print_level |= int(utils.Levels.ERROR)
            if utils.Levels.message[utils.Levels.SUMMARY] in arg:
                utils.print_level |= int(utils.Levels.SUMMARY)
            if utils.Levels.message[utils.Levels.DETAIL] in arg:
                utils.print_level |= int(utils.Levels.DETAIL)
            if utils.print_level == 0:
                print "Error: The log level specified is not part of the supported list !"
                print "Please check the usage of this tool and retry."
                sys.exit(2)
        elif opt in ("-g", "--group"):
            detail['group'] = arg
        elif opt in ("-c", "--category"):
            detail['category'] = arg
        elif opt in ("-i", "--item"):
            detail['item'] = arg
        elif opt in ("-I", "--ignore"):
            ignore_list = arg

    if ((utils.print_level & utils.Levels.DETAIL) == utils.Levels.DETAIL):
        if (len(detail['group']) == 0) or (len(detail['category']) == 0) or (len(detail['item']) == 0):
            print "Error: The DETAIL output requires group, category & item options to be set"
            sys.exit(2)

    if not pattern:
        print "Error: Pattern option is mandatory"
        print_help()
        sys.exit(2)

    # Extracting regex and path
    path = os.path.dirname(pattern)
    if not path:
        path = "."
    else:
        pattern = os.path.basename(pattern)

    if not os.path.isdir(path):
        print "Error: the path %s doesn't exists !" % path
        sys.exit(2)

    health_data_file = utils.find_file(path, pattern)
    if len(health_data_file) == 0:
        print "No log file found with pattern %s!" % pattern
        sys.exit(1)
    else:
        print "%d files Selected with pattern '%s'" % (len(health_data_file), pattern)

    # Extract data from the hw files
    bench_values = []
    for health in health_data_file:
        bench_values.append(eval(open(health).read()))

    # Extracting the host list from the data to get
    # the initial list of hosts. We have here a single group with all the servers
    systems_groups = []
    systems_groups.append(utils.get_hosts_list(bench_values))

    # Let's create groups of similar servers
    group_systems(bench_values, systems_groups, ignore_list)
    compare_sets.print_systems_groups(systems_groups)

    # It's time to compare performance in each group
    compare_performance(bench_values, systems_groups, detail)