Code example #1
def compute_elo_by_goals(data_df, players, all_teams, elo, initial_score=100):
    """
    This function is used to compute the elo ratings of teams based on goals scored depending on wins and losses.

    :param data_df:
    :param players:
    :param elo:
    :return:
    """
    n_games = data_df.shape[0]
    elo_table = np.zeros((n_games, len(players)))
    # set initial score for all teams
    elo_table[0, :] = initial_score
    bar = progressbar.ProgressBar(widgets=[
        ' [',
        progressbar.Timer(),
        '] ',
        progressbar.Bar(),
        ' (',
        progressbar.ETA(),
        ') ',
    ])
    for i in bar(range(n_games)):

        match = data_df.iloc[i]
        player_home = match.home_team
        player_away = match.away_team
        hid = np.where(all_teams == player_home)[0][0]
        aid = np.where(all_teams == player_away)[0][0]
        pair = [players[hid], players[aid]]
        res = match.result_final
        home_goals = match.home_goals
        away_goals = match.away_goals

        if res == 0:
            for goal_difference in range(int(abs(home_goals - away_goals))):
                a, b = elo.match_algo_strict(pair[0], pair[1])
            elo_table[i, hid] = a.score
            elo_table[i, aid] = b.score
        elif res == 2:
            for goal_difference in range(int(abs(home_goals - away_goals))):
                a, b = elo.match(pair[1], pair[0])
            elo_table[i, aid] = a.score
            elo_table[i, hid] = b.score
        else:
            pass

    # make continuous
    bar = progressbar.ProgressBar(widgets=[
        ' [',
        progressbar.Timer(),
        '] ',
        progressbar.Bar(),
        ' (',
        progressbar.ETA(),
        ') ',
    ])
    print('[C] Making continuous')
    for p in bar(range(len(players))):

        for g in range(n_games):
            if elo_table[g, p] == 0:
                elo_table[g, p] = elo_table[g - 1, p]
            else:
                pass
    return elo_table
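The Timer/Bar/ETA widget layout used twice above can wrap any sized iterable directly. A minimal, self-contained sketch of that pattern (the sleep call is only a stand-in for the per-game work):

import time

import progressbar

widgets = [' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ']
bar = progressbar.ProgressBar(widgets=widgets)
for i in bar(range(50)):
    time.sleep(0.01)  # placeholder for the real per-iteration work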
Code example #2
def train_model(model, encoder_frnn, encoder_rrnn, decoder_rnn, train_lemmas,
                train_feat_dicts, train_words, dev_lemmas, dev_feat_dicts,
                dev_words, alphabet_index, inverse_alphabet_index, epochs,
                optimization, results_file_path, morph_index,
                train_aligned_pairs, dev_aligned_pairs, feat_index,
                feature_types, plot):
    print 'training...'

    np.random.seed(17)
    random.seed(17)

    if optimization == 'ADAM':
        trainer = pc.AdamTrainer(model,
                                 lam=REGULARIZATION,
                                 alpha=LEARNING_RATE,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 eps=1e-8)
    elif optimization == 'MOMENTUM':
        trainer = pc.MomentumSGDTrainer(model)
    elif optimization == 'SGD':
        trainer = pc.SimpleSGDTrainer(model)
    elif optimization == 'ADAGRAD':
        trainer = pc.AdagradTrainer(model)
    elif optimization == 'ADADELTA':
        trainer = pc.AdadeltaTrainer(model)
    else:
        trainer = pc.SimpleSGDTrainer(model)

    total_loss = 0
    best_avg_dev_loss = 999
    best_dev_accuracy = -1
    best_train_accuracy = -1
    patience = 0
    train_len = len(train_words)
    epochs_x = []
    train_loss_y = []
    dev_loss_y = []
    train_accuracy_y = []
    dev_accuracy_y = []
    e = -1

    # progress bar init
    widgets = [progressbar.Bar('>'), ' ', progressbar.ETA()]
    train_progress_bar = progressbar.ProgressBar(widgets=widgets,
                                                 maxval=epochs).start()
    avg_loss = -1

    for e in xrange(epochs):

        # randomize the training set
        indices = range(train_len)
        random.shuffle(indices)
        train_set = zip(train_lemmas, train_feat_dicts, train_words,
                        train_aligned_pairs)
        train_set = [train_set[i] for i in indices]

        # compute loss for each example and update
        for i, example in enumerate(train_set):
            lemma, feats, word, alignment = example
            loss = one_word_loss(model, encoder_frnn, encoder_rrnn,
                                 decoder_rnn, lemma, feats, word,
                                 alphabet_index, alignment, feat_index,
                                 feature_types)
            loss_value = loss.value()
            total_loss += loss_value
            loss.backward()
            trainer.update()
            if i > 0:
                avg_loss = total_loss / float(i + e * train_len)
            else:
                avg_loss = total_loss

        if EARLY_STOPPING:

            # get train accuracy
            train_predictions = predict_templates(
                model, decoder_rnn, encoder_frnn, encoder_rrnn, alphabet_index,
                inverse_alphabet_index, train_lemmas, train_feat_dicts,
                feat_index, feature_types)
            print 'evaluating on train...'
            train_accuracy = evaluate_model(train_predictions,
                                            train_lemmas,
                                            train_feat_dicts,
                                            train_words,
                                            feature_types,
                                            print_results=False)[1]

            if train_accuracy > best_train_accuracy:
                best_train_accuracy = train_accuracy

            dev_accuracy = 0
            avg_dev_loss = 0

            if len(dev_lemmas) > 0:

                # get dev accuracy
                dev_predictions = predict_templates(model, decoder_rnn,
                                                    encoder_frnn, encoder_rrnn,
                                                    alphabet_index,
                                                    inverse_alphabet_index,
                                                    dev_lemmas, dev_feat_dicts,
                                                    feat_index, feature_types)
                print 'evaluating on dev...'
                # get dev accuracy
                dev_accuracy = evaluate_model(dev_predictions,
                                              dev_lemmas,
                                              dev_feat_dicts,
                                              dev_words,
                                              feature_types,
                                              print_results=False)[1]

                if dev_accuracy > best_dev_accuracy:
                    best_dev_accuracy = dev_accuracy

                    # save best model to disk
                    save_pycnn_model(model, results_file_path, morph_index)
                    print 'saved new best model'
                    patience = 0
                else:
                    patience += 1

                # found "perfect" model
                if dev_accuracy == 1:
                    train_progress_bar.finish()
                    if plot:
                        plt.cla()
                    return model, e

                # get dev loss
                total_dev_loss = 0
                for i in xrange(len(dev_lemmas)):
                    total_dev_loss += one_word_loss(
                        model, encoder_frnn, encoder_rrnn, decoder_rnn,
                        dev_lemmas[i], dev_feat_dicts[i], dev_words[i],
                        alphabet_index, dev_aligned_pairs[i], feat_index,
                        feature_types).value()

                avg_dev_loss = total_dev_loss / float(len(dev_lemmas))
                if avg_dev_loss < best_avg_dev_loss:
                    best_avg_dev_loss = avg_dev_loss

                print 'epoch: {0} train loss: {1:.4f} dev loss: {2:.4f} dev accuracy: {3:.4f} train accuracy = {4:.4f} \
 best dev accuracy {5:.4f} best train accuracy: {6:.4f} patience = {7}'.format(
                    e, avg_loss, avg_dev_loss, dev_accuracy, train_accuracy,
                    best_dev_accuracy, best_train_accuracy, patience)

                if patience == MAX_PATIENCE:
                    print 'out of patience after {0} epochs'.format(str(e))
                    # TODO: would like to return best model but pycnn has a bug with save and load. Maybe copy via code?
                    # return best_model[0]
                    train_progress_bar.finish()
                    if plot:
                        plt.cla()
                    return model, e
            else:

                # if no dev set is present, optimize on train set
                print 'no dev set for early stopping, running all epochs until perfectly fitting or patience was \
                reached on the train set'

                if train_accuracy > best_train_accuracy:
                    best_train_accuracy = train_accuracy

                    # save best model to disk
                    save_pycnn_model(model, results_file_path, morph_index)
                    print 'saved new best model'
                    patience = 0
                else:
                    patience += 1

                print 'epoch: {0} train loss: {1:.4f} train accuracy = {2:.4f} best train accuracy: {3:.4f} \
                patience = {4}'.format(e, avg_loss, train_accuracy,
                                       best_train_accuracy, patience)

                # found "perfect" model on train set or patience has reached
                if train_accuracy == 1 or patience == MAX_PATIENCE:
                    train_progress_bar.finish()
                    if plot:
                        plt.cla()
                    return model, e

            # update lists for plotting
            train_accuracy_y.append(train_accuracy)
            epochs_x.append(e)
            train_loss_y.append(avg_loss)
            dev_loss_y.append(avg_dev_loss)
            dev_accuracy_y.append(dev_accuracy)

        # finished epoch
        train_progress_bar.update(e)
        if plot:
            with plt.style.context('fivethirtyeight'):
                p1, = plt.plot(epochs_x, dev_loss_y, label='dev loss')
                p2, = plt.plot(epochs_x, train_loss_y, label='train loss')
                p3, = plt.plot(epochs_x, dev_accuracy_y, label='dev acc.')
                p4, = plt.plot(epochs_x, train_accuracy_y, label='train acc.')
                plt.legend(loc='upper left', handles=[p1, p2, p3, p4])
            plt.savefig(results_file_path + '_' + morph_index + '.png')
    train_progress_bar.finish()
    if plot:
        plt.cla()
    print 'finished training. average loss: ' + str(avg_loss)
    return model, e
Code example #3
def calculate_score(means_path,
                    label_encoder_path,
                    best_weight_path,
                    test_hdf5_path,
                    cross_val=None,
                    preds_cross=None,
                    labels_cross=None,
                    is_mapped=False):
    # load RGB means for training set
    means = json.loads(open(means_path).read())

    # load LabelEncoder
    le = pickle.loads(open(label_encoder_path, 'rb').read())

    # initialize image preprocessors
    sp, mp, cp, iap = SimplePreprocessor(
        config.IMAGE_SIZE, config.IMAGE_SIZE), MeanPreprocessor(
            means['R'], means['G'], means['B']), CropPreprocessor(
                config.IMAGE_SIZE,
                config.IMAGE_SIZE), ImageToArrayPreprocessor()

    custom_objects = None
    agh = AgeGenderHelper(config, deploy)
    if config.DATASET_TYPE == 'age':
        one_off_mappings = agh.build_oneoff_mappings(le)
        one_off = OneOffAccuracy(one_off_mappings)
        custom_objects = {'one_off_accuracy': one_off.one_off_accuracy}

    # load model
    print(f'[INFO] loading {best_weight_path}...')
    model = load_model(best_weight_path, custom_objects=custom_objects)

    # initialize testing dataset generator, then predict
    if cross_val is None:
        print(
            f'[INFO] predicting in testing data (no crops){config.SALIENCY_INFO}...'
        )
    else:
        print(
            f'[INFO] predicting in testing data (no crops) for cross validation {cross_val}{config.SALIENCY_INFO}...'
        )

    test_gen = HDF5DatasetGenerator(test_hdf5_path,
                                    batch_size=config.BATCH_SIZE,
                                    preprocessors=[sp, mp, iap],
                                    classes=config.NUM_CLASSES)
    preds = model.predict_generator(test_gen.generator(),
                                    steps=test_gen.num_images //
                                    config.BATCH_SIZE)

    # compute rank-1 and one-off accuracies
    labels = to_categorical(
        test_gen.db['labels'][0:config.BATCH_SIZE *
                              (test_gen.num_images // config.BATCH_SIZE)],
        num_classes=config.NUM_CLASSES)
    preds_mapped = preds.argmax(axis=1)

    if is_mapped:
        preds_mapped = agh.build_mapping_to_iog_labels()[preds_mapped]

    if cross_val is None:
        print(
            '[INFO] serializing all images classified incorrectly for testing dataset...'
        )
        prefix_path = os.path.sep.join(
            [config.WRONG_BASE, config.DATASET_TYPE])

        agh.plot_confusion_matrix_from_data(config,
                                            labels.argmax(axis=1),
                                            preds_mapped,
                                            le=le,
                                            save_path=os.path.sep.join([
                                                config.OUTPUT_BASE,
                                                f'cm_{config.DATASET_TYPE}.png'
                                            ]))
    else:
        print(
            f'[INFO] serializing all images classified incorrectly for cross validation {cross_val} of testing dataset...'
        )
        prefix_path = os.path.sep.join(
            [config.WRONG_BASE, f'Cross{cross_val}', config.DATASET_TYPE])

        preds_cross.extend(preds_mapped.tolist())
        labels_cross.extend(labels.argmax(axis=1).tolist())

    if os.path.exists(prefix_path):
        shutil.rmtree(prefix_path)
    os.makedirs(prefix_path)

    for i, (pred, label) in enumerate(zip(preds_mapped,
                                          labels.argmax(axis=1))):
        if pred != label:
            image = test_gen.db['images'][i]

            if config.DATASET_TYPE == 'age':
                real_label, real_pred = le.classes_[label], le.classes_[pred]
                real_label = real_label.replace('_', '-')
                real_label = real_label.replace('-inf', '+')

                real_pred = real_pred.replace('_', '-')
                real_pred = real_pred.replace('-inf', '+')

            elif config.DATASET_TYPE == 'gender':
                real_label = 'Male' if label == 0 else 'Female'
                real_pred = 'Male' if pred == 0 else 'Female'

            cv2.putText(image, f'Actual: {real_label}, Predict: {real_pred}',
                        (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0),
                        2)

            cv2.imwrite(os.path.sep.join([prefix_path, f'{i:05d}.jpg']), image)

    score = accuracy_score(labels.argmax(axis=1), preds_mapped)
    print(f'[INFO] rank-1: {score:.4f}')
    score_one_off = None
    if config.DATASET_TYPE == 'age':
        score_one_off = one_off.one_off_compute(
            labels, to_categorical(preds_mapped,
                                   num_classes=config.NUM_CLASSES))
        print(f'[INFO] one-off: {score_one_off:.4f}')
    test_gen.close()

    # re-initialize testing generator, now excluding SimplePreprocessor
    test_gen = HDF5DatasetGenerator(test_hdf5_path,
                                    config.BATCH_SIZE,
                                    preprocessors=[mp],
                                    classes=config.NUM_CLASSES)
    preds = []

    labels = to_categorical(test_gen.db['labels'],
                            num_classes=config.NUM_CLASSES)

    print('[INFO] predicting in testing data (with crops)...')
    # initialize progress bar
    widgets = [
        'Evaluating: ',
        progressbar.Percentage(), ' ',
        progressbar.Bar(), ' ',
        progressbar.ETA()
    ]
    pbar = progressbar.ProgressBar(maxval=math.ceil(test_gen.num_images /
                                                    config.BATCH_SIZE),
                                   widgets=widgets).start()

    for i, (images, _) in enumerate(test_gen.generator(passes=1)):
        for image in images:
            crops = cp.preprocess(image)
            crops = np.array([iap.preprocess(c) for c in crops])

            pred = model.predict(crops)
            preds.append(pred.mean(axis=0))

        pbar.update(i)

    pbar.finish()
    test_gen.close()

    # compute rank-1 accuracy
    preds_mapped = np.argmax(preds, axis=1)
    if is_mapped:
        preds_mapped = agh.build_mapping_to_iog_labels()[preds_mapped]

    score_crops = accuracy_score(labels.argmax(axis=1), preds_mapped)
    print(f'[INFO] rank-1: {score_crops:.4f}')
    score_one_off_crops = None
    if config.DATASET_TYPE == 'age':
        score_one_off_crops = one_off.one_off_compute(
            labels, to_categorical(preds_mapped,
                                   num_classes=config.NUM_CLASSES))
        print(f'[INFO] one-off: {score_one_off_crops:.4f}')

    return score, score_one_off, score_crops, score_one_off_crops
Code example #4
datasets = [
    ('train', train_paths, train_labels, config.TRAIN_HDF5),
    ('val', val_paths, val_labels, config.VAL_HDF5),
    ('test', test_paths, test_labels, config.TEST_HDF5)
]

R = []
G = []
B = []

for dataset_type, paths, labels, output_path in datasets:
    print(f'[INFO] Building {output_path}...')
    writer = HDF5DatasetWriter((len(paths), 64, 64, 3), output_path)

    widgets = ['Building dataset: ', progressbar.Percentage(), ' ', progressbar.Bar(), progressbar.ETA()]
    progress_bar = progressbar.ProgressBar(maxval=len(paths), widgets=widgets).start()

    for i, (path, label) in enumerate(zip(paths, labels)):
        image = cv2.imread(path)

        if dataset_type == 'train':
            (b, g, r) = cv2.mean(image)[:3]
            R.append(r)
            G.append(g)
            B.append(b)

        writer.add([image], [label])
        progress_bar.update(i)

    progress_bar.finish()
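The loop above only accumulates the training split's per-channel means and stops before anything is persisted. A hedged continuation (it assumes HDF5DatasetWriter exposes close(), that numpy and json are already imported, and that config.DATASET_MEAN names the output file; all three are assumptions) would flush each writer and then serialize the averaged means in the JSON layout that code example #3 reads back:

    writer.close()  # still inside the per-dataset loop: flush this split to disk

# after the loop: average the accumulated per-image channel means and save them
print('[INFO] Serializing means...')
means = {'R': np.mean(R), 'G': np.mean(G), 'B': np.mean(B)}
with open(config.DATASET_MEAN, 'w') as f:
    f.write(json.dumps(means))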
Code example #5
File: MGGenerate.py Project: loftwah/midiGenerator
    def generate_fom_data(self, nb_seeds=10, new_data_path=None, length=None, new_save_path=None, save_images=False,
                          no_duration=False, verbose=1):
        """
        Generate Midi file from the seed and the trained model
        :param nb_seeds: number of seeds for the generation
        :param new_data_path: The path of the seed
        :param length: Length of th generation
        :param new_save_path:
        :param save_images: To save the pianoroll of the generation (.jpg images)
        :param no_duration: if True : all notes will be the shortest length possible
        :param verbose: Level of verbose
        :return:
        """
        # ---------- Verify the inputs ----------

        # ----- Create the seed -----
        if (new_data_path is not None) and (new_data_path != self.data_transformed_path.as_posix()):
            self.load_data(new_data_path)
        if self.data_transformed_path is None:
            raise Exception('Some data need to be loaded before generating')
        self.sequence = Sequences.AllInstSequence(
            path=str(self.data_transformed_path),
            nb_steps=self.nb_steps,
            batch_size=1,
            work_on=self.work_on)
        nb_instruments = self.sequence.nb_instruments

        seeds_indexes = random.sample(range(len(self.sequence)), nb_seeds)

        # -- Length --
        length = length if length is not None else 20
        # -- For save Midi path --
        if type(new_save_path) is str or (
                type(new_save_path) is bool and new_save_path) or (
                new_save_path is None and self.save_midis_path is None):
            self.get_new_save_midis_path(path=new_save_path)
        # --- Done Verifying the inputs ---
        mask = self.get_mask(nb_instruments)

        self.save_midis_path.mkdir(parents=True, exist_ok=True)
        cprint('Start generating from data ...', 'blue')
        for s in range(nb_seeds):
            cprint('Generation {0}/{1}'.format(s + 1, nb_seeds), 'blue')
            generated = np.array(
                self.sequence[seeds_indexes[s]][0])  # (nb_instruments, 1, nb_steps, step_size, inputs_size, 2)
            bar = progressbar.ProgressBar(maxval=length,
                                          widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ',
                                                   progressbar.ETA()])
            bar.start()  # To see it working
            for l in range(length):
                samples = generated[:, :, l:]  # (nb_instruments, 1, nb_steps, length, 88, 2)   # 1 = batch
                # expanded_samples = np.expand_dims(samples, axis=0)
                preds = self.keras_nn.generate(
                    input=list(samples) + mask)  # (nb_instruments, batch=1 , nb_steps=1, length, 88, 2)
                preds = np.asarray(preds).astype('float64')  # (nb_instruments, 1, 1, step_size, input_size, 2)
                if len(preds.shape) == 4:  # Only one instrument : output of nn not a list
                    preds = preds[np.newaxis]
                next_array = midi.create.normalize_activation(preds)  # Normalize the activation part
                generated = np.concatenate((generated, next_array), axis=2)  # (nb_instruments, nb_steps, length, 88, 2)

                bar.update(l + 1)
            bar.finish()

            self.ensure_save_midis_path()

            generated_midi_final = self.reshape_generated_array(generated)
            self.compute_generated_array(
                generated_array=generated_midi_final,
                file_name=self.save_midis_path / f'generated_{s}',
                no_duration=no_duration,
                verbose=verbose,
                save_images=save_images
            )

        if self.batch is not None:
            self.sequence.change_batch_size(self.batch)

        summary.summarize_generation(str(self.save_midis_path), **{
            'full_name': self.full_name,
            'epochs': self.total_epochs,
            'input_param': self.input_param,
            'instruments': self.instruments,
            'notes_range': self.notes_range
        })

        cprint('Done generating', 'green')
Code example #6
    def _need_update(self):
        cur_val = self.get_current_value()
        if cur_val < 100:
            return cur_val - self.last_update > 10
        elif cur_val < 500:
            return cur_val - self.last_update > 50
        else:
            return cur_val - self.last_update > 100


default_widgets = [
    progressbar.Percentage(), ' ',
    progressbar.Bar(marker='=', left='[', right=']'), ' ',
    progressbar.CounterWidget(), ' ',
    progressbar.GenericSpeed(format='%.2ft/s'), ' ',
    progressbar.ETA(prefix='eta ')
]


class ProgressBarObserver(progressbar.ProgressBar, Observer):
    """Display progress through a progressbar.
    """

    # the progress bar is only updated in increments of this for performance
    UPDATE_INTERVAL = 25

    def __init__(self,
                 widgets=default_widgets,
                 term_width=None,
                 fd=sys.stderr):
        super(ProgressBarObserver, self).__init__(widgets=widgets,
                                                  term_width=term_width,
                                                  fd=fd)
Code example #7
# load the serialized face detector, age predictor, and camouflage models from disk
print("[INFO] loading trained models...")
faceNet = cv2.dnn.readNet(config.FACE_PROTOTXT, config.FACE_WEIGHTS)
ageNet = cv2.dnn.readNet(config.AGE_PROTOTXT, config.AGE_WEIGHTS)
camoNet = load_model(config.CAMO_MODEL)

# grab the paths to all images in our dataset
imagePaths = sorted(list(paths.list_images(args["dataset"])))
print("[INFO] processing {} images".format(len(imagePaths)))

# initialize the progress bar
widgets = [
    "Processing Images: ",
    progressbar.Percentage(), " ",
    progressbar.Bar(), " ",
    progressbar.ETA()
]
pbar = progressbar.ProgressBar(maxval=len(imagePaths), widgets=widgets).start()

# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
    # load the image from disk
    image = cv2.imread(imagePath)

    # if the image is 'None', then it could not be properly read from
    # disk (so we should just skip it)
    if image is None:
        continue

    # detect all faces in the input image and then predict their
    # perceived age based on the face ROI
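The snippet breaks off before the detection step its last comment describes. A minimal sketch of that step using the standard OpenCV DNN pattern (the 300x300 blob size, the mean values, the 0.5 confidence threshold, and the hand-off to ageNet/camoNet are all assumptions):

    # construct a blob from the image and run it through the face detector
    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # keep detections above an (assumed) confidence threshold
    for j in range(detections.shape[2]):
        confidence = detections[0, 0, j, 2]
        if confidence < 0.5:
            continue
        box = (detections[0, 0, j, 3:7] * [w, h, w, h]).astype("int")
        (startX, startY, endX, endY) = box
        face = image[startY:endY, startX:endX]
        # ... the face ROI would then be passed to ageNet / camoNet ...

    pbar.update(i)

pbar.finish()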
Code example #8
def main():
    '''
    Parse command line arguments and execute the code
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_path', required=True, type=str)
    parser.add_argument('--newext', default='.PNG', type=str)
    parser.add_argument('--oldext', default='.JPEG', type=str)
    args = parser.parse_args()

    start = time.time()

    image_list = Utils_Image.get_Image_List(args.dataset_path, args.oldext)

    progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()])

    print("Start Processing... May take a while...")

    for image_path in progress(image_list):
        Utils_Image.resizeImage(image_path)
        Utils_Image.change_extension(image_path,args.oldext,args.newext)
 
    end = time.time()
    print("Parsed: %d Image of the Dataset"%(len(image_list)))
    print("Elapsed Time:%d Seconds"%(end-start))
    print("Running Completed with Success!!!")
Code example #9
def runSteppedTestCase(
    testcase,
    name,
    keep_cuda_code=False,
    printTimings=True,
    showProgress=True,
    solver=None,
):
    params = getTestCaseParams(testcase, keep_cuda_code=keep_cuda_code)

    epsilon = 1e-6
    bar = None
    result = {
        "testcase": testcase,
        "name": name,
        "field_states": None,
        "runtime": 0.0,
        "error": None,
    }

    try:
        if solver is None:
            solver = coss.CUDAODESystemSolver(
                testcase.num_nodes,
                testcase.ode,
                init_field_parameters=testcase.field_parameter_values,
                params=params,
            )

        field_states_fn = testcase.field_states_fn
        if field_states_fn is None:
            field_states_fn = lambda _: None

        if testcase.field_states is not None and testcase.field_states != [""]:
            field_states = np.zeros(
                testcase.num_nodes * len(testcase.field_states),
                dtype=getFloatPrecisionDtypeStr(testcase.double),
            )
            solver.get_field_states(field_states)

            field_states_fn(field_states)
        else:
            field_states = None

            field_states_fn(solver.field_states)

        t = testcase.t0
        dt = testcase.dt * testcase.ode_substeps
        tstop = testcase.tstop

        if progressbar is not None and showProgress:
            bar = progressbar.ProgressBar(
                maxval=tstop + epsilon,
                widgets=[
                    progressbar.Bar("=", "[", "]"),
                    " ",
                    progressbar.Percentage(),
                    " ",
                    progressbar.ETA(),
                ],
            )

        while t < tstop + epsilon:
            if testcase.update_field_states:
                solver.set_field_states(field_states)
            solver.forward(
                t,
                dt,
                update_simulation_runtimes=True,
                update_host_states=testcase.update_host_states,
            )
            if testcase.update_field_states:
                solver.get_field_states(field_states)
                if field_states is not None:
                    field_states_fn(field_states)
                else:
                    field_states_fn(solver.field_states)

            if progressbar is not None and showProgress:
                bar.update(t)

            t += dt

        if progressbar is not None and showProgress:
            bar.finish()

        if printTimings:
            list_timings()
            clear_timings()

        solver.get_field_states(field_states)

        result["field_states"] = field_states
        result["runtime"] = solver.simulation_runtime

        solver.reset()

        print("Completed test '{0}' in {1:.2f}s".format(
            name, result["runtime"]))
    except Exception:
        if bar is not None:
            bar.finish()
        f = StringIO()
        traceback.print_exc(file=f)
        result["error"] = f.getvalue()
        f.close()
        print("FAILED test '{0}'.".format(name))

    print()
    return result
Code example #10
def run_mnist(use_tensorboard=False, use_trace=False, arithmetic_type='wf'):
    ''' Run MNIST using the Wave custom matmul operator. 
    '''

    tf.reset_default_graph()

    waveflow.waveflow_arithmetic = arithmetic_type

    # Import data
    mnist = input_data.read_data_sets('./mnist_data')

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    xx = tf.zeros([784, 10])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    y_l1 = tf.matmul(x, W)
    y = tf.nn.bias_add(y_l1, b)

    # Define loss and optimizer
    y_ = tf.placeholder(tf.int64, [None])

    # The raw formulation of cross-entropy,
    #
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    #                                 reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.losses.sparse_softmax_cross_entropy on the raw
    # outputs of 'y', and then average across the batch.
    with tf.name_scope('loss'):
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_,
                                                               logits=y)
        # cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)), reduction_indices=[1])
        avg_entropy = tf.reduce_mean(cross_entropy)
        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(
            avg_entropy)
        if use_tensorboard:
            tf.summary.scalar("avg_loss", avg_entropy, family='Accuracy')

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y, 1), y_)
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        if use_tensorboard:
            tf.summary.scalar("accuracy", accuracy, family='Accuracy')

    tb_log = waveflow.TF_TBLogger(log_dir='./tb_mnist_log/',
                                  enable_tb=use_tensorboard,
                                  enable_trace=use_trace,
                                  unified_trace=use_trace,
                                  arith_type=arithmetic_type)

    total_batches = 100

    widgets = ["Training: ", pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
    pbar = pb.ProgressBar(widgets=widgets, maxval=total_batches)
    pbar.start()

    # for shape debugging
    # shape_op = tf.shape(y)

    print("Got op list: %s" % (waveflow.op_list(tf.get_default_graph())))
    # dev = '/device:GPU:0'
    dev = '/cpu:0'

    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        with sess.graph.device(dev):
            print("Running model ...")
            tb_log.init_session(sess)

            tf.global_variables_initializer().run()

            # Train
            for i in range(total_batches):
                pbar.update(i)
                batch_xs, batch_ys = mnist.train.next_batch(100)

                tb_log.run_session(iter=i,
                                   ops=train_step,
                                   feed={
                                       x: batch_xs,
                                       y_: batch_ys
                                   })

            pbar.finish()
            tb_log.close()

            # Compute training set accuracy
            print("Evaluating full training set ...")
            sys.stdout.flush()
            train_accuracy = sess.run(accuracy,
                                      feed_dict={
                                          x: mnist.train.images,
                                          y_: mnist.train.labels
                                      })
            print("Train set accuracy: %s" % (train_accuracy))

            # Compute test set accuracy
            print("Evaluating full test set ...")
            sys.stdout.flush()
            test_accuracy = sess.run(accuracy,
                                     feed_dict={
                                         x: mnist.test.images,
                                         y_: mnist.test.labels
                                     })
            print("Test set accuracy: %s" % (test_accuracy))

    return test_accuracy
Code example #11
import datetime

import keras
import numpy as np
import os
import progressbar
from collections import defaultdict

from settings import FLOAT_TYPE, COMMENT_DELIMITER, PARAMETERS_FILE_NAME_SUFFIX

widgets_progressbar = [
    ' [', progressbar.Timer(), '] ',
    progressbar.Bar("░", fill="⋅"),
    ' (', progressbar.ETA(), ') ',
]


def get_parameters_dict(job_folder):
    """

    :param job_folder:
    :return:
    """
    parameters = defaultdict(str)

    try:
        parameters_file = list(filter(lambda file: PARAMETERS_FILE_NAME_SUFFIX in file, os.listdir(job_folder)))[0]
        with open(os.path.join(job_folder, parameters_file), "r") as f:
            lines = f.readlines()
            for line in lines:
                words = line.replace("\n", "").split("=")
Code example #12
    def update_embedding(self,
                         session,
                         batch_size,
                         model_path,
                         loaded_emb=None,
                         loaded_obj_bbs=None):

        # model_name = os.path.basename(model_path).split('.')[0]
        model_name = self._get_codebook_name(model_path)

        self._dataset._kw['model_path'] = list([str(model_path)])
        self._dataset._kw['model'] = 'cad' if 'cad' in model_path else self._dataset._kw['model']
        self._dataset._kw['model'] = 'reconst' if 'reconst' in model_path else self._dataset._kw['model']

        if loaded_emb is None:
            embedding_size = self._dataset.embedding_size
            J = self._encoder.latent_space_size
            embedding_z = np.empty((embedding_size, J))
            obj_bbs = np.empty((embedding_size, 4))
            widgets = [
                'Creating embedding: ',
                progressbar.Percentage(), ' ',
                progressbar.Bar(), ' ',
                progressbar.Counter(),
                ' / %s' % embedding_size, ' ',
                progressbar.ETA(), ' '
            ]
            bar = progressbar.ProgressBar(maxval=embedding_size,
                                          widgets=widgets)
            bar.start()
            for a, e in u.batch_iteration_indices(embedding_size, batch_size):

                batch, obj_bbs_batch = self._dataset.render_embedding_image_batch(
                    a, e)
                # import cv2
                # cv2.imshow('',u.tiles(batch,10,10))
                # cv2.waitKey(0)
                embedding_z[a:e] = session.run(
                    self._encoder.z, feed_dict={self._encoder.x: batch})

                if self.embed_bb:
                    obj_bbs[a:e] = obj_bbs_batch

                bar.update(e)
            bar.finish()
            # embedding_z = embedding_z.T
            normalized_embedding = embedding_z / np.linalg.norm(
                embedding_z, axis=1, keepdims=True)
        else:
            normalized_embedding = loaded_emb
            obj_bbs = loaded_obj_bbs

        session.run(self.embedding_assign_op[model_name],
                    {self.embedding: normalized_embedding})

        if self.embed_bb:
            session.run(self.embed_obj_bbs_assign_op[model_name],
                        {self.embed_obj_bbs: obj_bbs})
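The row-wise L2 normalization above prepares the codebook for cosine-similarity lookup. A minimal illustrative sketch of such a query against the stored embedding (the function name and the lookup itself are assumptions, not the project's actual API):

import numpy as np

def nearest_codebook_index(normalized_embedding, query_z):
    """Return the index of the codebook row most similar (cosine) to query_z."""
    q = query_z / np.linalg.norm(query_z)  # unit-normalize the query code
    cos_sim = normalized_embedding.dot(q)  # codebook rows are already unit length
    return int(np.argmax(cos_sim))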
Code example #13
def run_conv_mnist(use_tensorboard=False, use_trace=False, arithmetic_type='tf'):
    ''' Run MNIST using the Wave custom matmul operator. 
    '''
    waveflow.waveflow_arithmetic = arithmetic_type

    tf.reset_default_graph()
    # Import data
    mnist = input_data.read_data_sets('./mnist_data')

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.int64, [None])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)


    # The raw formulation of cross-entropy,
    #
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    #                                 reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.losses.sparse_softmax_cross_entropy on the raw
    # outputs of 'y', and then average across the batch.
    with tf.name_scope('loss'):
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y_conv)
        avg_entropy = tf.reduce_mean(cross_entropy)
        if use_tensorboard: tf.summary.scalar("avg_loss", avg_entropy, family='Accuracy')

    # train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    with tf.name_scope('adam_optimizer'):
        optimizer = tf.train.AdamOptimizer(1e-4)
        train_step = optimizer.minimize(avg_entropy)
        # tf.summary.scalar("learning_rate", optimizer._lr_t)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        if use_tensorboard: tf.summary.scalar("accuracy", accuracy, family='Accuracy')

    '''
    with tf.name_scope('metrics'):
        y_prob = tf.one_hot(y_, 10)
        _, recall = tf.metrics.recall(y_conv, y_prob)
        _, precision = tf.metrics.precision(y_conv, y_prob)

        tf.summary.scalar('recall', recall)
        tf.summary.scalar('precision', precision)
    '''

    tb_log = waveflow.TF_TBLogger(log_dir='./tb_conv_mnist_log/', enable_tb=use_tensorboard, 
        enable_trace=use_trace, unified_trace=use_trace, arith_type=arithmetic_type)

    # print("vars:")
    if use_tensorboard:
        for v in tf.trainable_variables():
            m_v = optimizer.get_slot(v, 'm')
            # print("var: %s, data: %s" % (v.name, m_v))
            v_mean = tf.reduce_mean(m_v)
            tf.summary.scalar("%s" % (v.name), v_mean, family='Momentum')


    # print('nodes with trainng')
    # print('op list: ', op_list(tf.get_default_graph()))

    batch_size = 128
    total_batches = 100
    report_interval = 1

    widgets = ["Training: ", pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
    pbar = pb.ProgressBar(widgets=widgets, maxval=total_batches)
    # pbar.start()

    config = tf.ConfigProto(device_count={"CPU": 16},
                            inter_op_parallelism_threads=2,
                            intra_op_parallelism_threads=16)

    with tf.Session('', config=config) as sess:
        print("Running model ...")
        tb_log.init_session(sess)

        tf.global_variables_initializer().run()

        # Train
        for i in range(total_batches):
            # pbar.update(i)
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            tb_log.run_session(iter=i, ops=train_step, feed={x: batch_xs, y_: batch_ys, keep_prob: 0.5})

            if i % report_interval == 0:
                train_accuracy = accuracy.eval(feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 1.0})
                print('step %d, training accuracy %g' % (i, train_accuracy))

        # pbar.finish()
        tb_log.close()

        # Compute training set accuracy
        print("Evaluating full training set ...")
        sys.stdout.flush()
        train_accuracy = accuracy.eval(feed_dict={x: mnist.train.images, y_: mnist.train.labels, keep_prob: 1.0})
        print("Train set accuracy: %s" % (train_accuracy))

        # Compute test set accuracy
        print("Evaluating full test set ...")
        sys.stdout.flush()
        test_accuracy = accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
        print("Test set accuracy: %s" % (test_accuracy))

    return test_accuracy
Code example #14
    def generator(self, color):
        n_samples = len(self.train_list)
        # progress bar
        widgets = ["Building dataset: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
        pbar = progressbar.ProgressBar(maxval=n_samples, widgets=widgets).start()

        for i in range(n_samples):
            # paint the current text
            self.paint_text(self.train_list[i], i, color)
            pbar.update(i)

        pbar.finish()
Code example #15
def handle(infiles, tables, user_input_path, **kwargs):

    logger = logging.getLogger()
    msg = '{}: Starting'.format(VAR_NAME)
    logger.info(msg)

    logdir = kwargs.get('logdir')
    serial = kwargs.get('serial')

    # check that we have some input files for every variable
    zerofiles = False
    for variable in RAW_VARIABLES:
        if len(infiles[variable]) == 0:
            msg = '{}: Unable to find input files for {}'.format(
                VAR_NAME, variable)
            print_message(msg)
            logging.error(msg)
            zerofiles = True
    if zerofiles:
        return None

    # Create the logging directory and setup cmor
    if logdir:
        logpath = logdir
    else:
        outpath, _ = os.path.split(logger.__dict__['handlers'][0].baseFilename)
        logpath = os.path.join(outpath, 'cmor_logs')
    os.makedirs(logpath, exist_ok=True)

    logfile = os.path.join(logpath, VAR_NAME + '.log')

    cmor.setup(
        inpath=tables,
        netcdf_file_action=cmor.CMOR_REPLACE,
        logfile=logfile)

    cmor.dataset_json(str(user_input_path))
    cmor.load_table(str(TABLE))

    msg = '{}: CMOR setup complete'.format(VAR_NAME)
    logging.info(msg)

    # extract data from the input file
    msg = 'areacella: loading area'
    logger.info(msg)

    filename = infiles['area'][0]

    if not os.path.exists(filename):
        raise IOError("File not found: {}".format(filename))

    f = cdms2.open(filename)

    # load the data for each variable
    variable_data = f('area')

    if not variable_data.any():
        raise IOError("Variable data not found: {}".format(variable))

    # load the lon and lat info & bounds
    data = {
        'lat': variable_data.getLatitude(),
        'lon': variable_data.getLongitude(),
        'lat_bnds': f('lat_bnds'),
        'lon_bnds': f('lon_bnds'),
        'area': f('area')
    }

    msg = '{name}: loading axes'.format(name=VAR_NAME)
    logger.info(msg)

    axes = [{
        str('table_entry'): str('latitude'),
        str('units'): data['lat'].units,
        str('coord_vals'): data['lat'][:],
        str('cell_bounds'): data['lat_bnds'][:]
    }, {
        str('table_entry'): str('longitude'),
        str('units'): data['lon'].units,
        str('coord_vals'): data['lon'][:],
        str('cell_bounds'): data['lon_bnds'][:]
    }]

    msg = 'areacella: running CMOR'
    logging.info(msg)

    axis_ids = list()
    for axis in axes:
        axis_id = cmor.axis(**axis)
        axis_ids.append(axis_id)

    varid = cmor.variable(VAR_NAME, VAR_UNITS, axis_ids)

    if serial:
        myMessage = progressbar.DynamicMessage('running')
        myMessage.__call__ = my_dynamic_message
        widgets = [
            progressbar.DynamicMessage('running'), ' [',
            progressbar.Timer(), '] ',
            progressbar.Bar(),
            ' (', progressbar.ETA(), ') '
        ]
        progressbar.DynamicMessage.__call__ = my_dynamic_message
        pbar = progressbar.ProgressBar(
            maxval=1, widgets=widgets)
        pbar.start()

    r = 6.37122e6

    outdata = data['area'] * pow(r, 2)
    cmor.write(
        varid,
        outdata)

    if serial:
        pbar.update(1, running=msg)
        pbar.finish()

    msg = '{}: write complete, closing'.format(VAR_NAME)
    logger.debug(msg)

    cmor.close()

    msg = '{}: file close complete'.format(VAR_NAME)
    logger.debug(msg)

    return 'areacella'
Code example #16
def __save_new_documents(path_out: pathlib.Path, sentences: list) -> None:
    grade_levels = list(set(sentence.grade_level for sentence in sentences))
    widgets = ['Saving by grade level: ', pb.Percentage(), ' ', pb.Bar(marker = '.', left = '[', right = ']'), ' ', pb.ETA()]
    with pb.ProgressBar(widgets = widgets, max_value = len(grade_levels)) as bar:
        gl_i = 0
        for grade_level in grade_levels:
            bar.update(gl_i)
            file_out = path_out.joinpath(f'grade_level.{grade_level}.txt')
            with file_out.open('a', encoding = 'utf-8') as file_out:
                for sentence in sentences:
                    if sentence.grade_level == grade_level:
                        file_out.write(f'{sentence.text}\n')
            gl_i = gl_i + 1
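Because the inner loop above rescans the full sentence list once per grade level, grouping the sentences first keeps the work to a single pass; a sketch of that variant (it reuses the snippet's pb alias and pathlib import, and the helper name is hypothetical):

from collections import defaultdict

def __save_new_documents_grouped(path_out: pathlib.Path, sentences: list) -> None:
    # bucket sentences by grade level in one pass
    by_grade = defaultdict(list)
    for sentence in sentences:
        by_grade[sentence.grade_level].append(sentence.text)

    widgets = ['Saving by grade level: ', pb.Percentage(), ' ', pb.Bar(marker = '.', left = '[', right = ']'), ' ', pb.ETA()]
    with pb.ProgressBar(widgets = widgets, max_value = len(by_grade)) as bar:
        for gl_i, (grade_level, texts) in enumerate(by_grade.items()):
            bar.update(gl_i)
            file_out = path_out.joinpath(f'grade_level.{grade_level}.txt')
            with file_out.open('a', encoding = 'utf-8') as handle:
                handle.write('\n'.join(texts) + '\n')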
Code example #17
File: addressset.py Project: umang1210/btcrecover
def create_address_db(dbfilename,
                      blockdir,
                      table_len,
                      startBlockDate="2019-01-01",
                      endBlockDate="3000-12-31",
                      startBlockFile=0,
                      addressDB_yolo=False,
                      outputToText=False,
                      update=False,
                      progress_bar=True,
                      addresslistfile=None,
                      multiFile=False):
    """Creates an AddressSet database and saves it to a file

    :param dbfilename: the file name where the database is saved (overwriting it)
    :type dbfilename: str
    :param blockdir: the data directory where the Bitcoin block files reside
    :type blockdir: str
    :param update: if True, the existing database file is updated from new txs
    :type update: bool
    :param progress_bar: True to enable the progress bar
    :type progress_bar: bool
    """

    if update:
        print("Loading address database ...")
        address_set = AddressSet.fromfile(open(dbfilename, "r+b"),
                                          mmap_access=mmap.ACCESS_WRITE)
        first_filenum = address_set.last_filenum
        print()
    else:
        first_filenum = startBlockFile

    if not addresslistfile:
        for filename in glob.iglob(path.join(blockdir, "blk*.dat")):
            if path.isfile(filename): break
        else:
            raise ValueError(
                "no block files exist in blocks directory '{}'".format(
                    blockdir))

        filename = "blk{:05}.dat".format(first_filenum)
        if not path.isfile(path.join(blockdir, filename)):
            raise ValueError(
                "first block file '{}' doesn't exist in blocks directory '{}'".
                format(filename, blockdir))

    if not update:
        # Open the file early to make sure we can, but don't overwrite it yet
        # (see AddressSet.tofile() for why io.open() instead of open() is used)
        try:
            dbfile = io.open(dbfilename, "r+b")
        except IOError:
            dbfile = io.open(dbfilename, "wb")

        # Try to create the AddressDB. If the address set is sufficiently large (eg: BTC) then this requires 64 bit Python and will crash if attempted with 32 bit Python...
        try:
            # With the default bytes_per_addr and max_load, this allocates
            # about 8 GiB which is room for a little over 800 million addresses (Required as of 2019)
            address_set = AddressSet(1 << table_len)
        except OverflowError:
            print()
            exit(
                "AddressDB too large for use with 32 bit Python. You will need to install a 64 bit (x64) version of Python 3 from python.org and try again"
            )

    if addresslistfile:
        import btcrecover.btcrseed
        print("Initial AddressDB Contains", len(address_set), "Addresses")
        for i in range(9999):
            if multiFile:
                addresslistfile = addresslistfile[:-4] + '{:04d}'.format(i)
            try:
                with open(addresslistfile) as addressList_file:
                    print("Loading: ", addresslistfile)
                    addresses_loaded = 0
                    for address in addressList_file:
                        try:
                            if (address[0:2] != '0x'):
                                address_set.add(
                                    btcrecover.btcrseed.WalletBase.
                                    _addresses_to_hash160s([address.rstrip()
                                                            ]).pop())
                            else:
                                address_set.add(
                                    btcrecover.btcrseed.WalletEthereum.
                                    _addresses_to_hash160s([address.rstrip()
                                                            ]).pop())
                            addresses_loaded += 1
                            if (addresses_loaded % 1000000 == 0):
                                print("Checked:", addresses_loaded,
                                      "addresses in current file,",
                                      len(address_set),
                                      "in unique Hash160s in AddressDB")

                        except bitcoinlib.encoding.EncodingError:
                            print("Skipping Invalid Address:",
                                  address.rstrip())
                    print("Finished: ", addresslistfile)
                    if not multiFile:
                        break
            except FileNotFoundError:
                if multiFile:
                    continue
                else:
                    print("File:", addresslistfile, " not found")
                    exit()

        print("Finished AddressDB Contains", len(address_set), "Addresses")

    else:
        if progress_bar:
            try:
                import progressbar
            except ImportError:
                progress_bar = False

        if progress_bar:
            print("Parsing block files ...")
            for filenum in itertools.count(first_filenum):
                filename = path.join(blockdir, "blk{:05}.dat".format(filenum))
                if not path.isfile(filename):
                    break
            progress_label = progressbar.FormatLabel(
                " {:11,} addrs. %(elapsed)s, ".format(len(address_set)))
            block_bar_widgets = [
                progressbar.SimpleProgress(), " ",
                progressbar.Bar(left="[", fill="-", right="]"), progress_label,
                progressbar.ETA()
            ]
            progress_bar = progressbar.ProgressBar(maxval=filenum -
                                                   first_filenum,
                                                   widgets=block_bar_widgets)
            progress_bar.start()
        else:
            print("Block file   Address count")
            print("------------ -------------")
            # e.g. blk00943.dat   255,212,706

        for filenum in itertools.count(first_filenum):
            filename = path.join(blockdir, "blk{:05}.dat".format(filenum))
            if not path.isfile(filename):
                break
            address_set.last_filenum = filenum

            with open(filename, "rb") as blockfile:
                if not progress_bar:
                    print(path.basename(filename), end=" ")

                header = blockfile.read(
                    8
                )  # read in the magic and remaining (after these 8 bytes) block length
                chain_magic = header[:4]
                #print("Found Magic:", chain_magic.encode("hex"))
                while len(header) == 8 and header[4:] != b"\0\0\0\0":
                    if supportedChains(
                            chain_magic
                    ) != 1:  # Check magic to see if it is a chain we support
                        if not addressDB_yolo:  #Ignore checks on the blockchain type
                            #Throw an error message and exit if we encounter unsupported magic value
                            if supportedChains(chain_magic) == -1:
                                print(
                                    "Unrecognised Block Protocol (Unrecognised Magic), Found:",
                                    chain_magic,
                                    " You can force an AddressDB creation attempt by re-running this tool with the flag --dbyolo"
                                )

                            if supportedChains(chain_magic) == 0:
                                print(
                                    "Incompatible Block Protocol, You can force an AddressDB creation attempt by re-running this tool with the flag --dbyolo, but it probably won't work"
                                )

                            exit()

                    block = blockfile.read(
                        struct.unpack_from(
                            "<I", header,
                            4)[0])  # read in the rest of the block

                    tx_count, offset = varint(block,
                                              80)  # skips 80 bytes of header

                    #Extract Block Header info (Useful for debugging extra new chains)
                    #print("Block Header: ", block[0:80].encode("hex"))
                    #print()

                    #Get Block Header Info (Useful for debugging and limiting date range)
                    block_version = block[0:4]
                    block_prevHash = block[4:36]
                    block_merkleRoot = block[36:68]
                    block_time = struct.unpack("<I", block[68:72])[0]
                    block_bits = struct.unpack("<I", block[72:76])[0]
                    block_nonce = struct.unpack("<I", block[76:80])[0]

                    #print_debug = False
                    #if block_prevHash.encode("hex") =='52aa3101be5119a77cce7a8f2e2a8fcdfcbcf6ca0f3e15000000000000000000':
                    #    print_debug = True
                    #print("Block Version: ", block_version.hex())
                    #print("Block PrevHash: ", block_prevHash.hex())
                    #print("Block MerkleRoot: ", block_merkleRoot.hex())
                    #print("Block Bits: ", block_bits)
                    #print("Block Nonce: ", block_nonce)
                    #print("Block TIme: ", block_time, " " , datetime.fromtimestamp(float(block_time)))

                    blockDate = datetime.fromtimestamp(float(block_time))

                    #Only add addresses which occur in blocks that are within the time window we are looking at
                    if datetime.strptime(startBlockDate + " 00:00:00",
                                         '%Y-%m-%d %H:%M:%S'
                                         ) <= blockDate and datetime.strptime(
                                             endBlockDate + " 23:59:59",
                                             '%Y-%m-%d %H:%M:%S') >= blockDate:

                        for tx_num in range(tx_count):

                            offset += 4  # skips 4-byte tx version
                            is_bip144 = block[offset] == 0  # bip-144 marker
                            if is_bip144:
                                offset += 2  # skips 1-byte marker & 1-byte flag
                            txin_count, offset = varint(block, offset)
                            for txin_num in range(txin_count):
                                sigscript_len, offset = varint(
                                    block, offset + 36
                                )  # skips 32-byte tx id & 4-byte tx index
                                offset += sigscript_len + 4  # skips sequence number & sigscript
                            txout_count, offset = varint(block, offset)
                            for txout_num in range(txout_count):
                                pkscript_len, offset = varint(
                                    block,
                                    offset + 8)  # skips 8-byte satoshi count

                                #if print_debug:
                                #    print("Tx Data: ", block[offset:offset+100].encode("hex")) #Print all TX data (plus more for debugging)

                                # If this is a P2PKH script (OP_DUP OP_HASH160 PUSH(20) <20 address bytes> OP_EQUALVERIFY OP_CHECKSIG)
                                if pkscript_len == 25 and block[
                                        offset:offset +
                                        3] == b"\x76\xa9\x14" and block[
                                            offset + 23:offset +
                                            25] == b"\x88\xac":
                                    address_set.add(
                                        block[offset + 3:offset + 23],
                                        outputToText, 'P2PKH')
                                elif block[
                                        offset:offset +
                                        2] == b"\xa9\x14":  #Check for Segwit Address
                                    address_set.add(
                                        block[offset + 2:offset + 22],
                                        outputToText, 'P2SH')
                                elif block[
                                        offset:offset +
                                        2] == b"\x00\x14":  #Check for Native Segwit Address
                                    address_set.add(
                                        block[offset + 2:offset + 22],
                                        outputToText, 'Bech32')

                                offset += pkscript_len  # advances past the pubkey script
                            if is_bip144:
                                for txin_num in range(txin_count):
                                    stackitem_count, offset = varint(
                                        block, offset)
                                    for stackitem_num in range(
                                            stackitem_count):
                                        stackitem_len, offset = varint(
                                            block, offset)
                                        offset += stackitem_len  # skips this stack item
                            offset += 4  # skips the 4-byte locktime

                    header = blockfile.read(
                        8)  # read in the next magic and remaining block length

            if progress_bar:
                block_bar_widgets[3] = progressbar.FormatLabel(
                    " {:11,} addrs. %(elapsed)s, ".format(
                        len(address_set)))  # updates address count
                nextval = progress_bar.currval + 1
                if nextval > progress_bar.maxval:  # can happen if the bitcoin client is left running
                    progress_bar.maxval = nextval
                progress_bar.update(nextval)
            else:
                print("{:13,}".format(len(address_set)))

        if progress_bar:
            progress_bar.widgets.pop()  # remove the ETA
            progress_bar.finish()
    if update:
        print("\nSaving changes to address database ...")
        address_set.close()
    else:
        print("\nSaving address database ...")
        dbfile.truncate(0)
        address_set.tofile(dbfile)
        dbfile.close()

    print("\nDone.")
コード例 #18
0
def __document_to_sentences(document_name: pathlib.Path) -> list:
    lines = u.read_document(document_name)
    sentences = lines
    sentences = [sentence.strip() for sentence in sentences]
    sentences = [sentence for sentence in sentences if len(sentence) > 0]
    widgets = ['Calculating grade level: ', pb.Percentage(), ' ', pb.Bar(marker = '.', left = '[', right = ']'), ' ', pb.ETA()]
    with pb.ProgressBar(widgets = widgets, max_value = len(sentences)) as bar:
        for i in range(0, len(sentences)):
            bar.update(i)
            sentence = sentences[i]
            grade_level = __calculate_sentences_median_grade_level(sentence)
            sentences[i] = sentenceplus(sentence, grade_level)
    return sentences
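# Note: sentenceplus is not shown in this snippet; it is assumed here to be a
# small two-field record pairing a sentence with its grade level. A minimal
# hypothetical stand-in (field names are guesses, not the original):
from collections import namedtuple

sentenceplus = namedtuple('sentenceplus', ['text', 'grade_level'])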
コード例 #19
0
ファイル: mpi.py プロジェクト: naughtont3/iMars3D
def recon_mpi(sinograms,
              theta,
              recon_series,
              stepsize=10,
              center=None,
              recon=None,
              **kwds):
    """reconstruction using mpi.
This method needs to be run on several mpi nodes to achieve
parallelization, e.g. something like $ mpirun -np NODES python "code that calls this method"

* theta: angles in radians
* recon: reconstruction method
    """
    import logging
    logger = logging.getLogger("mpi")
    import imars3d.io

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()

    totalN = len(sinograms)
    N = int(np.ceil(totalN * 1. / size))
    start, stop = rank * N, min(totalN, (rank + 1) * N)
    # print("node %s of %s handles %s" % (rank, size, layers))
    # print("N, start, stop=%s, %s, %s" % (N, start, stop))

    if recon is None:
        from .use_tomopy import recon_batch_singlenode as recon

    # progress bar
    # for simplicity, we just report the progress at rank 0, which should be a
    # good indication of progress of all nodes any way
    if rank == 0:
        bar = progressbar.ProgressBar(widgets=[
            "Reconstructing",
            progressbar.Percentage(),
            progressbar.Bar(),
            ' [',
            progressbar.ETA(),
            '] ',
        ],
                                      max_value=stop - start,
                                      **pb_config)
    start0 = start  # could be defined inside the if-block; kept here to keep flake8/pylint happy

    # avoid infinite loop
    loop = -1

    while start < stop and loop < MAX_LOOP:
        # update loop
        loop += 1

        stop1 = min(start + stepsize, stop)
        logger.debug("node %s of %s working on %s:%s" %
                     (rank, size, start, stop1))
        sinograms1 = sinograms[start:stop1]
        if not len(sinograms1):
            continue
        recon_series1 = recon_series[start:stop1]
        try:
            recon(sinograms1, theta, recon_series1, center=center, **kwds)
        except:
            logger.info("node %s of %s: recon %s:%s failed" %
                        (rank, size, start, stop1))

        # update range
        start = stop1

        # update bar (rank 0)
        if rank == 0:
            bar.update(start - start0)
        continue
    comm.Barrier()

    if rank == 0:
        print('\n')
    return
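# How the rank partitioning above splits the workload, shown standalone: with
# 100 sinograms over 4 MPI ranks, each rank takes a contiguous chunk of
# ceil(100/4) = 25 sinograms.
import numpy as np

totalN, size = 100, 4
N = int(np.ceil(totalN * 1. / size))
for rank in range(size):
    start, stop = rank * N, min(totalN, (rank + 1) * N)
    print(rank, start, stop)   # 0 0 25 / 1 25 50 / 2 50 75 / 3 75 100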
コード例 #20
0
    def download_file(self,
                      url=None,
                      outputFileName=None,
                      outputPath=None,
                      bytes=False):
        def fmt_size(num, suffix="B"):
            for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
                if abs(num) < 1024.0:
                    return "%3.1f%s%s" % (num, unit, suffix)
                num /= 1024.0
            return "%.1f%s%s" % (num, "Yi", suffix)

        #:
        if not url:
            raise Exception("No URL specified.")

        if outputPath is None:  # Default to current dir.
            outputPath = os.getcwd()
        else:
            if not os.path.isdir(outputPath):
                raise Exception(
                    'Specified path "{0}" does not exist'.format(outputPath))

        fileName = os.path.basename(url)  # Get URL filename
        userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"

        if 'sourceforge.net' in url.lower():
            userAgent = 'wget/1.18'  # sourceforge <3 wget

        if url.lower().startswith("ftp://"):
            self.log("Requesting : {0}".format(url))
            if outputFileName != None:
                fileName = outputFileName
            fullOutputPath = os.path.join(outputPath, fileName)
            urllib.request.urlretrieve(url, fullOutputPath)
            return fullOutputPath

        req = requests.get(url, stream=True, headers={"User-Agent": userAgent})

        if req.status_code != 200:
            req.raise_for_status()

        if "content-disposition" in req.headers:
            reSponse = re.findall("filename=(.+)",
                                  req.headers["content-disposition"])
            if not reSponse:  # re.findall returns an empty list when nothing matches
                fileName = os.path.basename(url)
            else:
                fileName = reSponse[0]

        size = None
        compressed = False
        if "Content-Length" in req.headers:
            size = int(req.headers["Content-Length"])

        if "Content-Encoding" in req.headers:
            if req.headers["Content-Encoding"] == "gzip":
                compressed = True

        self.log("Requesting : {0} - {1}".format(
            url,
            fmt_size(size) if size != None else "?"))

        # terms = shutil.get_terminal_size((100,100))
        # filler = 0
        # if terms[0] > 100:
        # 	filler = int(terms[0]/4)

        widgetsNoSize = [
            progressbar.FormatCustomText("Downloading: {:25.25}".format(
                os.path.basename(fileName))), " ",
            progressbar.AnimatedMarker(markers='|/-\\'), " ",
            progressbar.DataSize()
            # " "*filler
        ]
        widgets = [
            progressbar.FormatCustomText("Downloading: {:25.25}".format(
                os.path.basename(fileName))),
            " ",
            progressbar.Percentage(),
            " ",
            progressbar.Bar(fill=chr(9617),
                            marker=chr(9608),
                            left="[",
                            right="]"),
            " ",
            progressbar.DataSize(),
            "/",
            progressbar.DataSize(variable="max_value"),
            " |",
            progressbar.AdaptiveTransferSpeed(),
            " | ",
            progressbar.ETA(),
            # " "*filler
        ]
        pbar = None
        if size == None:
            pbar = progressbar.ProgressBar(widgets=widgetsNoSize,
                                           maxval=progressbar.UnknownLength)
        else:
            pbar = progressbar.ProgressBar(widgets=widgets, maxval=size)

        if outputFileName != None:
            fileName = outputFileName
        fullOutputPath = os.path.join(outputPath, fileName)

        updateSize = 0

        if isinstance(pbar.max_value, int):
            updateSize = pbar.max_value if pbar.max_value < 1024 else 1024

        if bytes == True:
            output = b""
            bytesrecv = 0
            pbar.start()
            for buffer in req.iter_content(chunk_size=1024):
                if buffer:
                    output += buffer
                if compressed:
                    pbar.update(updateSize)
                else:
                    pbar.update(bytesrecv)
                bytesrecv += len(buffer)
            pbar.finish()
            return output
        else:
            with open(fullOutputPath, "wb") as file:
                bytesrecv = 0
                pbar.start()
                for buffer in req.iter_content(chunk_size=1024):
                    if buffer:
                        file.write(buffer)
                        file.flush()
                    if compressed:
                        pbar.update(updateSize)
                    else:
                        pbar.update(bytesrecv)
                    bytesrecv += len(buffer)
                pbar.finish()

                return fullOutputPath
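# Standalone check of the fmt_size helper defined above (copied verbatim so it
# runs on its own):
def fmt_size(num, suffix="B"):
    for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, "Yi", suffix)

print(fmt_size(512))        # 512.0B
print(fmt_size(1536))       # 1.5KiB
print(fmt_size(10 ** 9))    # 953.7MiB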
コード例 #21
0
from __future__ import division, print_function
import numpy as np
import progressbar
from utils import divide_on_feature, divide_on_feature2

bar_widgets = [
    'Training: ', progressbar.Percentage(), ' ', progressbar.Bar(marker="-", left="[", right="]"),
    ' ', progressbar.ETA(), '\n'
]


class LeastSquaresLoss(): # loss class with first- and second-order gradient methods
    """ Least-squares loss """

    def gradient(self, actual, predicted):
        return actual - predicted # for squared loss, the negative gradient is the residual

    def hess(self, actual, predicted):
        return np.ones_like(actual) # for squared loss, the second derivative is 1
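# Quick numerical check of the two methods above: for squared loss the
# "gradient" returned is the residual (actual - predicted) and the hessian is
# a vector of ones.
loss = LeastSquaresLoss()
actual = np.array([3.0, -1.0, 2.0])
predicted = np.array([2.5, 0.0, 2.0])
print(loss.gradient(actual, predicted))   # [ 0.5 -1.   0. ]
print(loss.hess(actual, predicted))       # [1. 1. 1.]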
    
    
class DecisionNode():
    """ Decision tree node class

    Parameters:
    -----------
    feature_i: int
        Index of the feature to split on
    threshold: float
        Threshold value used to split the feature
    value: float
コード例 #22
0
def main():
    seeding()
    # number of parallel agents
    parallel_envs = 8
    # number of training episodes.
    # change this to higher number to experiment. say 30000.
    number_of_episodes = 1000
    episode_length = 80
    batchsize = 1000
    # how many episodes to save policy and gif
    save_interval = 1000
    t = 0

    # amplitude of OU noise
    # this slowly decreases to 0
    noise = 2
    noise_reduction = 0.9999

    # how many episodes before update
    episode_per_update = 2 * parallel_envs

    log_path = os.getcwd() + "/log"
    model_dir = os.getcwd() + "/model_dir"

    os.makedirs(model_dir, exist_ok=True)

    torch.set_num_threads(parallel_envs)
    env = envs.make_parallel_env(parallel_envs)

    # keep 5000 episodes worth of replay
    buffer = ReplayBuffer(int(5000 * episode_length))

    # initialize policy and critic
    maddpg = MADDPG()
    logger = SummaryWriter(log_dir=log_path)
    agent0_reward = []
    agent1_reward = []
    agent2_reward = []

    # training loop
    # show progressbar
    import progressbar as pb
    widget = [
        'episode: ',
        pb.Counter(), '/',
        str(number_of_episodes), ' ',
        pb.Percentage(), ' ',
        pb.ETA(), ' ',
        pb.Bar(marker=pb.RotatingMarker()), ' '
    ]

    timer = pb.ProgressBar(widgets=widget, maxval=number_of_episodes).start()

    # use keep_awake to keep workspace from disconnecting
    for episode in keep_awake(range(0, number_of_episodes, parallel_envs)):

        timer.update(episode)

        reward_this_episode = np.zeros((parallel_envs, 3))
        all_obs = env.reset()
        obs, obs_full = transpose_list(all_obs)

        #for calculating rewards for this particular episode - addition of all time steps

        # save info or not
        save_info = ((episode) % save_interval < parallel_envs
                     or episode == number_of_episodes - parallel_envs)
        frames = []
        tmax = 0

        if save_info:
            frames.append(env.render('rgb_array'))

        for episode_t in range(episode_length):

            t += parallel_envs

            # explore = only explore for a certain number of episodes
            # action input needs to be transposed
            actions = maddpg.act(transpose_to_tensor(obs), noise=noise)
            noise *= noise_reduction

            actions_array = torch.stack(actions).detach().numpy()

            # transpose the list of list
            # flip the first two indices
            # input to step requires the first index to correspond to number of parallel agents
            actions_for_env = np.rollaxis(actions_array, 1)

            # step forward one frame
            next_obs, next_obs_full, rewards, dones, info = env.step(
                actions_for_env)

            # add data to buffer
            transition = (obs, obs_full, actions_for_env, rewards, next_obs,
                          next_obs_full, dones)

            buffer.push(transition)

            reward_this_episode += rewards

            obs, obs_full = next_obs, next_obs_full

            # save gif frame
            if save_info:
                frames.append(env.render('rgb_array'))
                tmax += 1

        # update once after every episode_per_update
        if len(buffer
               ) > batchsize and episode % episode_per_update < parallel_envs:
            for a_i in range(3):
                samples = buffer.sample(batchsize)
                maddpg.update(samples, a_i, logger)
            maddpg.update_targets()  # soft update the target networks towards the actual networks

        for i in range(parallel_envs):
            agent0_reward.append(reward_this_episode[i, 0])
            agent1_reward.append(reward_this_episode[i, 1])
            agent2_reward.append(reward_this_episode[i, 2])

        if episode % 100 == 0 or episode == number_of_episodes - 1:
            avg_rewards = [
                np.mean(agent0_reward),
                np.mean(agent1_reward),
                np.mean(agent2_reward)
            ]
            agent0_reward = []
            agent1_reward = []
            agent2_reward = []
            for a_i, avg_rew in enumerate(avg_rewards):
                logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                                  avg_rew, episode)

        #saving model
        save_dict_list = []
        if save_info:
            for i in range(3):

                save_dict = {
                    'actor_params':
                    maddpg.maddpg_agent[i].actor.state_dict(),
                    'actor_optim_params':
                    maddpg.maddpg_agent[i].actor_optimizer.state_dict(),
                    'critic_params':
                    maddpg.maddpg_agent[i].critic.state_dict(),
                    'critic_optim_params':
                    maddpg.maddpg_agent[i].critic_optimizer.state_dict()
                }
                save_dict_list.append(save_dict)

                torch.save(
                    save_dict_list,
                    os.path.join(model_dir, 'episode-{}.pt'.format(episode)))

            # save gif files
            imageio.mimsave(os.path.join(model_dir,
                                         'episode-{}.gif'.format(episode)),
                            frames,
                            duration=.04)

    env.close()
    logger.close()
    timer.finish()
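# Side note on the exploration schedule above: the OU-noise amplitude starts at
# 2 and is multiplied by noise_reduction on every time step, so after k steps
# it has shrunk to 2 * 0.9999**k.
noise, noise_reduction = 2, 0.9999
for k in (1000, 10000, 80000):
    print(k, noise * noise_reduction ** k)   # ~1.81, ~0.74, ~0.00067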
コード例 #23
0
ファイル: MGGenerate.py プロジェクト: loftwah/midiGenerator
    def generate_fill(self, max_length=None, no_duration=False, verbose=1):
        """

        :param max_length:
        :param no_duration:
        :param verbose:
        :return:
        """
        # ----- Parameters -----
        max_length = 300 / self.step_length if max_length is None else max_length

        # ----- Variables -----
        if self.data_transformed_path is None:
            raise Exception('Some data need to be loaded before generating (fill)')
        sequence = Sequences.KerasSequence(
            path=self.data_transformed_path,
            nb_steps=self.nb_steps,
            batch_size=1,
            work_on=self.work_on
        )  # Return array instead of list (for instruments)
        max_length = int(min(max_length, len(sequence)))
        nb_instruments = sequence.nb_instruments
        # ----- Seeds -----
        truth = sequence[0][0]
        filled_list = [np.copy(truth) for inst in range(nb_instruments)]
        mask = np.ones((nb_instruments, nb_instruments, self.nb_steps))
        for inst in range(nb_instruments):
            filled_list[inst][inst] = 0
            mask[inst, inst] = 0

        # ----- Generation -----
        cprint('Start generating (fill) ...', 'blue')
        bar = progressbar.ProgressBar(maxval=max_length,
                                      widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ',
                                               progressbar.ETA()])
        bar.start()  # To see it working
        for l in range(max_length):
            s_input, s_output = sequence[l]
            to_fill_list = [np.copy(s_input) for inst in range(nb_instruments)]
            for inst in range(nb_instruments):
                to_fill_list[inst][inst] = 0
            nn_input = np.concatenate(
                tuple(to_fill_list),
                axis=1
            )  # (nb_instruments, batch=nb_instruments, nb_steps, step_size, input_size, channels)
            preds = self.keras_nn.generate(input=list(nn_input) + [mask])

            preds = np.asarray(preds).astype(
                'float64')  # (nb_instruments, batch=nb_instruments, nb_steps=1, step_size, input_size, channels)
            if len(preds.shape) == 5:  # Only one instrument : output of nn not a list
                preds = np.expand_dims(preds, axis=0)
            if len(s_output.shape) == 5:  # Only one instrument : output of nn not a list
                s_output = np.expand_dims(s_output, axis=0)
            preds = midi.create.normalize_activation(preds, mono=self.mono)
            truth = np.concatenate((truth, s_output), axis=2)
            for inst in range(nb_instruments):
                p = np.copy(s_output)
                p[inst] = np.take(preds, axis=1, indices=[inst])[inst]
                filled_list[inst] = np.concatenate(
                    (filled_list[inst], p),
                    axis=2)  # (nb_instruments, batch=1, nb_steps, step_size, input_size, channels)
            bar.update(l + 1)
        bar.finish()

        # -------------------- Compute notes list --------------------
        # ----- Reshape -----
        truth = self.reshape_generated_array(truth)
        for inst in range(nb_instruments):
            filled_list[inst] = self.reshape_generated_array(filled_list[inst])
        self.ensure_save_midis_path()
        self.save_midis_path.mkdir(parents=True, exist_ok=True)
        self.compute_generated_array(
            generated_array=truth,
            file_name=self.save_midis_path / 'generated_fill_truth',
            no_duration=no_duration,
            verbose=verbose,
            save_images=True
        )
        for inst in range(nb_instruments):
            self.compute_generated_array(
                generated_array=filled_list[inst],
                file_name=self.save_midis_path / f'generated_fill_{inst}',
                no_duration=no_duration,
                array_truth=truth,
                verbose=verbose,
                save_truth=False,
                save_images=True
            )

        cprint('Done generating (fill)', 'green')
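# Standalone illustration of the fill mask constructed in generate_fill above:
# one mask row per instrument, with that instrument's own channel zeroed so the
# network has to reconstruct it from the other instruments.
import numpy as np

nb_instruments, nb_steps = 3, 4
mask = np.ones((nb_instruments, nb_instruments, nb_steps))
for inst in range(nb_instruments):
    mask[inst, inst] = 0
print(mask[0, 0])   # [0. 0. 0. 0.]  (instrument 0 hidden in its own mask)
print(mask[0, 1])   # [1. 1. 1. 1.]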
コード例 #24
0
def train(maddpg, env, n_episodes=1000, save_every=50):
    """Training loop helper for running the environment using the MADDPG algorithm.
    Params
    ======
        maddpg (MADDPG): instance of MADDPG wrapper class
        env (UnityEnvironment): instance of Unity environment for training
        n_episodes (int): number of episodes to train for
        save_every (int): frequency to save model weights
    """
    widget = [
        "Episode: ",
        pb.Counter(), '/',
        str(n_episodes), ' ',
        pb.Percentage(), ' ',
        pb.ETA(), ' ',
        pb.Bar(marker=pb.RotatingMarker()), ' ', 'Rolling Average: ',
        pb.FormatLabel('')
    ]
    timer = pb.ProgressBar(widgets=widget, maxval=n_episodes).start()

    solved = False
    scores_total = []
    scores_deque = deque(maxlen=100)
    rolling_score_averages = []
    last_best_score = 0.0

    # Environment information
    brain_name = env.brain_names[0]

    for i_episode in range(1, n_episodes + 1):
        current_average = 0.0 if i_episode == 1 else rolling_score_averages[-1]
        widget[12] = pb.FormatLabel(str(current_average)[:6])
        timer.update(i_episode)

        env_info = env.reset(train_mode=True)[brain_name]
        states = env_info.vector_observations[:, -STATE_SIZE:]
        scores = np.zeros(NUM_AGENTS)
        maddpg.reset()

        while True:
            actions = maddpg.act(states)

            env_info = env.step(actions)[brain_name]
            next_states = env_info.vector_observations[:, -STATE_SIZE:]
            rewards = env_info.rewards
            dones = env_info.local_done

            maddpg.step(states, actions, rewards, next_states, dones)

            scores += rewards
            states = next_states

            if np.any(dones):
                break

        max_episode_score = np.max(scores)

        scores_deque.append(max_episode_score)
        scores_total.append(max_episode_score)

        average_score = np.mean(scores_deque)
        rolling_score_averages.append(average_score)

        if average_score >= 0.5 and not solved:
            print(
                '\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'
                .format(i_episode, average_score))
            solved = True
            maddpg.save_model()
            last_best_score = average_score

        if i_episode % save_every == 0 and solved:
            # Only save these weights if they are better than the ones previously saved
            if average_score > last_best_score:
                last_best_score = average_score
                maddpg.save_model()

    return scores_total, rolling_score_averages
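# Minimal illustration of the score bookkeeping used above: the best score of
# each episode goes into a 100-long deque, and the mean of that deque is the
# rolling average checked against the 0.5 "solved" threshold.
from collections import deque
import numpy as np

scores_deque = deque(maxlen=100)
for episode_scores in ([0.1, 0.2], [0.5, 0.6], [0.4, 0.9]):
    scores_deque.append(np.max(episode_scores))
print(np.mean(scores_deque))   # mean of [0.2, 0.6, 0.9] ~= 0.57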
コード例 #25
0
ファイル: MGGenerate.py プロジェクト: loftwah/midiGenerator
    def compare_generation(self, max_length=None, no_duration=False, verbose=1):
        """

        :return:
        """
        # -------------------- Find informations --------------------
        if self.data_transformed_path is None:
            raise Exception('Some data need to be loaded before comparing the generation')
        self.sequence = Sequences.AllInstSequence(
            path=str(self.data_transformed_path),
            nb_steps=self.nb_steps,
            batch_size=1,
            work_on=self.work_on,
            noise=0
        )
        max_length = len(self.sequence) if max_length is None else min(max_length, len(self.sequence))

        # -------------------- Construct seeds --------------------
        generated = np.array(self.sequence[0][0])  # (nb_instrument, 1, nb_steps, step_size, input_size, 2) (1=batch)
        generated_helped = np.copy(generated)  # Each step will take the truth as an input
        generated_truth = np.copy(generated)  # The truth

        mask = self.get_mask(self.sequence.nb_instruments)

        # -------------------- Generation --------------------
        cprint('Start comparing generation ...', 'blue')
        bar = progressbar.ProgressBar(maxval=max_length,
                                      widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ',
                                               progressbar.ETA()])
        bar.start()  # To see it working
        for l in range(max_length):
            ms_input, ms_output = self.sequence[l]
            sample = np.concatenate((generated[:, :, l:], np.array(ms_input)),
                                    axis=1)  # (nb_instruments, 2, nb_steps, step_size, input_size, 2)

            # Generation
            preds = self.keras_nn.generate(input=list(sample) + mask)

            # Reshape
            preds = np.asarray(preds).astype('float64')  # (nb_instruments, batch=2, nb_steps=1, length, 88, 2)
            preds_truth = np.array(ms_output)  # (nb_instruments, 1, 1, step_size, input_size, 2)
            # if only one instrument
            if len(preds.shape) == 5:  # Only one instrument : output of nn not a list
                preds = np.expand_dims(preds, axis=0)
            if len(preds_truth.shape) == 5:  # Only one instrument : output of nn not a list
                preds_truth = np.expand_dims(preds_truth, axis=0)
            preds = midi.create.normalize_activation(preds, mono=self.mono)  # Normalize the activation part
            preds_helped = preds[:, [1]]  # (nb_instruments, 1, 1, length, 88, 2)
            preds = preds[:, [0]]

            # Concatenation
            generated = np.concatenate((generated, preds), axis=2)  # (nb_instruments, 1, nb_steps, length, 88, 2)
            generated_helped = np.concatenate((generated_helped, preds_helped),
                                              axis=2)  # (nb_instruments, 1, nb_steps, length, 88, 2)
            generated_truth = np.concatenate((generated_truth, preds_truth), axis=2)
            bar.update(l + 1)
        bar.finish()

        # -------------------- Compute notes list --------------------
        # Generated
        generated_midi_final = self.reshape_generated_array(generated)
        # Helped
        generated_midi_final_helped = self.reshape_generated_array(generated_helped)
        # Truth
        generated_midi_final_truth = self.reshape_generated_array(generated_truth)

        # ---------- find the name for the midi_file ----------
        self.ensure_save_midis_path()
        self.save_midis_path.mkdir(parents=True, exist_ok=True)

        # Generated
        self.compute_generated_array(
            generated_array=generated_midi_final,
            file_name=self.save_midis_path / 'compare_generation_alone',
            no_duration=no_duration,
            array_truth=generated_midi_final_truth,
            verbose=verbose,
            save_truth=False,
            save_images=True
        )
        # Helped
        self.compute_generated_array(
            generated_array=generated_midi_final_helped,
            file_name=self.save_midis_path / 'compare_generation_helped',
            no_duration=no_duration,
            array_truth=generated_midi_final_truth,
            verbose=verbose,
            save_truth=False,
            save_images=True
        )
        # Truth
        self.compute_generated_array(
            generated_array=generated_midi_final_truth,
            file_name=self.save_midis_path / 'compare_generation_truth',
            no_duration=no_duration,
            array_truth=None,
            verbose=verbose,
            save_truth=False,
            save_images=True
        )
        cprint('Done comparing generation', 'green')
コード例 #26
0
def apply_batches(function,
                  arguments,
                  batch_size,
                  description='',
                  show_progressbar=False,
                  show_error_output=True):
    """
    Apply batches to a specified function.

    Parameters
    ----------
    function : func
        Function that accepts one or more positional arguments.
        Each of them should be an array-like variable that
        have exactly the same number of rows.

    arguments : tuple, list
        The arguments that will be provided to the function specified
        in the ``function`` argument.

    batch_size : int
        Mini-batch size.

    description : str
        Short description that will be displayed near the progressbar
        in verbose mode. Defaults to ``''`` (empty string).

    show_progressbar : bool
        ``True`` means that function will show progressbar in the
        terminal. Defaults to ``False``.

    show_error_output : bool
        Assumes that the function's outputs are error values.
        ``True`` will show this information in the progressbar;
        the displayed error corresponds to the most recent batch.
        Defaults to ``True``.

    Returns
    -------
    list
        List of function outputs.
    """
    if not arguments:
        raise ValueError("The argument parameter should be list or "
                         "tuple with at least one element.")

    samples = arguments[0]
    n_samples = len(samples)
    batch_iterator = list(iter_batches(n_samples, batch_size))

    if show_progressbar:
        widgets = [
            progressbar.Timer(format='Time: %(elapsed)s'),
            ' |',
            progressbar.Percentage(),
            progressbar.Bar(),
            ' ',
            progressbar.ETA(),
        ]

        if show_error_output:
            widgets.extend([' | ', progressbar.DynamicMessage('error')])

        bar = progressbar.ProgressBar(
            widgets=widgets,
            max_value=len(batch_iterator),
            poll_interval=0.1,
        )
        bar.update(0)
    else:
        bar = progressbar.NullBar()

    outputs = []
    for i, batch in enumerate(batch_iterator):
        sliced_arguments = [argument[batch] for argument in arguments]

        output = function(*sliced_arguments)
        outputs.append(output)

        if show_error_output:
            bar.update(i, error=np.atleast_1d(output).item(0))
        else:
            bar.update(i)

    bar.finish('\r' + ' ' * bar.term_width + '\r')
    return outputs
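# Hypothetical usage of apply_batches above: summing a long vector in
# mini-batches. This assumes the iter_batches helper from the same module
# yields index slices, as the code above implies.
import numpy as np

data = np.arange(10000, dtype=float)
outputs = apply_batches(np.sum, [data], batch_size=1000,
                        show_progressbar=False, show_error_output=False)
print(sum(outputs))   # 49995000.0, same as data.sum()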
コード例 #27
0
ファイル: bootstrap.py プロジェクト: pbosler/compass
def update_permissions(config, is_test, activ_path, conda_base, system_libs):

    directories = []
    if not is_test:
        directories.append(conda_base)
    if system_libs is not None:
        # even if this is not a release, we need to update permissions on
        # shared system libraries
        directories.append(system_libs)

    group = config.get('e3sm_unified', 'group')

    new_uid = os.getuid()
    new_gid = grp.getgrnam(group).gr_gid

    print('changing permissions on activation scripts')

    read_perm = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
                 | stat.S_IROTH)
    exec_perm = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP
                 | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

    mask = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO

    if not is_test:

        activation_files = glob.glob('{}/*_compass*.sh'.format(activ_path))
        for file_name in activation_files:
            os.chmod(file_name, read_perm)
            os.chown(file_name, new_uid, new_gid)

    print('changing permissions on environments')

    # first the base directories that don't seem to be included in
    # os.walk()
    for directory in directories:
        try:
            dir_stat = os.stat(directory)
        except OSError:
            continue

        perm = dir_stat.st_mode & mask

        if dir_stat.st_uid != new_uid:
            # current user doesn't own this dir so let's move on
            continue

        if perm == exec_perm and dir_stat.st_gid == new_gid:
            continue

        try:
            os.chown(directory, new_uid, new_gid)
            os.chmod(directory, exec_perm)
        except OSError:
            continue

    files_and_dirs = []
    for base in directories:
        for root, dirs, files in os.walk(base):
            files_and_dirs.extend(dirs)
            files_and_dirs.extend(files)

    widgets = [
        progressbar.Percentage(), ' ',
        progressbar.Bar(), ' ',
        progressbar.ETA()
    ]
    bar = progressbar.ProgressBar(widgets=widgets,
                                  maxval=len(files_and_dirs)).start()
    progress = 0
    for base in directories:
        for root, dirs, files in os.walk(base):
            for directory in dirs:
                progress += 1
                bar.update(progress)

                directory = os.path.join(root, directory)

                try:
                    dir_stat = os.stat(directory)
                except OSError:
                    continue

                if dir_stat.st_uid != new_uid:
                    # current user doesn't own this dir so let's move on
                    continue

                perm = dir_stat.st_mode & mask

                if perm == exec_perm and dir_stat.st_gid == new_gid:
                    continue

                try:
                    os.chown(directory, new_uid, new_gid)
                    os.chmod(directory, exec_perm)
                except OSError:
                    continue

            for file_name in files:
                progress += 1
                bar.update(progress)
                file_name = os.path.join(root, file_name)
                try:
                    file_stat = os.stat(file_name)
                except OSError:
                    continue

                if file_stat.st_uid != new_uid:
                    # current user doesn't own this file so let's move on
                    continue

                perm = file_stat.st_mode & mask

                if perm & stat.S_IXUSR:
                    # executable, so make sure others can execute it
                    new_perm = exec_perm
                else:
                    new_perm = read_perm

                if perm == new_perm and file_stat.st_gid == new_gid:
                    continue

                try:
                    os.chown(file_name, new_uid, new_gid)
                    os.chmod(file_name, new_perm)
                except OSError:
                    continue

    bar.finish()
    print('  done.')
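# For reference, the two permission masks assembled above evaluate to the
# familiar octal modes:
import stat

read_perm = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
             | stat.S_IROTH)
exec_perm = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP
             | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
print(oct(read_perm))   # 0o664  (rw-rw-r--)
print(oct(exec_perm))   # 0o775  (rwxrwxr-x)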
コード例 #28
0
ファイル: mklc.py プロジェクト: bhayden53/SALT2X
def do_stuff(ctr):
    p = pb.ProgressBar(maxval=740, widgets = [pb.Percentage(),pb.Bar(),pb.ETA()]).start()
    pbctr = 0
    for i,l in enumerate(lc):
        p.update(pbctr)
        pbctr += 1

        restcut = (3000,7000)
        data = sncosmo.read_lc(l, format='salt2')
        try:
            z = data.meta['Redshift']
        except:
            pass
        try:
            z = data.meta['Z_CMB']
        except:
            pass
        try:
            survey = data.meta['SURVEY']
        except:
            pass
        try:
            survey = data.meta['SET']
        except:
            pass
        nickname = data.meta['SN']
        try:
            nickname = str(int(nickname))
        except:
            pass
        mwebv = data.meta['MWEBV']
        dust = sncosmo.CCM89Dust()
        data = astropy.table.Table(data, masked=True)
        #rename columns so that my fitter can handle things
        data.rename_column('Filter', 'tmp')
        data.rename_column('Date', 'time')
        data.rename_column('Flux', 'flux')
        data.rename_column('Fluxerr', 'fluxerr')
        data.rename_column('MagSys', 'zpsys')
        data.rename_column('ZP', 'zp')

        if survey == 'SNLS':
            sn_nickname = l.split('/')[-1].split('.')[0].split('-')[-1]
            band = []
            for j, bp in enumerate(data['tmp']):
                band.append( '%s-%s' %(sn_nickname, bp) )
            band = astropy.table.Column(band, name='band')
            data.add_column(band)
            data.remove_column('tmp')
        else:
            data.rename_column('tmp', 'band')

        # deal with swope filters
        mask = (data['band'] == 'SWOPE2::V')
        nswopev = len(mask.nonzero()[0])
        if nswopev > 0:
            band = []
            for j, bp in enumerate(data['band']):
                if (bp == 'SWOPE2::V'):
                    if (data['time'][j] < 53749):
                        band.append('swope2::v_lc3014')
                    elif (data['time'][j] < 53760):
                        band.append('swope2::v_lc3009')
                    else:
                        band.append('swope2::v_lc9844')
                else:
                    band.append(bp)
            data.remove_column('band')
            band = astropy.table.Column(band, name='band')
            data.add_column(band)

            ind = np.where( (data['band'] == 'SWOPE2::V') & (data['time']>53749.) & ((data['time']<=53760.)) )
            data['band'][ind] = 'swope2::v_lc3009'
            ind = np.where( (data['band'] == 'SWOPE2::V') & (data['time']>53760.) )
            data['band'][ind] = 'swope2::v_lc9844'
            # print ind

        #deal with filter coverage
        #also deal with STANDARD filter zeropoints
        unique_bands = np.unique(data['band'])
        fit_bands = []
        nofit_bands = []
        # print unique_bands
        tmperr = np.copy(data['fluxerr'])
        for ub in unique_bands:
            # print ub
            bp = sncosmo.get_bandpass(ub)
            rest = bp.wave_eff / (1.0+z)
            if (rest >= restcut[0]) & (rest <= restcut[1]):
                fit_bands.append(ub)
            else:
                nofit_bands.append(ub)
            if 'STANDARD' in ub:
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(standard_zps[ub])
                errcor = 10**(-0.4*standard_zps[ub])
                data['fluxerr'][ind] *= errcor
            if '4SHOOTER2' in ub:
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(FourShooter_zps[ub])
                errcor = 10**(-0.4*FourShooter_zps[ub])
                data['fluxerr'][ind] *= errcor
            if 'KEPLERCAM' in ub:
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(keplercam_zps[ub])
                errcor = 10**(-0.4*keplercam_zps[ub])
                data['fluxerr'][ind] *= errcor
                # print ub
                # print data['zp'][ind]
            if 'swope' in ub.lower():
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(swope_zps[ub])
                errcor = 10**(-0.4*swope_zps[ub])
                data['fluxerr'][ind] *= errcor
            if 'sdss' in ub.lower():
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(sdss_zps[ub])
                errcor = 10**(-0.4*sdss_zps[ub])
                data['fluxerr'][ind] *= errcor

        for nfb in nofit_bands:
            mask = data['band'] == nfb
            for c in data.colnames:
                data[c].mask = (data[c].mask | mask)

        mwebv = data.meta['MWEBV']

        mask = data['band'].mask.nonzero()[0]
        data.remove_rows(mask)

        ind = np.where(lcfits['SN'] == nickname)
        t0 = lcfits['DayMax'][ind][0]

        x1r, x1f, c = np.random.multivariate_normal(mean,cov,size=1)[0]
        mu = cosmo.distmod(z).value
        absmag = MB - ar*x1r - af*x1f + beta*c + np.random.normal(scale=sigint, size=1)[0]
        mB = mu + absmag

        source = Salt2XSource(version='2.4', modeldir=modeldir)
        model  = sncosmo.Model(source=source, effects=[dust], effect_names=['mw'], effect_frames=['obs'])

        model.set(z=z, x1=x1f, s=x1r, c=c, t0=t0, mwebv=mwebv)
        model.set_source_peakabsmag(absmag, 'bessellb', 'ab')
        flux = model.bandflux(data['band'], data['time'], zp=data['zp'], zpsys=data['zpsys'])
        whocares, saltcov = model.bandfluxcov(data['band'], data['time'], data['zp'],data['zpsys'])

        # handle model cov blowups
        saltcov = np.copy(saltcov)
        diag = np.copy(saltcov.diagonal())
        model_snr = whocares/np.sqrt(diag)

        ind = np.where((np.abs(model_snr) < 1) & ~np.isnan(model_snr))

        diag[ind] = diag[ind] * np.abs(model_snr[ind])**2
        np.fill_diagonal(saltcov, diag)


        diagerr = np.diag(data['fluxerr']**2)
        fullcov = saltcov + diagerr
        try:
            np.linalg.cholesky(fullcov)
        except:
            print 'Cholesky failed... exiting'
            sys.exit()
            
        noise = np.random.multivariate_normal(np.zeros(len(diagerr)), fullcov, size=1)[0]

        #lower zp, lower flux
        data['flux'] = flux + noise

        data.meta['x1r'] = x1r
        data.meta['x1f'] = x1f
        data.meta['c'] = c
        data.meta['alpha_r'] = ar
        data.meta['alpha_f'] = af
        data.meta['beta'] = beta
        data.meta['MB'] = MB
        data.meta['mB'] = mu+MB
        data.meta['DayMax'] = t0
        data.meta['cosmology'] = 'Planck15'

        data.rename_column('band', 'Filter')
        data.rename_column('time', 'Date')
        data.rename_column('flux', 'Flux')
        data.rename_column('fluxerr', 'Fluxerr')
        data.rename_column('zpsys', 'MagSys')
        data.rename_column('zp', 'ZP')

        if survey == 'SNLS':
            for row in data:
                tmp = row['Filter']
                ind = tmp.find('MEGACAM')
                row['Filter'] = row['Filter'][ind:]

        sncosmo.write_lc(data,'./cadence_sim/lc/%s_%s.list' %(nickname, ctr), format='salt2')
    p.finish()
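# Worked check of the zero-point correction pattern used above: shifting a
# band's zero point by dzp rescales fluxes (and flux errors) by 10**(-0.4*dzp),
# following mag = zp - 2.5*log10(flux). The offset below is illustrative, not a
# value from the original zero-point tables.
dzp = 0.03
errcor = 10 ** (-0.4 * dzp)
print(errcor)   # ~0.9727: a 0.03 mag lower zero point shrinks fluxes by ~2.7%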
コード例 #29
0
def download(url, dest_path, config, exceptions=True):
    """
    Download a file from a URL to the given path or path name

    Parameters
    ----------
    url : str
        The URL (including file name) to download

    dest_path : str
        The path (including file name) where the downloaded file should be
        saved

    config : configparser.ConfigParser
        Configuration options used to find custom paths if ``dest_path`` is
        a config option

    exceptions : bool, optional
        Whether to raise exceptions when the download fails

    Returns
    -------
    dest_path : str
        The resulting file name if the download was successful, or None if not
    """

    in_file_name = os.path.basename(urlparse(url).path)
    dest_path = os.path.abspath(dest_path)
    out_file_name = os.path.basename(dest_path)

    do_download = config.getboolean('download', 'download')
    check_size = config.getboolean('download', 'check_size')
    verify = config.getboolean('download', 'verify')

    if not do_download:
        if not os.path.exists(dest_path):
            raise OSError(f'File not found and downloading is disabled: '
                          f'{dest_path}')
        return dest_path

    if not check_size and os.path.exists(dest_path):
        return dest_path

    session = requests.Session()
    if not verify:
        session.verify = False

    # dest_path contains full path, so we need to make the relevant
    # subdirectories if they do not exist already
    directory = os.path.dirname(dest_path)
    try:
        os.makedirs(directory)
    except OSError:
        pass

    try:
        response = session.get(url, stream=True)
        total_size = response.headers.get('content-length')
    except requests.exceptions.RequestException:
        if exceptions:
            raise
        else:
            print(f'  {url} could not be reached!')
            return None

    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        if exceptions:
            raise
        else:
            print(f'ERROR while downloading {in_file_name}:')
            print(e)
            return None

    if total_size is None:
        # no content length header
        if not os.path.exists(dest_path):
            dest_dir = os.path.dirname(dest_path)
            with open(dest_path, 'wb') as f:
                print(f'Downloading {in_file_name}\n' f'  to {dest_dir}...')
                try:
                    f.write(response.content)
                except requests.exceptions.RequestException:
                    if exceptions:
                        raise
                    else:
                        print(f'  {in_file_name} failed!')
                        return None
                else:
                    print(f'  {in_file_name} done.')
    else:
        # we can do the download in chunks and use a progress bar, yay!

        total_size = int(total_size)
        if os.path.exists(dest_path) and \
                total_size == os.path.getsize(dest_path):
            # we already have the file, so just return
            return dest_path

        if out_file_name == in_file_name:
            file_names = in_file_name
        else:
            file_names = f'{in_file_name} as {out_file_name}'
        dest_dir = os.path.dirname(dest_path)
        print(f'Downloading {file_names} ({_sizeof_fmt(total_size)})\n'
              f'  to {dest_dir}')
        widgets = [
            progressbar.Percentage(), ' ',
            progressbar.Bar(), ' ',
            progressbar.ETA()
        ]
        bar = progressbar.ProgressBar(widgets=widgets,
                                      max_value=total_size).start()
        size = 0
        with open(dest_path, 'wb') as f:
            try:
                for data in response.iter_content(chunk_size=4096):
                    size += len(data)
                    f.write(data)
                    bar.update(size)
                bar.finish()
            except requests.exceptions.RequestException:
                if exceptions:
                    raise
                else:
                    print(f'  {in_file_name} failed!')
                    return None
            else:
                print(f'  {in_file_name} done.')
    return dest_path
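# Hypothetical usage of download() above, with a minimal in-memory config that
# provides the [download] options the function reads (the URL and paths are
# placeholders, not from the original project).
import configparser

config = configparser.ConfigParser()
config.read_dict({'download': {'download': 'True',
                               'check_size': 'False',
                               'verify': 'True'}})
# dest = download('https://example.com/data/file.nc', '/tmp/file.nc', config)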
コード例 #30
0
            line = ''
        elif c == '\n':
            # line is done
            yield line
            line = ''
        elif c == '':
            break
        else:
            line += c


#def main():
first_update = None
widgets = [
    progressbar.Percentage(), None, progressbar.Bar(),
    progressbar.ETA(), None
]
pbar = None
debug = False
for line in read_stdin():
    if debug:
        print('Line on next line:')
        print(line)
    parts = line.split()
    '''
    if len(parts) > 3:
        print('parts:', parts)
        print('len(parts):', len(parts))
        print('should end w/ %:', parts[1][-1])
        print('should start w/ to-check:', parts[-1])
        print()