Example #1
    def init_from_existing_tutorial(self, tuto_name):
        """Init a tutorial instance from an existing tutorial (data library and tutorial.md)."""
        self.name = tuto_name
        self.set_dir_name()

        if not self.exists():
            raise Exception("The tutorial %s does not exist. It should be created" % self.name)

        # get the metadata information of the tutorial (from the top of the tutorial.md)
        with open(self.tuto_fp, "r") as tuto_f:
            tuto_content = tuto_f.read()
        regex = r'^---\n(?P<metadata>[\s\S]*)\n---(?P<body>[\s\S]*)'
        tuto_split_regex = re.search(regex, tuto_content)
        if not tuto_split_regex:
            raise Exception("No metadata found at the top of the tutorial")
        # explicit Loader: bare yaml.load is deprecated since PyYAML 5.1
        metadata = yaml.load(tuto_split_regex.group("metadata"), Loader=yaml.SafeLoader)
        self.title = metadata["title"]
        self.zenodo_link = metadata["zenodo_link"]
        self.questions = metadata["questions"]
        self.objectives = metadata["objectives"]
        self.time_estimation = metadata["time_estimation"]
        self.key_points = metadata["key_points"]
        self.contributors = metadata["contributors"]

        # get the tutorial content
        self.body = tuto_split_regex.group("body")

        # get the data library
        self.init_data_lib()
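The front-matter split above is easy to exercise in isolation. A minimal sketch of the same pattern on a synthetic tutorial.md string (using yaml.safe_load and a raw regex string, matching the fixes in the example):

import re
import yaml

sample = """---
title: My tutorial
zenodo_link: https://zenodo.org/record/123
---
Body text of the tutorial.
"""

match = re.search(r'^---\n(?P<metadata>[\s\S]*)\n---(?P<body>[\s\S]*)', sample)
if match:
    metadata = yaml.safe_load(match.group("metadata"))
    print(metadata["title"])            # -> My tutorial
    print(match.group("body").strip())  # -> Body text of the tutorial.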
Example #2
	def load_preset_config(self, preset):
		config_fpath = preset[0] + "/zynconfig.yml"
		try:
			with open(config_fpath,"r") as fh:
				yml = fh.read()
				logging.info("Loading preset config file %s => \n%s" % (config_fpath,yml))
				self.preset_config = yaml.load(yml, Loader=yaml.SafeLoader)
				return True
		except Exception as e:
			logging.error("Can't load preset config file '%s': %s" % (config_fpath,e))
			return False
Example #3
def load_previous_trials(output_name):
    print('>>>>> Loading previous results')
    with tf.gfile.GFile(output_name, 'r') as f:
        yaml_str = f.read()

    results_dict = yaml.load(yaml_str, Loader=yaml.SafeLoader)
    x0 = []
    y0 = []

    if results_dict:
        for timestamp, scores_dict in results_dict.items():
            score = scores_dict['score']
            params_dict = scores_dict['input_fn_params']
            params = [params_dict[d.name] for d in space]

            x0.append(params)
            y0.append(score)
    else:
        x0 = None
        y0 = None

    return x0, y0
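For reference, the results file this function expects maps a timestamp to a score and the input-fn parameters; space is assumed to be a list of search-space dimensions with a .name attribute (e.g. from scikit-optimize). A minimal sketch of that shape, with hypothetical values:

import yaml

yaml_str = """
"2023-01-01T00:00:00":
  score: 0.87
  input_fn_params:
    learning_rate: 0.001
    batch_size: 32
"""

results_dict = yaml.safe_load(yaml_str)
for timestamp, scores_dict in results_dict.items():
    print(timestamp, scores_dict['score'], scores_dict['input_fn_params'])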
Example #4
def api_guide(request):
    """Renders the API guide at opus/api

    Format: api/
        or: api/guide.html

    To edit guide content edit the examples.yaml
    """
    api_code = enter_api_call('api_guide', request)

    if not request or request.GET is None:
        ret = Http404('No request')
        exit_api_call(api_code, ret)
        raise ret

    uri = HttpRequest.build_absolute_uri(request)
    prefix = '/'.join(uri.split('/')[:3])

    path = os.path.dirname(os.path.abspath(__file__))
    guide_content_file = 'examples.yaml'
    with open(os.path.join(path, guide_content_file), 'r') as stream:
        text = stream.read()
        text = text.replace('<HOST>', prefix)
        try:
            guide = yaml.load(text, Loader=yaml.SafeLoader)

        except yaml.YAMLError as exc:
            log.error('api_guide error: %s', str(exc))
            exit_api_call(api_code, None)
            raise Http404

    slugs = get_fields_info('raw', collapse=True)

    ret = render(request, 'guide/guide.html',
                 {'guide': guide, 'slugs': slugs})
    exit_api_call(api_code, ret)
    return ret
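The pattern worth noting here is the textual substitution into the YAML source before parsing, which keeps host-specific URLs out of the data file. A minimal sketch with hypothetical content (the real examples.yaml is not shown here):

import yaml

text = "api_root: <HOST>/api\nguide_url: <HOST>/api/guide.html"
prefix = "https://example.org"
guide = yaml.safe_load(text.replace('<HOST>', prefix))
print(guide['api_root'])  # -> https://example.org/api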
Example #5
    return coarse_idx, medium_idx, fine_idx


def load_taxonomy_codes(filepath):
    with open(filepath, 'r') as f:
        taxonomy_codes = yaml.load(f, Loader=yaml.SafeLoader)

    return taxonomy_codes


TAXONOMY_PATH = os.path.join(os.path.dirname(__file__), '..', 'resources',
                             'taxonomy.yaml')
FILTER_PATH = os.path.join(os.path.dirname(__file__), 'filter.yaml')

with open(TAXONOMY_PATH, 'r') as f:
    TAXONOMY = yaml.load(f, Loader=yaml.SafeLoader)

with open(FILTER_PATH, 'r') as f:
    FILTER = yaml.load(f, Loader=yaml.SafeLoader)

MAPPINGS = get_taxonomy_mapping(TAXONOMY)

COARSE_IDXS, MEDIUM_IDXS, FINE_IDXS = get_taxonomy_idxs(TAXONOMY)
MOD_COARSE_IDXS, MOD_MEDIUM_IDXS, MOD_FINE_IDXS = get_modified_taxonomy_idxs(
    TAXONOMY, FILTER)
MOD_MEDIUM_COUNTS = get_modified_taxonomy_medium_children_count(
    TAXONOMY, FILTER)

NUM_COARSE = len(COARSE_IDXS)
NUM_MEDIUM = len(MEDIUM_IDXS)
NUM_FINE = len(FINE_IDXS)
Example #6
    def _get_config_from_file(self, file=None):
        """Get config from a YAML file."""
        # use a context manager so the handle is closed, and an explicit
        # Loader (bare yaml.load is deprecated since PyYAML 5.1)
        with open(file) as f:
            self._users = yaml.load(f, Loader=yaml.SafeLoader)
Example #7
def main():
    # Arguments
    parser = argparse.ArgumentParser(description='High Quality Monocular Depth Estimation via Transfer Learning')
    parser.add_argument('-c', '--configFile', required=True, help='Path to config yaml file', metavar='path/to/config')
    args = parser.parse_args()

    CONFIG_FILE_PATH = args.configFile
    with open(CONFIG_FILE_PATH) as fd:
        config_yaml = oyaml.load(fd, Loader=oyaml.SafeLoader)  # Returns an ordered dict. Used for printing

    config = AttrDict(config_yaml)
    print(colored('Config being used for training:\n{}\n\n'.format(oyaml.dump(config_yaml)), 'green'))

    # Create a new directory to save logs
    runs = sorted(glob.glob(os.path.join(config.train.logsDir, 'exp-*')))
    prev_run_id = int(runs[-1].split('-')[-1]) if runs else 0
    MODEL_LOG_DIR = os.path.join(config.train.logsDir, 'exp-{:03d}'.format(prev_run_id + 1))
    CHECKPOINT_DIR = os.path.join(MODEL_LOG_DIR, 'checkpoints')
    os.makedirs(CHECKPOINT_DIR)
    print('Saving logs to folder: ' + colored('"{}"'.format(MODEL_LOG_DIR), 'blue'))

    # Save a copy of config file in the logs
    shutil.copy(CONFIG_FILE_PATH, os.path.join(MODEL_LOG_DIR, 'config.yaml'))

    # Create a tensorboard object and Write config to tensorboard
    writer = SummaryWriter(MODEL_LOG_DIR, comment='create-graph')

    string_out = io.StringIO()
    oyaml.dump(config_yaml, string_out, default_flow_style=False)
    config_str = string_out.getvalue().split('\n')
    string = ''
    for line in config_str:
        string = string + '    ' + line + '\n\r'
    writer.add_text('Config', string, global_step=None)

    # Create model
    model = Model()
    print('Model created.')

    # to continue training from a checkpoint
    if config.train.continueTraining:
        print('Transfer Learning enabled. Model State to be loaded from a prev checkpoint...')
        if not os.path.isfile(config.train.pathPrevCheckpoint):
            raise ValueError('Invalid path to the given weights file for transfer learning.\
                    The file {} does not exist'.format(config.train.pathPrevCheckpoint))

        CHECKPOINT = torch.load(config.train.pathPrevCheckpoint, map_location='cpu')

        if 'model_state_dict' in CHECKPOINT:
            # Newer weights file with various dicts
            print(colored('Continuing training from checkpoint...Loaded data from checkpoint:', 'green'))
            print('Config Used to train Checkpoint:\n', oyaml.dump(CHECKPOINT['config']), '\n')
            print('From Checkpoint: Last Epoch Loss:', CHECKPOINT['epoch_loss'], '\n\n')

            model.load_state_dict(CHECKPOINT['model_state_dict'])
        elif 'state_dict' in CHECKPOINT:
            # reading the original authors' checkpoints
            if config.train.model != 'rednet':
                # original author deeplab checkpoint
                CHECKPOINT['state_dict'].pop('decoder.last_conv.8.weight')
                CHECKPOINT['state_dict'].pop('decoder.last_conv.8.bias')
            else:
                # rednet checkpoint
                # print(CHECKPOINT['state_dict'].keys())
                CHECKPOINT['state_dict'].pop('final_deconv.weight')
                CHECKPOINT['state_dict'].pop('final_deconv.bias')
                CHECKPOINT['state_dict'].pop('out5_conv.weight')
                CHECKPOINT['state_dict'].pop('out5_conv.bias')
                CHECKPOINT['state_dict'].pop('out4_conv.weight')
                CHECKPOINT['state_dict'].pop('out4_conv.bias')
                CHECKPOINT['state_dict'].pop('out3_conv.weight')
                CHECKPOINT['state_dict'].pop('out3_conv.bias')
                CHECKPOINT['state_dict'].pop('out2_conv.weight')
                CHECKPOINT['state_dict'].pop('out2_conv.bias')

            model.load_state_dict(CHECKPOINT['state_dict'], strict=False)
        else:
            # Old checkpoint containing only model's state_dict()
            model.load_state_dict(CHECKPOINT)

    # Enable Multi-GPU training
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    if torch.cuda.device_count() > 1:
        print('Multiple GPUs being used, can\'t save model graph to Tensorboard')
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model = nn.DataParallel(model)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Training parameters
    optimizer = torch.optim.Adam(model.parameters(), config.train.optimAdam.learningRate)
    batch_size = config.train.batchSize
    prefix = 'densenet_' + str(batch_size)

    # Load data
    train_loader_list = []
    test_loader_list = []
    for dataset in config.train.datasetsTrain:
        train_data = getTrainingTestingData('rgb', 'train', dataset.images, dataset.labels)
        train_loader_list.append(train_data)

    for dataset in config.train.datasetsVal:
        print(dataset.images)
        test_data = getTrainingTestingData('rgb', 'eval', dataset.images, dataset.labels)
        test_loader_list.append(test_data)

    train_loader = DataLoader(torch.utils.data.ConcatDataset(train_loader_list), batch_size, num_workers=config.train.numWorkers, shuffle=True, drop_last=True, pin_memory=True)
    test_loader = DataLoader(torch.utils.data.ConcatDataset(test_loader_list), batch_size, num_workers=config.train.numWorkers, shuffle=False, drop_last=True, pin_memory=True)
    print(len(torch.utils.data.ConcatDataset(train_loader_list)))
    print(len(train_loader))
    print(len(test_loader))

    # Create a tensorboard object and Write config to tensorboard
    writer = SummaryWriter(MODEL_LOG_DIR, comment='create-graph')

    # Loss
    l1_criterion = nn.L1Loss()

    total_iter_num = 0
    # Start training...
    for epoch in range(config.train.numEpochs):
        batch_time = AverageMeter()
        losses = AverageMeter()
        N = len(train_loader)

        # Log the current Epoch Number
        writer.add_scalar('data/Epoch Number', epoch, total_iter_num)

        # Switch to train mode
        model.train()

        end = time.time()

        running_loss = 0.0
        for i, sample_batched in enumerate(train_loader):
            optimizer.zero_grad()
            total_iter_num += 1

            # Prepare sample and target
            image = torch.autograd.Variable(sample_batched['image'].cuda())
            depth = torch.autograd.Variable(sample_batched['depth'].cuda(non_blocking=True))

            # Normalize depth
            depth_n = DepthNorm(depth)

            # Predict
            output = model(image)

            # Compute the loss
            l_depth = l1_criterion(output, depth_n)
            l_ssim = torch.clamp((1 - ssim(output, depth_n, val_range = 1000.0 / 10.0)) * 0.5, 0, 1)

            loss = (1.0 * l_ssim) + (0.1 * l_depth)

            # Update step
            losses.update(loss.data.item(), image.size(0))
            loss.backward()
            optimizer.step()

            # statistics
            running_loss += loss.item()

            # Measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            eta = str(datetime.timedelta(seconds=int(batch_time.val*(N - i))))

            # Log progress
            niter = epoch*N+i
            if i % 5 == 0:
                # Print to console
                print('Epoch: [{0}][{1}/{2}]\t'
                'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
                'ETA {eta}\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})'
                .format(epoch, i, N, batch_time=batch_time, loss=losses, eta=eta))

                # Log to tensorboard
                writer.add_scalar('Train/Loss', losses.val, niter)

            if i % 50 == 0:
                LogProgress(model, writer, test_loader, niter)

        # Log Epoch Loss
        epoch_loss = running_loss / (len(train_loader))
        writer.add_scalar('data/Train Epoch Loss', epoch_loss, total_iter_num)
        print('\nTrain Epoch Loss: {:.4f}'.format(epoch_loss))

        metrics = compute_errors(depth_n, output)
        print(metrics)
        for keys, values in metrics.items():
            print(str(keys) + ':' + str(values))

        # Record epoch's intermediate results
        LogProgress(model, writer, test_loader, niter)
        writer.add_scalar('Train/Loss.avg', losses.avg, epoch)

        # Save the model checkpoint every N epochs
        if (epoch % config.train.saveModelInterval) == 0:
            filename = os.path.join(CHECKPOINT_DIR, 'checkpoint-epoch-{:04d}.pth'.format(epoch))
            if torch.cuda.device_count() > 1:
                model_params = model.module.state_dict()  # Saving nn.DataParallel model
            else:
                model_params = model.state_dict()

            torch.save(
                {
                    'model_state_dict': model_params,
                    'optimizer_state_dict': optimizer.state_dict(),
                    'epoch': epoch,
                    'total_iter_num': total_iter_num,
                    'epoch_loss': epoch_loss,
                    'config': config_yaml
                }, filename)
Example #8
def load_yaml(filepath):
    """Load the content of a YAML file to a dictionary."""
    with open(filepath, "r") as m_file:
        content = yaml.load(m_file, Loader=yaml.SafeLoader)
    return content
Example #9
def parse_yaml(source_file):
    with open(source_file, mode='r') as f:
        data = load(f.read(), Loader=loader.Loader)
        return data
Example #10
def train(annotation_path,
          taxonomy_path,
          train_feature_dir,
          val_feature_dir,
          output_dir,
          load_checkpoint,
          load_checkpoint_path,
          exp_id,
          label_mode,
          batch_size=32,
          n_epochs=100,
          kernel_size=3,
          layer_depth=[64, 128, 256, 512],
          chs=1,
          max_ckpt=20,
          lr=1e-3,
          hidden_layer_size=256,
          snapshot=5,
          num_hidden_layers=1,
          standardize=True,
          timestamp=None):
    """
    Train and evaluate a MIL MLP model.

    Parameters
    ----------
    annotation_path
    taxonomy_path
    train_feature_dir
    val_feature_dir
    output_dir
    load_checkpoint
    load_checkpoint_path
    exp_id
    label_mode
    batch_size
    n_epochs
    kernel_size
    layer_depth
    chs
    max_ckpt
    lr
    hidden_layer_size
    snapshot
    num_hidden_layers
    standardize
    timestamp

    Returns
    -------
    """

    # Load annotations and taxonomy
    print("* Loading dataset.")
    annotation_data = pd.read_csv(annotation_path).sort_values(
        'audio_filename')
    with open(taxonomy_path, 'r') as f:
        taxonomy = yaml.load(f, Loader=yaml.Loader)

    annotation_data_trunc = annotation_data[[
        'audio_filename', 'latitude', 'longitude', 'week', 'day', 'hour'
    ]].drop_duplicates()
    file_list = annotation_data_trunc['audio_filename'].to_list()
    latitude_list = annotation_data_trunc['latitude'].to_list()
    longitude_list = annotation_data_trunc['longitude'].to_list()
    week_list = annotation_data_trunc['week'].to_list()
    day_list = annotation_data_trunc['day'].to_list()
    hour_list = annotation_data_trunc['hour'].to_list()

    full_fine_target_labels = [
        "{}-{}_{}".format(coarse_id, fine_id, fine_label)
        for coarse_id, fine_dict in taxonomy['fine'].items()
        for fine_id, fine_label in fine_dict.items()
    ]
    fine_target_labels = [
        x for x in full_fine_target_labels
        if x.split('_')[0].split('-')[1] != 'X'
    ]
    coarse_target_labels = [
        "_".join([str(k), v]) for k, v in taxonomy['coarse'].items()
    ]

    print("* Preparing training data.")

    # For fine, we include incomplete labels in targets for computing the loss
    fine_target_list = get_file_targets(annotation_data,
                                        full_fine_target_labels)
    coarse_target_list = get_file_targets(annotation_data,
                                          coarse_target_labels)
    train_file_idxs, valid_file_idxs = get_subset_split(annotation_data)

    if label_mode == "fine":
        target_list = fine_target_list
        labels = fine_target_labels
        num_classes = len(labels)
        y_true_num = len(full_fine_target_labels)
    elif label_mode == "coarse":
        target_list = coarse_target_list
        labels = coarse_target_labels
        num_classes = len(labels)
        y_true_num = num_classes
    else:
        raise ValueError("Invalid label mode: {}".format(label_mode))




    X_train_meta, y_train, X_valid_meta, y_valid_meta, scaler \
        = prepare_data(train_file_idxs, valid_file_idxs,
                       latitude_list, longitude_list,
                       week_list, day_list, hour_list,
                       target_list, standardize=standardize)

    print('X_train meta shape', X_train_meta.shape)
    print('y_train shape', y_train.shape)
    print('X_valid_meta shape', X_valid_meta.shape)
    print('y_valid shape', y_valid_meta.shape)

    meta_dims = X_train_meta.shape[2]

    X_train = load_train_data(file_list, train_file_idxs, train_feature_dir)
    X_valid = load_train_data(file_list, valid_file_idxs, val_feature_dir)
    _, frames, bins = X_train.shape
    print('X_train shape', X_train.shape)
    print('X_valid shape', X_valid.shape)

    (mean_train,
     std_train) = calculate_scalar_of_tensor(np.concatenate(X_train, axis=0))

    model = CNN9_Res_train(kernel_size, layer_depth, num_classes,
                           hidden_layer_size)

    if not timestamp:
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")

    model_path = os.path.join(output_dir, 'exp' + exp_id)

    if scaler is not None:
        scaler_path = os.path.join(model_path, 'stdizer.pkl')
        with open(scaler_path, 'wb') as f:
            pk.dump(scaler, f)

    if label_mode == "fine":
        full_coarse_to_fine_terminal_idxs = np.cumsum(
            [len(fine_dict) for fine_dict in taxonomy['fine'].values()])
        incomplete_fine_subidxs = [
            len(fine_dict) - 1 if 'X' in fine_dict else None
            for fine_dict in taxonomy['fine'].values()
        ]
        coarse_to_fine_end_idxs = np.cumsum([
            len(fine_dict) - 1 if 'X' in fine_dict else len(fine_dict)
            for fine_dict in taxonomy['fine'].values()
        ])

        # Create loss function that only adds loss for fine labels for which
        # we don't have any incomplete labels
        def masked_loss(y_true, y_pred):
            loss = None
            for coarse_idx in range(len(full_coarse_to_fine_terminal_idxs)):
                true_terminal_idx = full_coarse_to_fine_terminal_idxs[
                    coarse_idx]
                true_incomplete_subidx = incomplete_fine_subidxs[coarse_idx]
                pred_end_idx = coarse_to_fine_end_idxs[coarse_idx]

                if coarse_idx != 0:
                    true_start_idx = full_coarse_to_fine_terminal_idxs[
                        coarse_idx - 1]
                    pred_start_idx = coarse_to_fine_end_idxs[coarse_idx - 1]
                else:
                    true_start_idx = 0
                    pred_start_idx = 0

                if true_incomplete_subidx is None:
                    true_end_idx = true_terminal_idx

                    sub_true = y_true[:, true_start_idx:true_end_idx]
                    sub_pred = y_pred[:, pred_start_idx:pred_end_idx]

                else:
                    # Don't include incomplete label
                    true_end_idx = true_terminal_idx - 1
                    true_incomplete_idx = true_incomplete_subidx + true_start_idx
                    assert true_end_idx - true_start_idx == pred_end_idx - pred_start_idx
                    assert true_incomplete_idx == true_end_idx

                    # 1 if not incomplete, 0 if incomplete
                    mask = K.expand_dims(1 - y_true[:, true_incomplete_idx])

                    # Mask the target and predictions. If the mask is 0,
                    # all entries will be 0 and the BCE will be 0.
                    # This has the effect of masking the BCE for each fine
                    # label within a coarse label if an incomplete label exists
                    sub_true = y_true[:, true_start_idx:true_end_idx] * mask
                    sub_pred = y_pred[:, pred_start_idx:pred_end_idx] * mask

                if loss is not None:
                    loss += K.sum(K.binary_crossentropy(sub_true, sub_pred))
                else:
                    loss = K.sum(K.binary_crossentropy(sub_true, sub_pred))

            return loss

        loss_func = masked_loss
    else:

        def unmasked_loss(y_true, y_pred):

            loss = None
            loss = K.sum(K.binary_crossentropy(y_true, y_pred))
            return loss

        loss_func = unmasked_loss

    ###     placeholder
    x = tf.placeholder(tf.float32, shape=[None, frames, bins, chs], name='x')
    meta_x = tf.placeholder(tf.float32, shape=[None, meta_dims], name='meta_x')
    y = tf.placeholder(tf.float32, shape=[None, y_true_num], name='y')
    is_training = tf.placeholder(tf.bool, shape=None, name='is_training')

    ###     net output
    output = model.forward(input_tensor=x,
                           input_meta=meta_x,
                           is_training=is_training)
    sigmoid_output = tf.nn.sigmoid(output, name='sigmoid_output')
    loss = loss_func(y, sigmoid_output)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    learning_rate = tf.Variable(float(lr), trainable=False, dtype=tf.float32)
    learning_rate_decay_op = learning_rate.assign(learning_rate * 0.9)
    with tf.control_dependencies(update_ops):
        #        train_op = tf.train.MomentumOptimizer(learning_rate=lr,momentum=momentum).minimize(loss)
        train_op = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(loss)

    ###     start session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    saver = tf.train.Saver(max_to_keep=max_ckpt)
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    if load_checkpoint:
        saver.restore(sess, load_checkpoint_path)

    ###     tensorboard summary

    train_summary_dir = os.path.join(model_path, 'summaries', 'train')
    train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

    loss_all = tf.placeholder(tf.float32, shape=None, name='loss_all')

    tf.add_to_collection("loss", loss_all)

    loss_summary = tf.summary.scalar('loss', loss_all)

    val_summary_dir = os.path.join(model_path, 'summaries', 'val')
    val_micro_auprc_summary_writer = tf.summary.FileWriter(
        os.path.join(val_summary_dir, 'micro_auprc'), sess.graph)
    val_macro_auprc_summary_writer = tf.summary.FileWriter(
        os.path.join(val_summary_dir, 'macro_auprc'), sess.graph)
    val_val_micro_F1score_summary_writer = tf.summary.FileWriter(
        os.path.join(val_summary_dir, 'micro_F1score'), sess.graph)
    val_summary = tf.placeholder(tf.float32, shape=None, name='loss_all')
    tf.add_to_collection("val_summary", val_summary)
    val_summary_op = tf.summary.scalar('val_summary', val_summary)

    ###     train loop
    print("* Training model.")
    class_auprc_dict = {}
    for epoch in range(n_epochs):
        train_loss = 0
        n_batch = 0
        for X_train_batch, X_meta_batch, y_train_batch in gen_train_batch(
                X_train, X_train_meta, y_train, batch_size):

            X_meta_batch = X_meta_batch.reshape(-1, meta_dims)
            X_train_batch = scale(X_train_batch, mean_train, std_train)
            X_train_batch = X_train_batch.reshape(-1, frames, bins, chs)
            _, train_loss_batch = sess.run(
                [train_op, loss],
                feed_dict={
                    x: X_train_batch,
                    meta_x: X_meta_batch,
                    y: y_train_batch,
                    is_training: True
                })
            train_loss += train_loss_batch
            n_batch += 1
        train_loss = train_loss / n_batch
        train_summary_op = tf.summary.merge([loss_summary])
        train_summaries = sess.run(train_summary_op,
                                   feed_dict={loss_all: train_loss})
        train_summary_writer.add_summary(train_summaries, epoch)

        print("step %d" % (epoch))
        print("   train loss: %f" % (train_loss))

        pre = []
        if ((epoch + 1) % snapshot == 0
                and epoch > 0) or epoch == n_epochs - 1:
            sess.run(learning_rate_decay_op)

            for val_data_batch, val_meta_batch in gen_val_batch(
                    X_valid, X_valid_meta, batch_size):

                val_meta_batch = val_meta_batch.reshape(-1, meta_dims)
                val_data_batch = scale(val_data_batch, mean_train, std_train)
                val_data_batch = val_data_batch.reshape(-1, frames, bins, chs)
                prediction = sess.run(sigmoid_output,
                                      feed_dict={
                                          x: val_data_batch,
                                          meta_x: val_meta_batch,
                                          is_training: False
                                      })
                pre.extend(prediction)
            # print(len(pre))
            generate_output_file(pre, valid_file_idxs, model_path, file_list,
                                 label_mode, taxonomy)
            submission_path = os.path.join(model_path, "output.csv")
            df_dict = metrics.evaluate(prediction_path=submission_path,
                                       annotation_path=annotation_path,
                                       yaml_path=taxonomy_path,
                                       mode=label_mode)
            val_micro_auprc, eval_df = metrics.micro_averaged_auprc(
                df_dict, return_df=True)
            val_macro_auprc, class_auprc = metrics.macro_averaged_auprc(
                df_dict, return_classwise=True)
            # Series.nonzero() was removed in pandas 1.0; use np.flatnonzero
            thresh_idx_05 = np.flatnonzero(eval_df['threshold'] >= 0.5)[0]
            val_micro_F1score = eval_df['F'][thresh_idx_05]

            val_summaries = sess.run(val_summary_op,
                                     feed_dict={val_summary: val_micro_auprc})
            val_micro_auprc_summary_writer.add_summary(val_summaries, epoch)
            val_summaries = sess.run(val_summary_op,
                                     feed_dict={val_summary: val_macro_auprc})
            val_macro_auprc_summary_writer.add_summary(val_summaries, epoch)
            val_summaries = sess.run(
                val_summary_op, feed_dict={val_summary: val_micro_F1score})
            val_val_micro_F1score_summary_writer.add_summary(
                val_summaries, epoch)
            class_auprc_dict['class_auprc_' + str(epoch)] = class_auprc
            print('official')
            print('micro', val_micro_auprc)
            print('micro_F1', val_micro_F1score)
            print('macro', val_macro_auprc)

            print('-----save:{}-{}'.format(
                os.path.join(model_path, 'checkpoint', 'model'), epoch))
            saver.save(sess,
                       os.path.join(model_path, 'checkpoint', 'model'),
                       global_step=epoch)

            np.save(os.path.join(model_path, 'class_auprc_dict.npy'),
                    class_auprc_dict)
    sess.close()
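The masking trick inside masked_loss is easier to see with concrete numbers. A standalone numpy sketch of the same idea, under the assumption that the last fine column of a coarse group is the incomplete flag: zeroing both target and prediction wherever the flag is set makes that row's BCE contribution (numerically) vanish.

import numpy as np

def bce(t, p, eps=1e-7):
    p = np.clip(p, eps, 1 - eps)
    return -(t * np.log(p) + (1 - t) * np.log(1 - p))

# two fine labels per row plus a trailing "incomplete" flag in y_true
y_true = np.array([[1.0, 0.0, 1.0],   # flag set -> masked out
                   [0.0, 1.0, 0.0]])  # flag clear -> normal BCE
y_pred = np.array([[0.9, 0.2],
                   [0.1, 0.8]])

mask = (1 - y_true[:, -1])[:, None]   # 0 where incomplete, 1 otherwise
sub_true = y_true[:, :2] * mask
sub_pred = y_pred * mask
print(bce(sub_true, sub_pred).sum(axis=1))  # first row ~0, second row > 0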
Example #11
def test_history_r_install(r_setup):
    """Test the history.yaml in detail."""
    # pylint: disable=line-too-long
    name = r_setup["name"]
    env = r_setup["env"]
    env_dir = r_setup["env_dir"]
    channels = r_setup["channels"]

    history_file = env_dir / "history.yaml"
    actual_history_content = history_file.read_text()
    print(actual_history_content)

    actual = yaml.load(actual_history_content, Loader=yaml.FullLoader)

    expected_packages = {
        "conda": {"r-base": "*", "r-devtools": "*"},
        "r": {
            "jsonlite": 'library("devtools"); install_version("jsonlite",version="1.2")',
            "praise": 'install.packages("praise")',
        },
    }

    expected_log = (
        r'R --quiet --vanilla -e "library(\"devtools\"); '
        r"install_version(\"jsonlite\",version=\"1.2\"); "
        r"install.packages(\"praise\")"
    )

    expected_action = (
        r'R --quiet --vanilla -e "library(\"devtools\"); '
        r"install_version(\"jsonlite\",version=\"1.2\",date=\"2019-01-01\"); "
        r'library(\"devtools\"); install_mran(\"praise\",version=\"1.0.0\",date=\"2019-01-01\")"'
    )

    expected_debug = 2 * [
        {
            "platform": get_platform_name(),
            "conda_version": CONDA_VERSION,
            "pip_version": pip.get_pip_version(name=name),
            "timestamp": str(date.today()),
        }
    ]
    assert actual["name"] == name
    assert actual["channels"] == channels
    assert actual["packages"] == expected_packages
    assert actual["revisions"][-1]["log"] == expected_log
    assert len(actual["revisions"]) == 2
    assert actual["revisions"][-1]["action"] == expected_action
    for i in range(len(actual["revisions"])):
        for key, val in expected_debug[i].items():
            if key == "timestamp":
                assert actual["revisions"][i]["debug"][key].startswith(val)
            else:
                assert actual["revisions"][i]["debug"][key] == val

    dependencies = env.dependencies

    expected_history = [
        f"name: {name}",
        f"id: {env.history.id}",
        "history-file-version: '1.0'",
        "channels:",
    ]
    for channel in channels:
        expected_history.append(f"  - {channel}")
    expected_history_start = "\n".join(
        expected_history
        + [
            "packages:",
            "  conda:",
            "    r-base: '*'",
            "    r-devtools: '*'",
            "  r:",
            '    jsonlite: library("devtools"); install_version("jsonlite",version="1.2")',
            '    praise: install.packages("praise")',
            "revisions:",
            "  - packages:",
            "      conda:",
            "        r-base: '*'",
            "        r-devtools: '*'",
            "    diff:",
            "      conda:",
            "        upsert:",
            f"        - r-base={dependencies['conda']['r-base'].version}",
            f"        - r-devtools={dependencies['conda']['r-devtools'].version}",
            "    log: conda create --name r_end_to_end_test r-base r-devtools --override-channels",
            "      --strict-channel-priority --channel r",
            "      --channel defaults",
            "    action: conda create --name r_end_to_end_test r-base",
        ]
    )
    expected_second_revision = "\n".join(
        [
            "  - packages:",
            "      conda:",
            "        r-base: '*'",
            "        r-devtools: '*'",
            "      r:",
            '        jsonlite: library("devtools"); install_version("jsonlite",version="1.2")',
            '        praise: install.packages("praise")',
            "    diff:",
            "      r:",
            "        upsert:",
            f"        - jsonlite",
            f"        - praise",
            r'    log: R --quiet --vanilla -e "library(\"devtools\"); install_version(\"jsonlite\",version=\"1.2\");',
            r"      install.packages(\"praise\")",
            r'    action: R --quiet --vanilla -e "library(\"devtools\"); install_version(\"jsonlite\",version=\"1.2\");',
            r"      install.packages(\"praise\")",
        ]
    )
    index_first_action = actual_history_content.find(
        "action: conda create --name r_end_to_end_test r-base"
    ) + len("action: conda create --name r_end_to_end_test r-base")
    actual_history_start = actual_history_content[:index_first_action]
    assert actual_history_start == expected_history_start

    index_second_revision_start = actual_history_content.find(
        "  - packages:", index_first_action
    )
    index_second_debug = actual_history_content.find(
        "debug:", index_second_revision_start
    )
    actual_second_revision = actual_history_content[
        index_second_revision_start:index_second_debug
    ].rstrip()
    assert actual_second_revision == expected_second_revision
Example #12
def test_remove_package(end_to_end_setup):
    """Test the removal of a package."""
    name = end_to_end_setup["name"]
    env_dir = end_to_end_setup["env_dir"]
    channels = end_to_end_setup["channels"]
    channel_command = end_to_end_setup["channel_command"].replace(
        "--strict-channel-priority ", "")

    conda_remove(name=name, specs=["colorama"], yes=True)

    actual_env_content = (env_dir / "conda-env.yaml").read_text()
    assert "colorama" not in actual_env_content

    log_file = env_dir / "history.yaml"
    actual_history_content = log_file.read_text()
    print(actual_history_content)

    actual = yaml.load(actual_history_content, Loader=yaml.FullLoader)

    expected_packages = {"conda": {"python": "3.6", "pytest": "*"}}
    expected_log = f"conda remove --name {name} colorama"
    expected_debug = {
        "platform": get_platform_name(),
        "conda_version": CONDA_VERSION,
        "pip_version": get_pip_version(name=name),
        "timestamp": str(date.today()),
    }

    assert actual["packages"] == expected_packages
    assert actual["logs"][-1] == expected_log
    assert len(actual["logs"]) == 3
    assert actual["channels"] == channels
    assert (actual["actions"][-1] ==
            f"conda remove --name {name} colorama {channel_command}")
    assert len(actual["actions"]) == 3
    for key, val in expected_debug.items():
        if key == "timestamp":
            assert actual["debug"][2][key].startswith(val)
        else:
            assert actual["debug"][2][key] == val

    expected_start = [f"name: {name}", "channels:"]
    for channel in channels:
        expected_start.append(f"  - {channel}")
    expected_history_start = "\n".join(expected_start + [
        "packages:",
        "  conda:",
        "    python: '3.6'",
        "    pytest: '*'",
        "logs:",
        "  - conda create --name end_to_end_test python=3.6 colorama --override-channels --strict-channel-priority",
        "    --channel main",
        f"  - conda install --name {name} pytest",
        f"  - {expected_log}",
        "actions:",
        f"  - conda create --name {name} python=3.6",
    ])

    end = actual_history_content.rfind("python=3.6") + len("python=3.6")
    actual_history_start = actual_history_content[:end]
    assert actual_history_start == expected_history_start
Example #13
        return text


clap_app = od([("name", "openstack-client"),
               ("settings", ['ArgRequiredElseHelp']),
               ("args", [
                   od([("os-cloud", {
                       "long": "os-cloud",
                       "help":
                       "use this as the cloud name from the clouds.yaml",
                       "takes_value": True,
                   })])
               ]), ("subcommands", [])])

with open("data/commands.yaml") as f:
    commands = yaml.load(f, Loader=yaml.SafeLoader)

with open("data/resources.yaml") as f:
    resources = yaml.load(f, Loader=yaml.SafeLoader)

with open("data/actions.yaml") as f:
    actions = yaml.load(f, Loader=yaml.SafeLoader)

# print(commands)
# pos_vals = parse_rust_enum_notation(t)
# clap_app["args"].append(od({"help": "", "index": 1, "possible_values": pos_vals}))

# possible_resources = [snake_to_kebabcase(x) for x in resources.keys()]
# resources_blub = od([
#     ("help", "resource to use"),
#     ("possible_values", possible_resources),
Example #14
import os
import re
import json
import sys

import oyaml as yaml

with open('kudos.yaml', 'r') as f:
    items = yaml.load(f, Loader=yaml.SafeLoader)
    for i in items:
        i['name'] = i['name'].lower().replace(' ', '_')

with open('kudos.yaml', 'w') as f:
    f.write(yaml.dump(items, default_flow_style=False))
Example #15
            obs.points = [ct.latlon2xy(pt) for pt in obs.points]
            obs.heading = ct.headingglobal2local(obs.heading, units='degrees')
        dynamic_obstacles.append(obs)

    print("Loaded %d static obstacles and %d dynamic obstacles" %
          (len(static_obstacles), len(dynamic_obstacles)))
    return static_obstacles + dynamic_obstacles


if __name__ == '__main__':
    import os
    import oyaml as yaml
    import matplotlib.pyplot as plt

    with open(os.path.expandvars("$HOME/dev/sas_sim/data/obstacles_sim.yaml"),
              'rb') as f:
        obstacles_dict = yaml.load(f.read(), Loader=yaml.SafeLoader)

    with open(os.path.expandvars("$HOME/dev/sas_sim/cfg/mission.yaml"),
              'rb') as f:
        yaml_mission = yaml.load(f.read(), Loader=yaml.SafeLoader)

    obs = loadObstacles(obstacles_dict, yaml_mission)

    for o in obs:
        x, y = o.getBuffer().exterior.xy
        plt.figure()
        plt.plot(x, y)

    plt.show()
Example #16
File: stack.py  Project: jpza/ekscli
    def create(self):
        reporter = ResourceReporter()
        resource = Resource('kubeconf', 'Kubernetes configuration file', Status.not_exist, resource_id=self.kubeconf)
        reporter.progress(resource)
        try:
            if os.path.isfile(self.kubeconf):
                import oyaml as yaml
                with open(self.kubeconf, 'r') as cf:
                    kc = yaml.load(cf, Loader=yaml.SafeLoader)

                clusters = self._get_components(kc, 'clusters')
                cs = [c for c in clusters if c.get('name') == self.cluster_info.name]
                if not cs:
                    clusters.append(OrderedDict([
                        ('cluster', OrderedDict([
                            ('certificate-authority-data', self.cluster_info.cert),
                            ('server', self.cluster_info.endpoint),
                        ])),
                        ('name', self.cluster_info.name),
                    ]))
                else:
                    for c in cs:
                        c['cluster']['server'] = self.cluster_info.endpoint
                        c['cluster']['certificate-authority-data'] = self.cluster_info.cert

                users = self._get_components(kc, 'users')
                us = [u for u in users if u.get('name') == self.user]
                if not us:
                    users.append(OrderedDict([
                        ('name', self.user),
                        ('user', OrderedDict([
                            ('exec', OrderedDict([
                                ('apiVersion', 'client.authentication.k8s.io/v1alpha1'),
                                ('command', self.heptio),
                                ('args', ['token', '-i', self.cluster_info.name])
                            ]))]))]))
                else:
                    for u in users:
                        u['user'] = OrderedDict([
                            ('exec', OrderedDict([
                                ('apiVersion', 'client.authentication.k8s.io/v1alpha1'),
                                ('command', self.heptio),
                                ('args', ['token', '-i', self.cluster_info.name])
                            ]))])

                contexts = self._get_components(kc, 'contexts')
                cs = [c for c in contexts if c.get('context', {}).get('cluster') == self.cluster_info.name
                      and c.get('context', {}).get('user') == self.user]
                if not cs:
                    contexts.append(OrderedDict([
                        ('context', OrderedDict([
                            ('cluster', self.cluster_info.name),
                            ('namespace', 'default'),
                            ('user', self.user),
                        ])),
                        ('name', self.cluster_info.name),
                    ]))

                kc['current-context'] = self.cluster_info.name

                with open(self.kubeconf, 'w') as cf:
                    cf.write(yaml.safe_dump(kc, default_flow_style=False))
            else:
                s = Environment().from_string(KubeConfig.KUBE_CONFIG_YAML).render(ci=self.cluster_info, user=self.user,
                                                                                  heptio=self.heptio)
                with open(self.kubeconf, 'w') as cf:
                    cf.write(s)

            resource.status = Status.created
            resource.resource_id = self.kubeconf
            reporter.succeed(resource)
        except Exception as e:
            resource.status = Status.failed
            reporter.fail(resource)
            raise EKSCliException(e)

        return
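For reference, the cluster entry assembled above serializes to the usual kubeconfig shape. A minimal sketch with synthetic values (oyaml is assumed to be installed; it preserves the OrderedDict key order in the output):

from collections import OrderedDict
import oyaml as yaml

entry = OrderedDict([
    ('cluster', OrderedDict([
        ('certificate-authority-data', 'BASE64-CERT'),  # synthetic value
        ('server', 'https://example.eks.amazonaws.com'),  # synthetic value
    ])),
    ('name', 'my-cluster'),
])
print(yaml.safe_dump({'clusters': [entry]}, default_flow_style=False))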
Example #17
    def parse(self, print_opt=True):
        ''' use update_fn() to do additional modifications on args
            before printing
        '''
        # initialize parser with basic options
        if not self.initialized:
            self.initialize()

        # parse options
        opt = self.parser.parse_args()

        # get arguments specified in config file
        if opt.config_file:
            data = yaml.load(opt.config_file, Loader=yaml.FullLoader)
            data = self._flatten_to_toplevel(data)
        else:
            data = {}

        # determine which options were specified
        # explicitly with command line args
        option_strings = {}
        for action_group in self.parser._action_groups:
            for action in action_group._group_actions:
                for option in action.option_strings:
                    option_strings[option] = action.dest
        specified_options = set(
            [option_strings[x] for x in sys.argv if x in option_strings])

        # make hierarchical namespace wrt groups
        # positional and optional arguments in toplevel
        args = {}
        for group in self.parser._action_groups:
            # by default, take the result from argparse
            # unless was specified in config file and not in command line
            group_dict = {
                a.dest: data[a.dest] if a.dest in data
                and a.dest not in specified_options else getattr(
                    opt, a.dest, None)
                for a in group._group_actions
            }
            if group.title == 'positional arguments' or \
               group.title == 'optional arguments':
                args.update(group_dict)
            else:
                args[group.title] = argparse.Namespace(**group_dict)

        opt = argparse.Namespace(**args)
        delattr(opt, 'config_file')

        # output directory
        if opt.name:
            output_dir = opt.name
        else:
            output_dir = '_'.join([
                opt.model, opt.transform, opt.walk_type,
                'lr' + str(opt.learning_rate), opt.loss
            ])
            if opt.model == 'biggan':
                subopt = opt.biggan
                if subopt.category:
                    output_dir += '_cat{}'.format(subopt.category)
            elif opt.model == 'stylegan':
                subopt = opt.stylegan
                output_dir += '_{}'.format(subopt.dataset)
                output_dir += '_{}'.format(subopt.latent)
            elif opt.model == 'pgan':
                subopt = opt.pgan
                output_dir += '_{}'.format(subopt.dset)
            if opt.walk_type.startswith('NN'):
                subopt = opt.nn
                if subopt.eps:
                    output_dir += '_eps{}'.format(subopt.eps)
                if subopt.num_steps:
                    output_dir += '_nsteps{}'.format(subopt.num_steps)
            if opt.transform.startswith(
                    'color') and opt.color.channel is not None:
                output_dir += '_chn{}'.format(opt.color.channel)

        if opt.suffix:
            output_dir += opt.suffix
        if opt.prefix:
            output_dir = opt.prefix + output_dir

        opt.output_dir = os.path.join(opt.models_dir, output_dir)

        # write the configurations to disk
        if print_opt:
            self.print_options(opt)

        self.opt = opt
        return opt
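The precedence rule the comments describe (explicit command-line value beats config-file value beats argparse default) comes down to one check per option. A minimal sketch with hypothetical names:

import argparse

def resolve(dest, config_data, specified_options, parsed):
    # config value wins only when it is present in the config file
    # and the option was not typed on the command line
    if dest in config_data and dest not in specified_options:
        return config_data[dest]
    return getattr(parsed, dest, None)

parsed = argparse.Namespace(learning_rate=0.01)  # argparse result (default or CLI)
print(resolve('learning_rate', {'learning_rate': 0.001}, set(), parsed))              # 0.001
print(resolve('learning_rate', {'learning_rate': 0.001}, {'learning_rate'}, parsed))  # 0.01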
Example #18
def index():
    # context manager closes the file handle; explicit Loader avoids the
    # deprecated bare yaml.load
    with open('_config.yaml', encoding='utf8') as f:
        website_data = yaml.load(f, Loader=yaml.SafeLoader)

    return render_template('index.html', data=website_data)
Example #19
    def handle(self, *args, **options):
        # config
        if options['debug']:
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(level=logging.INFO)
        network = options['network']
        gitcoin_account = options['gitcoin_account']
        gas_price_gwei = options['gas_price_gwei']
        kudos_filter = options['kudos_filter']

        if gitcoin_account:
            account = settings.KUDOS_OWNER_ACCOUNT
            private_key = settings.KUDOS_PRIVATE_KEY
        else:
            account = options['account']
            private_key = options['private_key']
        skip_sync = options['skip_sync']

        kudos_contract = KudosContract(network=network)

        yaml_file = options['yaml_file']

        with open(yaml_file) as f:
            all_kudos = yaml.load(f, Loader=yaml.SafeLoader)

        for __, kudos in enumerate(all_kudos):
            if kudos_filter not in kudos['name']:
                continue
            image_name = urllib.parse.quote(kudos.get('image'))
            if image_name:
                # Support Open Sea
                if kudos_contract.network == 'rinkeby':
                    image_path = f'https://ss.gitcoin.co/static/v2/images/kudos/{image_name}'
                    external_url = f'https://stage.gitcoin.co/kudos/{kudos_contract.address}/{kudos_contract.getLatestId() + 1}'
                elif kudos_contract.network == 'mainnet':
                    image_path = f'https://s.gitcoin.co/static/v2/images/kudos/{image_name}'
                    external_url = f'https://gitcoin.co/kudos/{kudos_contract.address}/{kudos_contract.getLatestId() + 1}'
                elif kudos_contract.network == 'localhost':
                    image_path = f'v2/images/kudos/{image_name}'
                    external_url = f'http://localhost:8000/kudos/{kudos_contract.address}/{kudos_contract.getLatestId() + 1}'
                else:
                    raise RuntimeError(
                        'Need to set the image path for that network')
            else:
                image_path = ''

            attributes = []
            # "trait_type": "investor_experience",
            # "value": 20,
            # "display_type": "boost_number",
            # "max_value": 100
            rarity = {
                "trait_type": "rarity",
                "value": get_rarity_score(kudos['numClonesAllowed']),
            }
            attributes.append(rarity)

            artist = {"trait_type": "artist", "value": kudos.get('artist')}
            attributes.append(artist)

            platform = {
                "trait_type": "platform",
                "value": kudos.get('platform')
            }
            attributes.append(platform)

            tags = kudos['tags']
            price_finney = kudos['priceFinney']

            # append tags
            if price_finney < 2:
                tags.append('budget')
            if price_finney < 5:
                tags.append('affordable')
            if price_finney > 20:
                tags.append('premium')
            if price_finney > 200:
                tags.append('expensive')

            for tag in tags:
                attributes.append({"trait_type": "tag", "value": tag})

            readable_name = humanize_name(kudos['name'])
            metadata = {
                'name': readable_name,
                'image': image_path,
                'description': kudos['description'],
                'external_url': external_url,
                'background_color': 'fbfbfb',
                'attributes': attributes
            }

            if not options['mint_to']:
                mint_to = kudos_contract._w3.toChecksumAddress(
                    settings.KUDOS_OWNER_ACCOUNT)
            else:
                mint_to = kudos_contract._w3.toChecksumAddress(
                    options['mint_to'])

            is_live = options['live']
            if is_live:
                try:
                    token_uri_url = kudos_contract.create_token_uri_url(
                        **metadata)
                    args = (mint_to, kudos['priceFinney'],
                            kudos['numClonesAllowed'], token_uri_url)
                    kudos_contract.mint(
                        *args,
                        account=account,
                        private_key=private_key,
                        skip_sync=skip_sync,
                        gas_price_gwei=gas_price_gwei,
                    )
                    print('Live run - Name: ', readable_name, ' - Account: ',
                          account, 'Minted!')
                except Exception as e:
                    print(f'Error minting: {readable_name} - {e}')
            else:
                print('Dry run - Name: ', readable_name, ' - Account: ',
                      account, 'Skipping!')
Example #20
def evaluate_ensemble(annotation_path, taxonomy_path, mel_dir, models_dir1,
                      models_dir2, output_dir):

    os.makedirs(output_dir, exist_ok=True)

    # Load annotations and taxonomy
    print("* Loading dataset.")
    annotation_data = pd.read_csv(annotation_path).sort_values(
        'audio_filename')
    with open(taxonomy_path, 'r') as f:
        taxonomy = yaml.load(f, Loader=yaml.Loader)

    file_list = annotation_data['audio_filename'].unique().tolist()

    train_file_idxs, test_file_idxs = get_subset_split(annotation_data)

    print('load mel spectrograms')
    mel_list = load_mels(file_list, mel_dir)

    model_list = [f for f in os.listdir(models_dir1) if 'pth' in f]
    val_loss = [float(f.split('_')[-1][:-4]) for f in model_list]
    model_filename = model_list[np.argmin(val_loss)]

    model1 = MyCNN()
    if torch.cuda.is_available():
        model1.load_state_dict(
            torch.load(os.path.join(models_dir1, model_filename)))
        model1.cuda()
    else:
        model1.load_state_dict(
            torch.load(os.path.join(models_dir1, model_filename),
                       map_location='cpu'))

    model1.eval()

    model_list = [f for f in os.listdir(models_dir2) if 'pth' in f]
    val_loss = [float(f.split('_')[-1][:-4]) for f in model_list]
    model_filename = model_list[np.argmin(val_loss)]

    model2 = MyCNN()
    if torch.cuda.is_available():
        model2.load_state_dict(
            torch.load(os.path.join(models_dir2, model_filename)))
        model2.cuda()
    else:
        model2.load_state_dict(
            torch.load(os.path.join(models_dir2, model_filename),
                       map_location='cpu'))

    model2.eval()

    y_pred = predict_ensemble(mel_list, test_file_idxs, model1, model2)

    aggregation_type = 'max'
    label_mode = 'coarse'
    generate_output_file(y_pred, test_file_idxs, output_dir, file_list,
                         aggregation_type, label_mode, taxonomy)

    mode = 'coarse'
    prediction_path = os.path.join(output_dir, 'output_max.csv')
    df_dict = evaluate(prediction_path, annotation_path, taxonomy_path, mode)

    micro_auprc, eval_df = micro_averaged_auprc(df_dict, return_df=True)
    macro_auprc, class_auprc = macro_averaged_auprc(df_dict,
                                                    return_classwise=True)

    # Get index of first threshold that is at least 0.5
    thresh_0pt5_idx = np.flatnonzero(eval_df['threshold'] >= 0.5)[0]

    print("{} level evaluation:".format(mode.capitalize()))
    print("======================")
    print(" * Micro AUPRC:           {}".format(micro_auprc))
    print(" * Micro F1-score (@0.5): {}".format(eval_df["F"][thresh_0pt5_idx]))
    print(" * Macro AUPRC:           {}".format(macro_auprc))
    print(" * Coarse Tag AUPRC:")

    for coarse_id, auprc in class_auprc.items():
        print("      - {}: {}".format(coarse_id, auprc))
Example #21
def test_history_after_install(end_to_end_setup):
    """Test the history.yaml file in detail after pytest has been installed."""
    name = end_to_end_setup["name"]

    conda_install(name=name, specs=["pytest"], yes=True)

    env_dir = end_to_end_setup["env_dir"]
    channels = end_to_end_setup["channels"]
    channel_command = end_to_end_setup["channel_command"]

    log_file = env_dir / "history.yaml"
    actual_history_content = log_file.read_text()
    print(actual_history_content)

    actual = yaml.load(actual_history_content, Loader=yaml.FullLoader)

    action_install_expected_pattern = (
        rf"(conda install --name {name})(\s)(pytest=)(.*)({channel_command})")

    expected_packages = {
        "conda": {
            "colorama": "*",
            "python": "3.6",
            "pytest": "*"
        }
    }
    expected_log = f"conda install --name {name} pytest"
    expected_debug = 2 * [{
        "platform": get_platform_name(),
        "conda_version": CONDA_VERSION,
        "pip_version": get_pip_version(name=name),
        "timestamp": str(date.today()),
    }]

    assert actual["packages"] == expected_packages
    assert actual["logs"][-1] == expected_log
    assert len(actual["logs"]) == 2
    assert actual["channels"] == channels
    assert re.match(action_install_expected_pattern, actual["actions"][-1])
    assert len(actual["actions"]) == 2
    for key, val in expected_debug[1].items():
        if key == "timestamp":
            assert actual["debug"][1][key].startswith(val)
        else:
            assert actual["debug"][1][key] == val

    expected_start = [f"name: {name}", "channels:"]
    for channel in channels:
        expected_start.append(f"  - {channel}")
    expected_history_start = "\n".join(expected_start + [
        "packages:",
        "  conda:",
        "    python: '3.6'",
        "    colorama: '*'",
        "    pytest: '*'",
        "logs:",
        "  - conda create --name end_to_end_test python=3.6 colorama --override-channels --strict-channel-priority",
        "    --channel main",
        f"  - {expected_log}",
        "actions:",
        f"  - conda create --name {name} python=3.6",
    ])
    end = actual_history_content.rfind("python=3.6") + len("python=3.6")
    actual_history_start = actual_history_content[:end]
    assert actual_history_start == expected_history_start
Example #22
def load_taxonomy_codes(filepath):
    with open(filepath, 'r') as f:
        taxonomy_codes = yaml.load(f, Loader=yaml.SafeLoader)

    return taxonomy_codes
Example #23
def main():

	# Parsing user input
	parser = argparse.ArgumentParser()
	parser.add_argument(
			'-g','--grammar_filename',
			nargs='?',
			type=str,
			required=True,
			help='Grammar specification file.'
		)
	parser.add_argument(
			'-i','--input_filename',
			nargs='?',
			type=str,
			required=True,
			help='Input file to parse.'
		)
	parser.add_argument(
			'-o','--output_filename',
			nargs='?',
			type=str,
			required=True,
			help='Output file.'
		)
	parser.add_argument(
			'-y','--yaml_domain_file',
			nargs='?',
			type=str,
			default=None,
			help='YAML domain file.'
		)
	args = parser.parse_args()

	# Reading the grammar
	with open(args.grammar_filename,'r') as f:
		grammar = f.read()

	# Setting up the parser
	parser = Lark(grammar).parse

	# Reading the input file
	with open(args.input_filename,'rb') as f:
		inp = f.read()

	# Parsing the input file
	tree = parser(inp)

	# If domain database has not been provided
	if args.yaml_domain_file is None:

		# Parsing domain
		database = AplDomainTransformer().transform(tree)

	else:

		# Loading domain
		with open(args.yaml_domain_file,'r') as f:
			domain = yaml.load(f.read(), Loader=yaml.SafeLoader)

		# Parsing problem
		database = AplProblemTransformer(domain).transform(tree)

	# Storing the result in the output file
	with open(args.output_filename,'w') as f:
		f.write(yaml.dump(database))
Example #24
def unpack_competition(competition_dataset_pk):
    competition_dataset = Data.objects.get(pk=competition_dataset_pk)
    creator = competition_dataset.created_by

    status = CompetitionCreationTaskStatus.objects.create(
        dataset=competition_dataset,
        status=CompetitionCreationTaskStatus.STARTING,
    )

    try:
        with TemporaryDirectory() as temp_directory:
            # ---------------------------------------------------------------------
            # Extract bundle
            try:
                with zipfile.ZipFile(competition_dataset.data_file, 'r') as zip_pointer:
                    zip_pointer.extractall(temp_directory)
            except zipfile.BadZipFile:
                raise CompetitionUnpackingException("Bad zip file uploaded.")

            # ---------------------------------------------------------------------
            # Read metadata (competition.yaml)
            yaml_path = os.path.join(temp_directory, "competition.yaml")
            if not os.path.exists(yaml_path):
                raise CompetitionUnpackingException("competition.yaml is missing from zip, check your folder structure "
                                                    "to make sure it is in the root directory.")
            with open(yaml_path) as f:
                competition_yaml = yaml.load(f.read(), Loader=yaml.SafeLoader)

            yaml_version = str(competition_yaml.get('version', '1'))

            logger.info(f"The YAML version is: {yaml_version}")
            if yaml_version in ['1', '1.5']:
                unpacker_class = V15Unpacker
            elif yaml_version == '2':
                unpacker_class = V2Unpacker
            else:
                raise CompetitionUnpackingException(
                    'A suitable version could not be found for this competition. Make sure one is supplied in the yaml.'
                )

            unpacker = unpacker_class(
                competition_yaml=competition_yaml,
                temp_directory=temp_directory,
                creator=creator,
            )

            unpacker.unpack()

            try:
                competition = unpacker.save()
            except ValidationError as e:
                def _get_error_string(error_dict):
                    """Helps us nicely print out a ValidationError"""
                    for key, errors in error_dict.items():
                        try:
                            return f'{key}: {"; ".join(errors)}\n'
                        except TypeError:
                            # We ran into a list of nested dictionaries, start recursing!
                            nested_errors = []
                            for e in errors:
                                error_text = _get_error_string(e)
                                if error_text:
                                    nested_errors.append(error_text)
                            return f'{key}: {"; ".join(nested_errors)}\n'

                raise CompetitionUnpackingException(_get_error_string(e.detail))

            status.status = CompetitionCreationTaskStatus.FINISHED
            status.resulting_competition = competition
            status.save()
            # call again, to make sure phases get sent to chahub
            competition.save()
            logger.info("Competition saved!")

    except CompetitionUnpackingException as e:
        # We want to catch well handled exceptions and display them to the user
        logger.info(str(e))
        status.details = str(e)
        status.status = CompetitionCreationTaskStatus.FAILED
        status.save()
        raise e

    except Exception as e:
        # These are critical uncaught exceptions, make sure the end user is at least informed
        # that unpacking has failed -- do not share unhandled exception details
        logger.error(traceback.format_exc())
        status.details = "Contact an administrator, competition failed to unpack in a critical way."
        status.status = CompetitionCreationTaskStatus.FAILED
        status.save()
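The nested ValidationError flattening in the example above is easy to exercise on its own. A standalone sketch, with plain dicts and lists standing in for e.detail (the payload shape is an assumption based on DRF-style error objects):

def get_error_string(error_dict):
    # Turn {field: [messages]} into "field: msg; msg", recursing into
    # nested lists of dicts exactly like the handler above.
    for key, errors in error_dict.items():
        try:
            return f'{key}: {"; ".join(errors)}\n'
        except TypeError:
            nested = [get_error_string(e) for e in errors]
            return f'{key}: {"; ".join(n for n in nested if n)}\n'

print(get_error_string({"phases": [{"name": ["This field is required."]}]}))
# -> phases: name: This field is required.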
Example no. 25
def main(argv=None):

    try:
        # init
        citk_path = expanduser("~") + "/workspace/csra/citk"
        project_name = str(os.path.relpath(".", ".."))

        # setup command line
        parser = argparse.ArgumentParser(
            description=
            'Script upgrades the given project within the distribution file.')
        parser.add_argument(
            "--project",
            help='The name of the project to apply the version upgrade.')
        parser.add_argument(
            "--citk",
            default=citk_path,
            help=
            'Path to the citk project which contains the project and distribution descriptions.'
        )
        parser.add_argument(
            "--distribution",
            help='The name of the distribution to apply the version upgrade.')
        parser.add_argument(
            "--version",
            help=
            'Can be used to force the version update to the given project version.'
        )
        parser.add_argument(
            "--dry-run",
            help=
            'This mode does not push modified changes to any git repositories.',
            action='store_true')
        parser.add_argument(
            "-v",
            help=
            'Enable this verbose flag to get more logging and exception printing during application errors.',
            action='store_true')

        # parse command line
        args = parser.parse_args(argv)

        # print proper help screen if not all needed arguments are given
        _LOGGER.debug(args)
        if not all([args.project, args.distribution]):
            parser.print_help()
            return 1

        project_name = args.project
        citk_path = args.citk
        distribution_name = args.distribution
        version_to_force = args.version

        # config logger
        if args.v:
            _LOGGER.setLevel(logging.DEBUG)
            coloredlogs.install(level='DEBUG', logger=_LOGGER)
            _LOGGER.debug('Debug log enabled.')
        else:
            _LOGGER.setLevel(logging.INFO)

        # post init
        project_file_name = os.path.join(citk_path, "projects",
                                         project_name + ".project")
        tmp_repo_directory = "/tmp/" + str(
            getpass.getuser()) + "/" + project_name
        distribution_file_uri = citk_path + "/distributions/" + distribution_name + ".distribution"
        distribution_tmp_file_uri = citk_path + "/distributions/." + distribution_name + ".distribution.tmp"

        # verify
        if not os.path.exists(distribution_file_uri):
            raise ValueError("distribution " +
                             colored(str(distribution_file_uri), 'red') +
                             " does not exist!")

        # load and process
        with open(project_file_name, "r+") as project_file:
            data = yaml.load(project_file, Loader=yaml.SafeLoader)

            # load repo
            try:
                _LOGGER.debug(
                    "cache repo " +
                    colored(data["variables"]["repository"], 'blue') +
                    " into " + colored(tmp_repo_directory, 'blue'))
                if os.path.exists(tmp_repo_directory):
                    shutil.rmtree(tmp_repo_directory)
                repo = Repo.clone_from(data["variables"]["repository"],
                                       tmp_repo_directory)
                assert not repo.bare
            except Exception as ex:
                _LOGGER.info(
                    "project repository entry could not found in project description "
                    + colored(project_file_name, 'red'))
                if str(ex):
                    _LOGGER.error(colored("ERROR", 'red') + ": " + str(ex))
                    _LOGGER.debug(ex, exc_info=True)
                return 233

            # count existing branches
            if "branches" not in data["variables"]:
                branch_counter = 0
            else:
                branch_counter = len(data["variables"]["branches"])

            # remove existing branches
            data["variables"]["branches"] = []

            # store branches
            for branch_type in repo.refs:

                # filter local branches
                if not branch_type.is_remote():
                    continue

                # filter head
                if branch_type.remote_head == "HEAD":
                    continue

                # filter origin refs
                if branch_type.remote_head.startswith("origin"):
                    continue

                branch = str(branch_type.remote_head)
                data["variables"]["branches"].append(branch)

            # sort branches
            data["variables"]["branches"].sort()

            # count existing tags
            if "tags" not in data["variables"]:
                tag_counter = 0
            else:
                tag_counter = len(data["variables"]["tags"])

            # remove existing tags
            data["variables"]["tags"] = []

            # store tags
            for tag_type in repo.tags:
                tag = str(tag_type)
                data["variables"]["tags"].append(tag)

            # sort tags
            data["variables"]["tags"].sort()

        # store back
        if not args.dry_run:
            with open(project_file_name, "w") as project_file:
                project_file.write(
                    yaml.dump(data,
                              allow_unicode=True,
                              default_flow_style=False,
                              encoding="utf-8"))

        branch_counter = len(data["variables"]["branches"]) - branch_counter
        tag_counter = len(data["variables"]["tags"]) - tag_counter
        if branch_counter != 0:
            _LOGGER.info("update " + colored(str(branch_counter), 'green') +
                         " branch" + ("" if branch_counter == 1 else "s") +
                         " of project " + colored(project_name, 'green') +
                         " in " + colored(project_file_name, 'blue') + "!")
        if tag_counter != 0:
            _LOGGER.info("update " + colored(str(tag_counter), 'green') +
                         " tag" + ("" if tag_counter == 1 else "s") +
                         " of project " + colored(project_name, 'green') +
                         " in " + colored(project_file_name, 'blue') + "!")
    except Exception as ex:
        _LOGGER.info("versions [branches|tags] of project " +
                     colored(project_name, 'red') + " not updated in " +
                     colored(project_file_name, 'blue') + "!")
        if str(ex):
            _LOGGER.error(colored("ERROR", 'red') + ": " + str(ex))
            _LOGGER.debug(ex, exc_info=True)
        return 1

    # check if forced version is available
    if version_to_force:
        forced_version_verified = False
        for tag_type in repo.tags:
            if version_to_force == str(tag_type):
                forced_version_verified = True
        for branch_type in repo.refs:
            # filter local branches
            if not branch_type.is_remote():
                continue

            # filter head
            if branch_type.remote_head == "HEAD":
                continue

            if version_to_force == str(branch_type.remote_head):
                forced_version_verified = True
        if not args.dry_run and not forced_version_verified:
            _LOGGER.error(
                colored("ERROR", 'red') + ": the forced version " +
                colored(version_to_force, 'red') + " is not available for " +
                colored(project_name, 'blue'))
            return 1

    # check if a distribution update is needed
    if not distribution_name:
        _LOGGER.info(
            "skip project upgrade within distribution because no distribution was defined!"
        )
        shutil.rmtree(tmp_repo_directory)
        return 0

    if version_to_force:
        # force version
        selected_version = version_to_force
    else:
        if len(repo.tags) == 0:
            _LOGGER.error(
                colored("ERROR", 'red') + ": " + colored("no tags", 'red') +
                " available for project " + colored(project_name, 'blue'))
            return 22

        # detect version
        selected_tag = None
        for tag_type in repo.tags:
            tag = str(tag_type)
            # skip if non regular version
            if not tag.startswith('v'):
                _LOGGER.debug("skip tag[" + tag +
                              "] because it does not start with the letter v")
                continue

            _LOGGER.debug("## found: " + tag)
            tagSplit = tag.split('-')
            versionSplit = tagSplit[0].split('.')
            major_version = int(versionSplit[0].replace("v", ""))

            if len(versionSplit) >= 2:
                minor_version = int(versionSplit[1])
            else:
                minor_version = 0

            if len(versionSplit) >= 3:
                patch_version = int(versionSplit[2])
            else:
                patch_version = 0

            if len(versionSplit) >= 4:
                build_number = int(versionSplit[3])
            else:
                build_number = None

            if len(tagSplit) > 1:
                releaseType = tagSplit[1]
            else:
                releaseType = "stable"

            _LOGGER.debug("detected: major[" + str(major_version) +
                          "] minor[" + str(minor_version) + "] patch[" +
                          str(patch_version) + "] type[" + str(releaseType) +
                          "]")

            current_tag = Version(major_version, minor_version, patch_version,
                                  build_number, releaseType, tag)

            if selected_tag is None:
                selected_tag = current_tag
                continue
            else:
                if current_tag.major > selected_tag.major:
                    selected_tag = current_tag
                    continue
                elif current_tag.major < selected_tag.major:
                    continue

                if current_tag.minor > selected_tag.minor:
                    selected_tag = current_tag
                    continue
                elif current_tag.minor < selected_tag.minor:
                    continue

                if current_tag.patch > selected_tag.patch:
                    selected_tag = current_tag
                    continue
                elif current_tag.patch < selected_tag.patch:
                    continue

                if current_tag.build is not None and selected_tag.build is not None:
                    if current_tag.build > selected_tag.build:
                        selected_tag = current_tag
                        continue
                    elif current_tag.build < selected_tag.build:
                        continue

                if not "rc" in current_tag.release_type and "rc" in selected_tag.release_type:
                    selected_tag = current_tag
                    continue
                elif not "beta" in current_tag.release_type and "beta" in selected_tag.release_type:
                    selected_tag = current_tag
                    continue
                elif not "alpha" in current_tag.release_type and "alpha" in selected_tag.release_type:
                    selected_tag = current_tag
                    continue
        if selected_tag is None or not selected_tag.tag:
            _LOGGER.error(
                colored("ERROR", 'red') + ": " +
                colored("no valid tags", 'red') + " available for project " +
                colored(project_name, 'blue'))
            return 23
        selected_version = selected_tag.tag
    project_found = False

    # update version in distribution file
    with open(distribution_tmp_file_uri, 'w') as tmpFile:
        _LOGGER.debug("detect projects...")
        with open(distribution_file_uri) as distributionFile:
            for line in distributionFile.readlines():
                if project_name in line:
                    _LOGGER.debug("found project :   " + str(line))
                    context = line.split('@')

                    # verify project name
                    if str(context[0]).startswith("- " + project_name + " "):

                        # verify current version
                        if context[1] == selected_version + "\n":
                            _LOGGER.info(
                                colored(project_name, 'blue') +
                                " is already " +
                                colored("up-to-date", 'green') + " within " +
                                colored(distribution_name, 'blue'))
                            return 0

                        # upgrade
                        _LOGGER.info(
                            "upgrade " + project_name + " version from " +
                            colored(str(context[1]).replace("\n", ""), 'blue')
                            + " to " + colored(selected_version, 'green'))
                        context[1] = selected_version + "\n"
                        line = '@'.join(context)
                        _LOGGER.debug("update line to: " + line)
                        project_found = True
                tmpFile.write(line)

    if not project_found:
        _LOGGER.debug("project " + colored(project_name, 'blue') +
                      " skipped! " + colored("Entry not found", 'yellow') +
                      " in " + colored(distribution_file_uri, 'blue'))
        return 0

    # write back and cleanup
    if os.path.exists(tmp_repo_directory):
        shutil.rmtree(tmp_repo_directory)

    if not args.dry_run:
        shutil.move(distribution_tmp_file_uri, distribution_file_uri)

    return 0
Example no. 26
from collections import OrderedDict

import oyaml as yaml

# oyaml patches PyYAML so that mappings load as OrderedDict.
with open("data/resources.yaml") as f:
    t = yaml.load(f)

# Sort resources by type first, then alphabetically by name.
r = OrderedDict(sorted(t.items(), key=lambda x: (x[1]["resource_type"], x[0])))

with open("data/resources.yaml.tmp", "w") as f:
    yaml.dump(r, f, default_flow_style=False)
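The sort key groups resources by resource_type first and then alphabetically by name; a tiny self-contained check with invented entries:

from collections import OrderedDict

t = {
    "bravo": {"resource_type": "vm"},
    "alpha": {"resource_type": "vm"},
    "zulu": {"resource_type": "database"},
}
r = OrderedDict(sorted(t.items(), key=lambda x: (x[1]["resource_type"], x[0])))
print(list(r))  # ['zulu', 'alpha', 'bravo']: grouped by type, then by name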
Example no. 27
def test_r_remove_package(r_setup):
    # pylint: disable=too-many-locals
    name = r_setup["name"]
    env_dir = r_setup["env_dir"]
    channels = r_setup["channels"]

    r_remove(name=name, specs=["praise"], yes=True)

    history_file = env_dir / "history.yaml"
    actual_history_content = history_file.read_text()
    print(actual_history_content)
    expected_packages = {
        "conda": {"r-base": "*", "r-devtools": "*"},
        "r": {
            "jsonlite": 'library("devtools"); install_version("jsonlite", version="1.2")'
        },
    }
    actual = yaml.load(actual_history_content, Loader=yaml.FullLoader)
    remove_command = r"remove.packages(c(\"praise\"))"
    expected_log = f'R --quiet --vanilla -e "{remove_command}"'

    assert actual["packages"] == expected_packages
    assert actual["revisions"][-1]["log"] == expected_log
    assert actual["revisions"][-1]["action"] == expected_log

    packages = get_dependencies(name=name)
    conda_packages = packages["conda"]

    expected_start = [f"name: {name}", "channels:"]
    for channel in channels + ["nodefaults"]:
        expected_start.append(f"  - {channel}")
    expected_start.append("dependencies:")

    expected_conda_packages = [
        "  - r-base=" + conda_packages["r-base"].version,
        "  - r-devtools=" + conda_packages["r-devtools"].version,
    ]

    expected = "\n".join(expected_start + expected_conda_packages) + "\n"

    actual = (env_dir / "environment.yml").read_text()
    print(actual)
    assert actual == expected

    install_r = "\n".join(
        ['library("devtools"); install_version("jsonlite", version="1.2")']
    )

    actual_install_r = (env_dir / "install.R").read_text()
    print(actual_install_r)
    assert actual_install_r == install_r

    expected_packages_section = "\n".join(
        [
            "packages:",
            "  conda:",
            "    r-base: '*'",
            "    r-devtools: '*'",
            "  r:",
            '    jsonlite: library("devtools"); install_version("jsonlite",version="1.2")',
            "revisions:",
        ]
    )
    assert expected_packages_section in actual_history_content

    expected_third_revision = "\n".join(
        [
            "  - packages:",
            "      conda:",
            "        r-base: '*'",
            "        r-devtools: '*'",
            "      r:",
            '        jsonlite: library("devtools"); install_version("jsonlite",version="1.2")',
            "    diff:",
            "      r:",
            "        remove:",
            "        - praise",
            rf'    log: R --quiet --vanilla -e "remove.packages(c(\"praise\"))"',
            rf'    action: R --quiet --vanilla -e "remove.packages(c(\"praise\"))"',
        ]
    )
    index_first_revision = actual_history_content.find("  - packages:")
    index_second_revision = actual_history_content.find(
        "  - packages:", index_first_revision + 1
    )
    index_third_revision = actual_history_content.find(
        "  - packages:", index_second_revision + 1
    )
    third_action = f"    action: {expected_log}"
    index_third_action = actual_history_content.find(
        third_action, index_third_revision
    ) + len(third_action)
    actual_third_revision = actual_history_content[
        index_third_revision:index_third_action
    ]
    assert actual_third_revision == expected_third_revision
Example no. 28
def evaluate(prediction_path, annotation_path, yaml_path, mode):
    # Set minimum threshold.
    min_threshold = 0.01

    # Create dictionary to parse tags
    with open(yaml_path, 'r') as stream:
        yaml_dict = yaml.load(stream, Loader=yaml.Loader)

    # Parse ground truth.
    gt_df = parse_ground_truth(annotation_path, yaml_path)

    # Parse predictions.
    if mode == "fine":
        pred_df = parse_fine_prediction(prediction_path, yaml_path)
    elif mode == "coarse":
        pred_df = parse_coarse_prediction(prediction_path, yaml_path)

    # Check consistency between ground truth and predictions.
    # Make sure the files evaluated in both tables match.
    pred_audio_set = set(pred_df['audio_filename'].tolist())
    true_audio_set = set(gt_df['audio_filename'].tolist())
    if not (pred_audio_set == true_audio_set):
        extra_files = pred_audio_set - true_audio_set
        missing_files = true_audio_set - pred_audio_set
        err_msg =\
            "File mismatch between ground truth and prediction table.\n\n" \
            "Missing files: {}\n\n Extra files: {}"
        raise ValueError(err_msg.format(list(missing_files), list(extra_files)))

    # Make sure the size of the tables match
    if not (len(gt_df) == len(pred_df)):
        err_msg =\
            "Size mismatch between ground truth ({} files) " \
            "and prediction table ({} files)."
        raise ValueError(err_msg.format(len(gt_df), len(pred_df)))

    # Initialize dictionary of DataFrames.
    df_dict = {}

    # Loop over coarse categories.
    for coarse_id in yaml_dict["coarse"]:
        # List columns corresponding to that category
        if mode == "coarse":
            columns = [str(coarse_id)]
        else:
            columns = [column for column in pred_df.columns
                if (str(column).startswith(str(coarse_id))) and
                   ("-" in str(column)) and
                   (not str(column).endswith("X"))]

        # Sort columns in alphanumeric order.
        columns.sort()

        # Restrict prediction to columns of interest.
        restricted_pred_df = pred_df[columns]

        # Restrict ground truth to columns of interest.
        restricted_gt_df = gt_df[columns]

        # Aggregate all prediction values into a "raveled" vector.
        # We make an explicit numpy, so that the original DataFrame
        # is left unchanged.
        thresholds = np.ravel(np.copy(restricted_pred_df.values))

        # Sort in place.
        thresholds.sort()

        # Skip very low values.
        # This is to speed up the computation of the precision-recall curve
        # in the low-precision regime.
        thresholds = thresholds[np.searchsorted(thresholds, min_threshold):]

        # Append a 1 to the list of thresholds.
        # This will cause TP and FP to fall down to zero, but FN will be nonzero.
        # This is useful for estimating the low-recall regime, and it
        # facilitates micro-averaged AUPRC because it provides an upper bound
        # on valid thresholds across coarse categories.
        thresholds = np.append(thresholds, 1.0)

        # List thresholds by restricting observed confidences to unique elements.
        thresholds = np.unique(thresholds)[::-1]

        # Count number of thresholds.
        n_thresholds = len(thresholds)
        TPs = np.zeros((n_thresholds,)).astype('int')
        FPs = np.zeros((n_thresholds,)).astype('int')
        FNs = np.zeros((n_thresholds,)).astype('int')

        # FINE MODE.
        if mode == "fine":
            incomplete_tag = str(coarse_id) + "-X"

            # Load ground truth as numpy array.
            Y_true = restricted_gt_df.values
            is_true_incomplete = gt_df[incomplete_tag].values

            # Loop over thresholds in a decreasing order.
            for i, threshold in enumerate(thresholds):
                # Threshold prediction for complete tag.
                Y_pred = restricted_pred_df.values >= threshold

                # Threshold prediction for incomplete tag.
                is_pred_incomplete =\
                    pred_df[incomplete_tag].values >= threshold

                # Evaluate.
                TPs[i], FPs[i], FNs[i] = confusion_matrix_fine(
                    Y_true, Y_pred, is_true_incomplete, is_pred_incomplete)

        # COARSE MODE.
        elif mode == "coarse":
            # Load ground truth as numpy array.
            Y_true = restricted_gt_df.values

            # Loop over thresholds in a decreasing order.
            for i, threshold in enumerate(thresholds):
                # Threshold prediction.
                Y_pred = restricted_pred_df.values >= threshold

                # Evaluate.
                TPs[i], FPs[i], FNs[i] = confusion_matrix_coarse(Y_true, Y_pred)

        # Build DataFrame from columns.
        eval_df = pd.DataFrame({
            "threshold": thresholds, "TP": TPs, "FP": FPs, "FN": FNs})

        # Add columns for precision, recall, and F1-score.
        # NB: we take the maximum between TPs+FPs and mu=0.5 in the
        # denominator in order to avoid division by zero.
        # This only ever happens if TP+FP < 1, which
        # implies TP = 0 (because TP and FP are nonnegative integers),
        # and therefore a numerator of exactly zero. Therefore, any additive
        # offset mu would do as long as 0 < mu < 1. Choosing mu = 0.5 is
        # purely arbitrary and has no effect on the outcome (i.e. zero).
        mu = 0.5
        eval_df["P"] = TPs / np.maximum(TPs + FPs, mu)

        # Likewise for recalls, although this numerical safeguard is probably
        # less necessary given that TP+FN=0 implies that there are zero
        # positives in the ground truth, which is unlikely but not unheard of.
        eval_df["R"] = TPs / np.maximum(TPs + FNs, mu)

        # Compute F1-scores.
        # NB: we use the harmonic mean formula (2/F = 1/P + 1/R) rather than
        # the more common F = (2*P*R)/(P+R) in order to circumvent the edge case
        # where both P and R are equal to 0 (i.e. TP = 0).
        eval_df["F"] = 2 / (1/eval_df["P"] + 1/eval_df["R"])

        # Store DataFrame in the dictionary.
        df_dict[coarse_id] = eval_df

    # Return dictionary.
    return df_dict
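The precision/recall/F-score arithmetic at the end of evaluate() can be sanity-checked in isolation. A minimal sketch with toy TP/FP/FN counts (invented numbers) showing the mu = 0.5 safeguard and the harmonic-mean F-score:

import numpy as np

TPs = np.array([0, 3, 5])
FPs = np.array([0, 1, 4])
FNs = np.array([5, 2, 0])

mu = 0.5  # keeps the denominator positive when TP + FP == 0 (then TP == 0 too)
P = TPs / np.maximum(TPs + FPs, mu)
R = TPs / np.maximum(TPs + FNs, mu)
with np.errstate(divide='ignore'):  # 1/P is inf where P == 0, so F comes out 0 there
    F = 2 / (1 / P + 1 / R)

print(P, R, F)  # F[1] == 2/3 for P = 0.75, R = 0.6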
Example no. 29
def main():

    parser = argparse.ArgumentParser(description='Parser for MIP testing')
    parser.add_argument(
        '-i', '--infile_path',
        nargs='?',
        type=str,
        default="/home/mlfrantz/Documents/MIP_Research/mip_research/test_fields/test_field_2.csv",
        help='Input file that represents the world',
        )
    parser.add_argument(
        '-o', '--outfile_path',
        nargs='?',
        type=str,
        default="/home/mlfrantz/Documents/MIP_Research/mip_research/Pictures/",
        help='Directory where pictures are stored',
        )
    parser.add_argument(
        '-g','--gradient',
        action='store_true',
        help='By adding this flag you will compute the gradient of the input field.',
        )
    parser.add_argument(
        '-r', '--robots',
        nargs='*',
        type=str,
        default='glider1',
        help='List of robots to plan for. Must be in the robots.yaml file.',
        )
    parser.add_argument(
        '--robots_cfg',
        nargs='?',
        type=str,
        default='cfg/robots.yaml',
        help='Configuration file of robots available for planning.',
        )
    parser.add_argument(
        '--sim_cfg',
        nargs='?',
        type=str,
        default='cfg/sim.yaml',
        help='Simulation-specific configuration file name.',
        )
    parser.add_argument(
        '-n', '--planning_time',
        nargs='?',
        type=float,
        default=5,
        help='Length of the path to be planned in (units).',
        )
    parser.add_argument(
        '-s', '--start_point',
        nargs='*',
        type=int,
        default=(0,0),
        help='Starting points for robots for planning purposes, returns list [x0,y0,x1,y1,...,xN,yN] for 1...N robots.',
        )
    parser.add_argument(
        '-e', '--end_point',
        nargs=2,
        type=int,
        default=[],
        help='Ending point for planning purposes, returns list [x,y].',
        )
    parser.add_argument(
        '-t', '--time_limit',
        nargs='?',
        type=float,
        default=0.0,
        help='Time limit in seconds you want to stop the simulation. Default lets it run until completion.',
        )
    parser.add_argument(
        '-d', '--direction_constr',
        nargs='?',
        type=str,
        default='8_direction',
        help='Sets the direction constraint. Default allows it to move in any of the 8 directions each move. \
        "nsew" only lets it move north-south-east-west. \
        "diag" only lets it move diagonally (NW,NE,SW,SE).',
        )
    parser.add_argument(
        '--same_point',
        action='store_false',
        help='By default it will not allow a point to be visited twice in the same planning period.',
        )
    parser.add_argument(
        '--gen_image',
        action='store_true',
        help='Set to true if you want the image to be saved to file.',
        )
    parser.add_argument(
        '--test',
        action='store_true',
        help='Will load ROMS maps by default, otherwise loads a test map.',
        )
    parser.add_argument(
        '--experiment_name',
        nargs='?',
        type=str,
        default="Test Experiment",
        help='Name of the Experiement you are running',
        )

    args = parser.parse_args()

    # Path length in time (hours).
    Np = args.planning_time

    # Load the map from either ROMS data or test file
    if not args.test:
        # ROMS map
        # Loading Simulation-Specific Parameters

        with open(os.path.expandvars(args.sim_cfg),'rb') as f:
            yaml_sim = yaml.load(f.read(), Loader=yaml.SafeLoader)

        fieldSavePath = '/home/mlfrantz/Documents/MIP_Research/mip_research/cfg/normal_field_{}_{}.npy'.format(str(abs(yaml_sim['sim_world']['center_longitude'])),yaml_sim['sim_world']['center_latitude'])
        
        try:
            field = np.load(fieldSavePath)
            norm_field = np.load(fieldSavePath)
            print("Loaded Map Successfully")
        except IOError:

            wd = World.roms(
                datafile_path=yaml_sim['roms_file'],
                xlen        = yaml_sim['sim_world']['width'],
                ylen        = yaml_sim['sim_world']['height'],
                center      = Location(xlon=yaml_sim['sim_world']['center_longitude'], ylat=yaml_sim['sim_world']['center_latitude']),
                feature     = yaml_sim['science_variable'],
                resolution  = (yaml_sim['sim_world']['resolution'],yaml_sim['sim_world']['resolution']),
                )

            # This is the scalar_field in a static word.
            # The '0' is the first time step and goes up to some max time
            # field = np.copy(wd.scalar_field[:,:,0])
            field = np.copy(wd.scalar_field)

            norm_field = normalize(field)
            field = normalize(field) # This will normalize the field between 0-1

            # fieldSavePath = '/home/mlfrantz/Documents/MIP_Research/mip_research/cfg/normal_field.npy'
            np.save(fieldSavePath, field)

        # Example of an obstacle, make the value very low in desired area
        # field[int(len(field)/4):int(3*len(field)/4),int(len(field)/4):int(3*len(field)/4)] = -100

        field_resolution = (yaml_sim['sim_world']['resolution'],yaml_sim['sim_world']['resolution'])

    else:
        # Problem data, matrix transposed to allow for proper x,y coordinates to be mapped with i,j
        field = np.genfromtxt(args.infile_path, delimiter=',', dtype=float).transpose()
        field_resolution = (1,1)

    if args.gradient:
        grad_field = np.gradient(field)
        mag_grad_field = np.sqrt(grad_field[0]**2 + grad_field[1]**2)

    # Load the robots.yaml Configuration file.
    with open(os.path.expandvars(args.robots_cfg),'rb') as f:
        yaml_mission = yaml.load(f.read(), Loader=yaml.SafeLoader)

    # Get the speed of each robot that we are planning for.
    steps = []
    colors = []
    for key,value in [(k,v) for k,v in yaml_mission.items() if k in args.robots]:
        # Number of 1Km steps the planner can plan for.
        # The expression solves for the number of waypoints, so a +1 is needed for range.
        # For instance, a glider going 0.4m/s would travel 1.44Km in 1 hour so it needs at least 2 waypoints, start and end.
        plan_range = int(np.round(value['vel']*Np*60*60*0.001*(1/min(field_resolution))))+1
        if plan_range > 0:
            steps.append(range(plan_range))
        else:
            steps.append(range(2))
        colors.append(value['color'])

    temp_len = [len(s) for s in steps]
    steps = [steps[np.argmax(temp_len)]]*len(steps) # This makes everything operate at the same time
    velocity_correction = [t/max(temp_len) for t in temp_len] # To account for time difference between arriving to waypoints

    # Make time correction for map forward propagation
    max_steps = max([len(s) for s in steps])
    field_delta = int(max_steps/Np)
    t_step = 0
    k_step = 0
    field_time_steps = []
    for i in range(max_steps):
        field_time_steps.append(t_step)
        k_step += 1
        if k_step == field_delta:
            k_step = 0
            t_step += 1

    # Number of robots we are planning for.
    robots = range(len(args.robots))

    DX = np.arange(field.shape[0]) # Integer values for range of X coordinates
    DY = np.arange(field.shape[1]) # Integer values for range of Y coordinates

    # Starting position contraint
    start = args.start_point
    if len(start) > 2:
        # More than one robot.
        start = [start[i:i + 2] for i in range(0, len(start), 2)]
    else:
        # One robot, extra list needed for nesting reasons.
        start = [start]

    # Greedy one step look ahead

    # Build the direction vectors for checking values
    if args.direction_constr == '8_direction':
        # Check each of the 8 directions (N,S,E,W,NE,NW,SE,SW)
        directions = [(0,1), (0,-1), (1,0), (-1,0), (1,1), (-1,1), (1,-1), (-1,-1)]
    elif args.direction_constr == 'nsew':
        directions = [(0,1), (0,-1), (1,0), (-1,0)] # N-S-E-W
    elif args.direction_constr == 'diag':
        directions = [(1,1), (-1,1), (1,-1), (-1,-1)] # Diag

    startTime = time.time()


    paths = []
    for r in robots:
        for s in steps[r]:
            # Check each of the directions
            if s == 0:
                path = [start[r]]
                continue
            values = np.zeros(len(directions))

            for i,d in enumerate(directions):
                try:
                    if args.same_point:
                        move = [path[-1][0] + velocity_correction[r]*d[0], path[-1][1] + velocity_correction[r]*d[1]]
                        if move[0] >= 0 and move[0] <= field.shape[0]-1 and move[1] >= 0 and move[1] <= field.shape[1]-1:
                            # print(move,move[0], move[1],move[0] >= 0 and move[0] < field.shape[0] and move[1] >= 0 and move[1] < field.shape[1])
                            # Move is inside the field bounds
                            if [round(move[0],3),round(move[1],3)] not in [[round(p[0],3),round(p[1],3)] for p in path]:
                                # print(move)
                                values[i] = bilinear_interpolation(move, field)
                                # values[i] = field[path[-1][0] + d[0], path[-1][1] + d[1], 0]
                            else:
                                continue
                        else:
                            # print(move[0], move[1],move[0] >= 0 and move[0] < field.shape[0] and move[1] >= 0 and move[1] < field.shape[1])
                            # Move would leave the field bounds; skip it
                            continue
                    else:
                        move = [path[-1][0] + velocity_correction[r]*d[0], path[-1][1] + velocity_correction[r]*d[1]]
                        if move[0] >= 0 and move[0] <= field.shape[0]-1 and move[1] >= 0 and move[1] <= field.shape[1]-1:
                            values[i] = bilinear_interpolation(move, field)
                except Exception:
                    continue
            # print(values)
            new_point = [path[-1][0] + velocity_correction[r]*directions[np.argmax(values)][0], path[-1][1] + velocity_correction[r]*directions[np.argmax(values)][1]]
            # print(new_point, values, np.argmax(values), directions[np.argmax(values)])
            path.append(new_point)
        paths.append(path)
    # print(paths)
    runTime = time.time() - startTime

    if args.gen_image:
        wd = World.roms(
            datafile_path=yaml_sim['roms_file'],
            xlen        = yaml_sim['sim_world']['width'],
            ylen        = yaml_sim['sim_world']['height'],
            center      = Location(xlon=yaml_sim['sim_world']['center_longitude'], ylat=yaml_sim['sim_world']['center_latitude']),
            feature     = yaml_sim['science_variable'],
            resolution  = (yaml_sim['sim_world']['resolution'],yaml_sim['sim_world']['resolution']),
            )

        if args.gradient:
            # plt.imshow(mag_grad_field.transpose())#, interpolation='gaussian', cmap= 'gnuplot')
            if not args.test:
                plt.imshow(wd.scalar_field[:,:,0].transpose(), interpolation='gaussian', cmap= 'gnuplot')
                plt.xticks(np.arange(0,len(wd.lon_ticks), (1/min(field_resolution))), np.around(wd.lon_ticks[0::int(1/min(field_resolution))], 2))
                plt.yticks(np.arange(0,len(wd.lat_ticks), (1/min(field_resolution))), np.around(wd.lat_ticks[0::int(1/min(field_resolution))], 2))
                plt.xlabel('Longitude', fontsize=20)
                plt.ylabel('Latitude', fontsize=20)
                plt.text(1.25, 0.5, str(yaml_sim['science_variable']),{'fontsize':20}, horizontalalignment='left', verticalalignment='center', rotation=90, clip_on=False, transform=plt.gca().transAxes)
            else:
                plt.imshow(field.transpose(), interpolation='gaussian', cmap= 'gnuplot')
        else:
            if not args.test:
                plt.imshow(norm_field[:,:,0].transpose(), interpolation='gaussian', cmap= 'gnuplot')
                plt.xticks(np.arange(0,len(wd.lon_ticks), (1/min(field_resolution))), np.around(wd.lon_ticks[0::int(1/min(field_resolution))], 2))
                plt.yticks(np.arange(0,len(wd.lat_ticks), (1/min(field_resolution))), np.around(wd.lat_ticks[0::int(1/min(field_resolution))], 2))
                plt.xlabel('Longitude', fontsize=20)
                plt.ylabel('Latitude', fontsize=20)
                plt.text(1.25, 0.5, "normalized " + str(yaml_sim['science_variable']),{'fontsize':20}, horizontalalignment='left', verticalalignment='center', rotation=90, clip_on=False, transform=plt.gca().transAxes)
            else:
                plt.imshow(field.transpose(), interpolation='gaussian', cmap= 'gnuplot')

        plt.colorbar()
        for i,path in enumerate(paths):
            path_x = [x for x,y in path]
            path_y = [y for x,y in path]
            plt.plot(path_x, path_y, color=colors[i], linewidth=2.0)
            plt.plot(path[0][0], path[0][1], color='g', marker='o')
            plt.plot(path[-1][0], path[-1][1], color='r', marker='o')

        points = []
        for path in paths:
            for i, point in enumerate(path):
                # Annotate each waypoint with its visit order, once per unique point.
                if point not in points:
                    plt.annotate(i + 1, (point[0], point[1]))
                points.append(point)

        robots_str = '_robots_%d' % len(robots)

        path_len_str = '_pathLen_%d' % len(steps[0])

        if len(args.end_point) > 0 :
            end_point_str = '_end%d%d' % (args.end_point[0], args.end_point[1])
        else:
            end_point_str = ''

        if args.gradient:
            grad_str = '_gradient'
        else:
            grad_str = ''

        if args.time_limit > 0:
            time_lim_str = '_timeLim_%d' % args.time_limit
        else:
            time_lim_str = ''

        if args.direction_constr == 'nsew':
            dir_str = '_%s' % args.direction_constr
        elif args.direction_constr == 'diag':
            dir_str = '_%s' % args.direction_constr
        else:
            dir_str = ''

        # print(sum([field[p[0],p[1],0] for p in path]))

        try:
            score_str = '_score_%f' % sum([bilinear_interpolation(p, field) for path in paths for p in path])
        except TypeError:
            score_str = '_no_solution'

        file_string = 'greedy_' + time.strftime("%Y%m%d-%H%M%S") + \
                                                                    robots_str + \
                                                                    path_len_str + \
                                                                    end_point_str + \
                                                                    grad_str + \
                                                                    time_lim_str + \
                                                                    dir_str + \
                                                                    score_str + \
                                                                    '.png'

        print(file_string)
        plt.savefig(args.outfile_path + file_string)
        plt.show()
    else:
        filename = args.outfile_path
        check_empty = os.path.exists(filename)

        if args.direction_constr == 'nsew':
            dir_str = '_%s' % args.direction_constr
        elif args.direction_constr == 'diag':
            dir_str = '_%s' % args.direction_constr
        else:
            dir_str = ''

        constraint_string = dir_str

        try:
            score_str = sum([bilinear_interpolation(p, field) for path in paths for p in path])
        except TypeError:
            score_str = 0  # '_no_solution'

        with open(filename, 'a', newline='') as csvfile:
            fieldnames = [  'Experiment', \
                            'Algorithm', \
                            'Map', \
                            'Map Center', \
                            'Map Resolution', \
                            'Start Point', \
                            'End Point', \
                            'Score', \
                            'Run Time (sec)', \
                            'Budget (hours)', \
                            'Number of Robots', \
                            'Constraints']

            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if not check_empty:
                print("File is empty")
                writer.writeheader()

            writer.writerow({   'Experiment': args.experiment_name, \
                                'Algorithm': 'Greedy', \
                                'Map': str(yaml_sim['roms_file']), \
                                'Map Center': Location(xlon=yaml_sim['sim_world']['center_longitude'], ylat=yaml_sim['sim_world']['center_latitude']).__str__(), \
                                'Map Resolution': (yaml_sim['sim_world']['resolution'],yaml_sim['sim_world']['resolution']), \
                                'Start Point': args.start_point, \
                                'End Point': args.end_point if len(args.end_point) > 0 else 'NA' , \
                                'Score': score_str, \
                                'Run Time (sec)': runTime, \
                                'Budget (hours)': args.planning_time, \
                                'Number of Robots': len(args.robots), \
                                'Constraints': constraint_string})
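normalize() and bilinear_interpolation() are called throughout this example but defined elsewhere. Plausible minimal stand-ins are sketched below; the clamping behavior and the field[x, y] indexing are assumptions, not the original implementations:

import numpy as np

def normalize(field):
    # Rescale field values to the [0, 1] range.
    fmin, fmax = np.nanmin(field), np.nanmax(field)
    return (field - fmin) / (fmax - fmin)

def bilinear_interpolation(point, field):
    # Weighted average of the four grid cells surrounding a fractional (x, y),
    # clamped so the 2x2 neighborhood stays inside a 2-D field.
    x, y = point
    x0 = int(np.clip(np.floor(x), 0, field.shape[0] - 2))
    y0 = int(np.clip(np.floor(y), 0, field.shape[1] - 2))
    dx, dy = x - x0, y - y0
    return (field[x0, y0] * (1 - dx) * (1 - dy)
            + field[x0 + 1, y0] * dx * (1 - dy)
            + field[x0, y0 + 1] * (1 - dx) * dy
            + field[x0 + 1, y0 + 1] * dx * dy)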
Example no. 30
def parse_fine_prediction(pred_csv_path, yaml_path):
    """
    Parse fine-level predictions from a CSV file containing both fine-level
    and coarse-level predictions (and possibly additional metadata).
    Returns a Pandas DataFrame in which the column names are mixed (coarse-fine)
    IDs of the form 1-1, 1-2, 1-3, ..., 1-X, 2-1, 2-2, 2-3, ... 2-X, 3-1, etc.
    Parameters
    ----------
    pred_csv_path: string
        Path to the CSV file containing predictions.
    yaml_path: string
        Path to the YAML file containing fine taxonomy.
    Returns
    -------
    pred_fine_df: DataFrame
        Fine-level complete predictions.
    """

    # Create dictionary to parse tags
    with open(yaml_path, 'r') as stream:
        yaml_dict = yaml.load(stream, Loader=yaml.Loader)

    # Collect tag names as strings and map them to mixed (coarse-fine) ID pairs.
    # The "mixed key" is a hyphenation of the coarse ID and fine ID.
    fine_dict = {}
    for coarse_id in yaml_dict["fine"]:
        for fine_id in yaml_dict["fine"][coarse_id]:
            mixed_key = "-".join([str(coarse_id), str(fine_id)])
            fine_dict[mixed_key] = "_".join([
                mixed_key, yaml_dict["fine"][coarse_id][fine_id]])

    # Invert the key-value relationship between mixed key and tag.
    # Now, tags are the keys, and mixed keys (coarse-fine IDs) are the values.
    # This is possible because tags are unique.
    rev_fine_dict = {fine_dict[k]: k for k in fine_dict}

    # Read comma-separated values with the Pandas library
    pred_df = pd.read_csv(pred_csv_path)

    # Assign a predicted column to each mixed key, by using the tag as an
    # intermediate hashing step.
    pred_fine_dict = {}
    for f in sorted(rev_fine_dict.keys()):
        if f in pred_df:
            pred_fine_dict[rev_fine_dict[f]] = pred_df[f]
        else:
            pred_fine_dict[rev_fine_dict[f]] = np.zeros((len(pred_df),))
            warnings.warn("Column not found: " + f)

    # Loop over coarse tags.
    n_samples = len(pred_df)
    coarse_dict = yaml_dict["coarse"]
    for coarse_id in yaml_dict["coarse"]:
        # Construct incomplete fine tag by appending -X to the coarse tag.
        incomplete_tag = str(coarse_id) + "-X"

        # If the incomplete tag is not in the prediction, append a column of zeros.
        # This is the case e.g. for coarse ID 7 ("dogs") which has a single
        # fine-level tag ("7-1_dog-barking-whining") and thus no incomplete
        # tag 7-X.
        if incomplete_tag not in fine_dict.keys():
            pred_fine_dict[incomplete_tag] =\
                np.zeros((n_samples,)).astype('int')


    # Copy over the audio filename strings corresponding to each sample.
    pred_fine_dict["audio_filename"] = pred_df["audio_filename"]

    # Build a new Pandas DataFrame with mixed keys as column names.
    pred_fine_df = pd.DataFrame.from_dict(pred_fine_dict)

    # Return output in DataFrame format.
    # Column names are 1-1, 1-2, 1-3 ... 1-X, 2-1, 2-2, 2-3 ... 2-X, 3-1, etc.
    return pred_fine_df.sort_values('audio_filename')
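The key-value inversion in the middle of the function relies on tag strings being unique; the trick in two lines, with hypothetical tags:

fine_dict = {"1-1": "1-1_small-sounding-engine", "1-X": "1-X_engine-of-uncertain-size"}
rev_fine_dict = {v: k for k, v in fine_dict.items()}
assert rev_fine_dict["1-1_small-sounding-engine"] == "1-1"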
Example no. 31
def read_technology(url):
    filename = url.replace(
        "https://github.com/cloudmesh/technologies/blob/master",
        "../../cloudmesh/technologies")
    with open(filename, "r") as f:
        content = f.read()
    return content


#pprint (files)
readmes = {}
for readme in files:
    with open(readme, 'r') as stream:
        filename = readme.replace("/README.yml", "")  # use dir name or so
        try:
            d = yaml.load(stream, Loader=yaml.SafeLoader)
            readmes[filename] = d
        except yaml.YAMLError as exc:
            print(exc)

template = {}
template['paper'] = '''
@InBook{open}{LABEL},
  author =       "{name}",
  editor =       "Gregor von Laszewski",
  title =        "Paper: {title}",
  publisher =    "Indiana University",
  year =         "2018",
  volume =       "Fall 2018",
  series =       "Class",
  type =         "Paper",
Example no. 32
def parse_ground_truth(annotation_path, yaml_path):
    """
    Parse ground truth annotations from a CSV file containing both fine-level
    and coarse-level predictions (and possibly additional metadata).
    Returns a Pandas DataFrame in which the column names are coarse
    IDs of the form 1, 2, 3 etc.
    Parameters
    ----------
    annotation_path: string
        Path to the CSV file containing predictions.
    yaml_path: string
        Path to the YAML file containing coarse taxonomy.
    Returns
    -------
    gt_df: DataFrame
        Ground truth.
    """
    # Create dictionary to parse tags
    with open(yaml_path, 'r') as stream:
        yaml_dict = yaml.load(stream, Loader=yaml.Loader)

    # Load CSV file into a Pandas DataFrame.
    ann_df = pd.read_csv(annotation_path)

    # Restrict to ground truth ("annotator zero").
    gt_df = ann_df[
        (ann_df["annotator_id"]==0) & (ann_df["split"]=="validate")]

    # Rename coarse columns.
    coarse_dict = yaml_dict["coarse"]
    coarse_renaming = {
        "_".join([str(c), coarse_dict[c], "presence"]): str(c)
        for c in coarse_dict}
    gt_df = gt_df.rename(columns=coarse_renaming)

    # Collect tag names as strings and map them to mixed (coarse-fine) ID pairs.
    # The "mixed key" is a hyphenation of the coarse ID and fine ID.
    fine_dict = {}
    for coarse_id in yaml_dict["fine"]:
        for fine_id in yaml_dict["fine"][coarse_id]:
            mixed_key = "-".join([str(coarse_id), str(fine_id)])
            fine_dict[mixed_key] = yaml_dict["fine"][coarse_id][fine_id]

    # Rename fine columns.
    fine_renaming = {"_".join([k, fine_dict[k], "presence"]): k
        for k in fine_dict}
    gt_df = gt_df.rename(columns=fine_renaming)

    # Loop over coarse tags.
    n_samples = len(gt_df)
    coarse_dict = yaml_dict["coarse"]
    for coarse_id in yaml_dict["coarse"]:
        # Construct incomplete fine tag by appending -X to the coarse tag.
        incomplete_tag = str(coarse_id) + "-X"

        # If the incomplete tag is not in the prediction, append a column of zeros.
        # This is the case e.g. for coarse ID 7 ("dogs") which has a single
        # fine-level tag ("7-1_dog-barking-whining") and thus no incomplete
        # tag 7-X.
        if incomplete_tag not in gt_df.columns:
            gt_df[incomplete_tag] = np.zeros((n_samples,)).astype('int')

    # Return output in DataFrame format.
    return gt_df.sort_values('audio_filename')


annotation_path = '/home/liuzhuangzhuang/DCASE_2020_TASK_5DATA/annotations.csv'

yaml_path = '/home/liuzhuangzhuang/DCASE_2020_TASK_5DATA/dcase-ust-taxonomy.yaml'
prediction_path = '/home/liuzhuangzhuang/pycharm_P/task5-多任务0.0/work_space/submissions/main/logmel_64frames_64melbins/taxonomy_level=fine/holdout_fold=1/Cnn_9layers_AvgPooling/submission.csv'
annotation_2019 = '/home/liuzhuangzhuang/pycharm_P/dataset_root/annotations.csv'
min_threshold = 0.01

# Create dictionary to parse tags
with open(yaml_path, 'r') as stream:
    yaml_dict = yaml.load(stream, Loader=yaml.Loader)

# Parse ground truth.
gt_df = parse_ground_truth(annotation_path, yaml_path)

# Parse predictions.
pred_df = parse_fine_prediction(prediction_path, yaml_path)
# if mode == "fine":
#     pred_df = parse_fine_prediction(prediction_path, yaml_path)
# elif mode == "coarse":
#     pred_df = parse_coarse_prediction(prediction_path, yaml_path)

# Check consistency between ground truth and predictions.
# Make sure the files evaluated in both tables match.
pred_audio_set = set(pred_df['audio_filename'].tolist())
true_audio_set = set(gt_df['audio_filename'].tolist())
Example no. 34
    def config(self, name="~/.cloudmesh/cloudmesh4.yaml"):
        name = os.path.expanduser(name)
        # read in the yaml file
        with open(name, "r") as stream:
            self._conf = yaml.load(stream, Loader=yaml.SafeLoader)
            print(yaml.dump(self._conf))
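A round-trip check of the load/dump pairing used by the method above, on invented config data:

import yaml

conf = {"cloudmesh": {"profile": {"user": "alice"}}}  # invented sample data
dumped = yaml.dump(conf)
print(dumped)  # mirrors the debug print in config()
assert yaml.load(dumped, Loader=yaml.SafeLoader) == conf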