Example #1
 def _check_args(self):
     """ Perfunctory argument checks and modification.
     """
     args = self.args
     # Check scp and labels
     common.CHK_GE(len(args.valid_scp), 1)
     common.CHK_EQ(len(args.valid_labels) % len(args.valid_scp), 0)
     labels_per_valid_scp = len(args.valid_labels) // len(args.valid_scp)  # floor division: divisibility checked above
     common.CHK_EQ(len(args.train_labels), labels_per_valid_scp)
     # Check task
     if len(args.task) == 1:
         args.task = args.task * len(args.train_labels)
     common.CHK_EQ(len(args.task), len(args.train_labels))
     for task in args.task:
         common.CHK_VALS(task, TASKS)
     if args.num_classes is not None:
         common.CHK_EQ(len(args.task), len(args.num_classes))
     if args.task_weights is None:
         args.task_weights = [1.0] * len(args.task)
     common.CHK_EQ(len(args.task), len(args.task_weights))
     # Check others
     for layer_type in args.layer_type:
         common.CHK_VALS(layer_type, LAYERS.keys())
     common.CHK_VALS(args.optimizer, OPTIMIZERS.keys())
     common.CHK_VALS(args.lrate, LRATES.keys())
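
These examples lean on project-local guard helpers from common. Their implementations aren't shown on this page; the following is a minimal sketch consistent with how they are called (an assumption, not the project's actual code):

def CHK_GE(val, bound):
    # Fail loudly if val < bound.
    assert val >= bound, '{} < {}'.format(val, bound)

def CHK_EQ(val, expected):
    # Fail loudly if val != expected.
    assert val == expected, '{} != {}'.format(val, expected)

def CHK_VALS(val, allowed):
    # Fail loudly if val is not among the allowed values.
    assert val in allowed, '{} not in {}'.format(val, list(allowed))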
Example #2
 def _check_args(self):
     """ Perfunctory argument checks.
     """
     args = self.args
     common.CHK_GE(len(args.valid_scp), 1)
     common.CHK_EQ(len(args.valid_scp), len(args.valid_labels))
     common.CHK_VALS(args.task, TASKS)
     for layer_type in args.layer_type:
         common.CHK_VALS(layer_type, LAYERS.keys())
     common.CHK_VALS(args.optimizer, OPTIMIZERS.keys())
     common.CHK_VALS(args.lrate, LRATES.keys())
Example #3
def init_metric(m):
    """ Convert a metric spec (name, function, or class) into a metric object.
    """
    if isinstance(m, str):
        common.CHK_VALS(m, VALIDATION_METRICS.keys())
        m = VALIDATION_METRICS[m]
    if inspect.isfunction(m):
        return KerasMetric(m)
    assert issubclass(m, AbstractMetric)
    return m()
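
A self-contained sketch of the three dispatch paths in init_metric, using stub stand-ins for the names it expects (KerasMetric, AbstractMetric, VALIDATION_METRICS, and the guard are all hypothetical here, not the project's definitions):

import inspect

class AbstractMetric(object):
    """ Stub base class for custom metrics. """

class FrameErrorRate(AbstractMetric):
    """ Stub custom metric. """

class KerasMetric(object):
    """ Stub wrapper around a plain metric function. """
    def __init__(self, fn):
        self.fn = fn

def acc(y_true, y_pred):
    """ Stub metric function. """

VALIDATION_METRICS = {'acc': acc}

class common(object):
    @staticmethod
    def CHK_VALS(val, allowed):
        assert val in allowed

# With init_metric defined as above:
assert isinstance(init_metric('acc'), KerasMetric)              # name -> registry -> wrap
assert isinstance(init_metric(acc), KerasMetric)                # function -> wrap
assert isinstance(init_metric(FrameErrorRate), FrameErrorRate)  # class -> instance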
Example #4
 def get_end_time(self, unit='s'):
     """ Return end time in seconds ('s') or milliseconds ('ms').
     """
     common.CHK_VALS(unit, ['s', 'ms'])
     return self.xmax if unit == 's' else common.s_to_ms(self.xmax)
Example #5
 def get_start_time(self, unit='s'):
     """ Return start time in seconds ('s') or milliseconds ('ms').
     """
     common.CHK_VALS(unit, ['s', 'ms'])
     return self.xmin if unit == 's' else common.s_to_ms(self.xmin)
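
Examples #4 and #5 store times in seconds and convert on demand; common.s_to_ms (and its inverse common.ms_to_s, used in the next two examples) are presumably plain scale conversions along these lines (an assumption about project helpers, not their actual source):

def s_to_ms(t):
    # Seconds to milliseconds.
    return 1000.0 * t

def ms_to_s(t):
    # Milliseconds to seconds.
    return t / 1000.0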
Example #6
 def get_end_time(self, unit='s'):
     """ Return end time in seconds ('s') or milliseconds ('ms').
     """
     common.CHK_VALS(unit, ['s', 'ms'])
     end_time_ms = float((self.end_frame + 1) * self.step_ms)
     return end_time_ms if unit == 'ms' else common.ms_to_s(end_time_ms)
Example #7
 def get_start_time(self, unit='s'):
     """ Return start time in seconds ('s') or milliseconds ('ms').
     """
     common.CHK_VALS(unit, ['s', 'ms'])
     start_time_ms = float(self.start_frame * self.step_ms)
     return start_time_ms if unit == 'ms' else common.ms_to_s(start_time_ms)
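
The frame-based variants in Examples #6 and #7 assume frame i spans [i * step_ms, (i + 1) * step_ms), which is why end_frame gets the +1. A quick worked check with a hypothetical 10 ms step:

step_ms = 10.0
start_frame, end_frame = 5, 9   # frames 5..9 inclusive

start_time_ms = float(start_frame * step_ms)      # 50.0
end_time_ms = float((end_frame + 1) * step_ms)    # 100.0, end of frame 9

assert (start_time_ms, end_time_ms) == (50.0, 100.0)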
Example #8
def _train_mt(model,
              buf_train,
              ntasks=None,
              metrics=['acc'],
              stateful=False,
              loss='sparse_categorical_crossentropy',
              report_interval=20):
    if ntasks is None:
        ntasks = len(buf_train.dataset().get_frame_labels())
    # One running list of per-chunk values for each (task, metric) pair.
    train_metrics = [[[] for _ in metrics] for _ in range(ntasks)]
    if not isinstance(loss, list):
        loss = [loss] * ntasks
    chunks_read = 0
    while True:
        # Load data chunk
        X = buf_train.read_next_chunk()
        if X is None:
            break
        report = chunks_read % report_interval == 0
        chunks_read += 1
        # Train
        train_Xs, train_ys, _, utt_indices = X
        if report:
            io.log('Training on chunk {} ({} utts, max dur {})'.format(
                buf_train.get_progress(), len(utt_indices),
                train_Xs[0].shape[1]))
        for train_X, train_y in zip(train_Xs, train_ys):
            for i in range(len(train_y)):
                if loss[i] == 'sparse_categorical_crossentropy':
                    train_y[i] = numpy.expand_dims(train_y[i], -1)
            history = model.fit(train_X,
                                train_y,
                                batch_size=len(utt_indices),
                                nb_epoch=1,  # Keras 1.x name ('epochs' in Keras 2+)
                                verbose=0)
            # Save metrics (this assumes the model has more than one output)
            # https://github.com/fchollet/keras/blob/master/keras/engine/training.py
            key_cache = {}
            for t in range(ntasks):
                for i in range(len(metrics)):
                    key = model.output_layers[t].name + '_' + metrics[i]
                    # HACKY: special case when there's only 1 task
                    if ntasks == 1:
                        key = metrics[i]
                    # HACKY: account for duplicate output layer names
                    if key not in history.history:
                        if key not in key_cache:
                            key_cache[key] = 0
                        key_cache[key] += 1
                        key = '{}_{}'.format(key, key_cache[key])
                    common.CHK_VALS(key, history.history.keys())
                    train_metrics[t][i].append(history.history[key][0])
        if report:
            last_metrics = []
            for t in range(ntasks):
                last_metric = []
                for i in range(len(metrics)):
                    last_metric.append(train_metrics[t][i][-1])
                last_metrics.append(last_metric)
            io.log('...last metrics = {}'.format(last_metrics))
        if stateful:
            model.reset_states()
    for t in range(ntasks):
        for i in range(len(metrics)):
            train_metrics[t][i] = numpy.mean(train_metrics[t][i])
            if metrics[i] == 'acc':  # We want training error in percentage
                train_metrics[t][i] = 100 * (1.0 - train_metrics[t][i])
    return train_metrics
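
The key_cache block above works around Keras composing history keys from output layer names, which collide when two outputs share a name. A standalone sketch of the same renaming logic, with a hypothetical history dict in which Keras suffixed both duplicates:

# Hypothetical: both output layers are named 'out', so the metrics were
# stored under suffixed keys rather than the bare 'out_acc'.
history = {'out_acc_1': [0.91], 'out_acc_2': [0.88]}
layer_names = ['out', 'out']

key_cache = {}
resolved = []
for name in layer_names:
    key = name + '_acc'
    if key not in history:
        # Same disambiguation as in _train_mt: count collisions and
        # append the running index.
        if key not in key_cache:
            key_cache[key] = 0
        key_cache[key] += 1
        key = '{}_{}'.format(key, key_cache[key])
    resolved.append(key)

assert resolved == ['out_acc_1', 'out_acc_2']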