def decision_function(model, Xi):
    """Compute the per-timestep score matrix for sequence Xi.

    Sums every potential's contribution into an (n_nodes, T) score
    matrix; it does NOT pick the best label per timestep.

    Parameters
    ----------
    model : object exposing n_nodes, potentials (dict of objects with a
        .compute(model, Xi, score) method), is_latent, and — when latent —
        n_latent and n_classes.
    Xi : ndarray of shape (F, T) features-by-timesteps, or a list of such
        arrays (one per sequence).

    Returns
    -------
    ndarray of shape (n_nodes, T) (reduced to (n_classes, T) when the
    model is latent), or a list of such arrays when Xi is a list.
    """
    # Recurse over a batch of sequences.
    if type(Xi) is list:
        return [model.decision_function(Xi[i]) for i in range(len(Xi))]

    # Ensure Xi is FxT. BUG FIX: the original compared shape[0] with
    # itself (always False), so the transpose could never fire. Assumes
    # T >= F (more timesteps than features) — TODO confirm with callers.
    if Xi.shape[0] > Xi.shape[1]:
        Xi = Xi.T

    _, n_timesteps = Xi.shape
    n_nodes = model.n_nodes

    # Accumulate every potential into the score matrix.
    score = np.zeros([n_nodes, n_timesteps], np.float64)
    for key in model.potentials:
        score = model.potentials[key].compute(model, Xi, score)

    # Collapse latent states down to per-class scores.
    if model.is_latent:
        score = ssvm.reduce_latent_states(score, model.n_latent,
                                          model.n_classes)

    return score
# Example 2
    def decision_function(model, Xi):
        """Compute the per-timestep score matrix for sequence Xi.

        Sums every potential's contribution into an (n_nodes, T) score
        matrix; it does NOT pick the best label per timestep.

        Parameters
        ----------
        model : object exposing n_nodes, potentials (dict of objects with
            a .compute(model, Xi, score) method), is_latent, and — when
            latent — n_latent and n_classes.
        Xi : ndarray of shape (F, T), or a list of such arrays.

        Returns
        -------
        ndarray of shape (n_nodes, T) (reduced to (n_classes, T) when the
        model is latent), or a list of such arrays when Xi is a list.
        """
        # Recurse over a batch of sequences.
        if type(Xi) is list:
            return [model.decision_function(Xi[i]) for i in range(len(Xi))]

        # Ensure Xi is FxT. BUG FIX: the original compared shape[0] with
        # itself (always False), so the transpose could never fire.
        # Assumes T >= F — TODO confirm with callers.
        if Xi.shape[0] > Xi.shape[1]:
            Xi = Xi.T

        _, n_timesteps = Xi.shape
        n_nodes = model.n_nodes

        # Accumulate every potential into the score matrix.
        score = np.zeros([n_nodes, n_timesteps], np.float64)
        for key in model.potentials:
            score = model.potentials[key].compute(model, Xi, score)

        # Collapse latent states down to per-class scores.
        if model.is_latent:
            score = ssvm.reduce_latent_states(score, model.n_latent,
                                              model.n_classes)

        return score
# Example 3
    def predict(model, Xi, Yi=None, is_training=False, output_latent=False, inference=None, known_order=None):
        """Compute the best label for each timestep of sequence Xi.

        Parameters
        ----------
        model : object exposing n_nodes, potentials, is_latent and
            inference_type (plus n_latent/n_classes, filter_len, max_segs,
            ws where the chosen inference path needs them).
        Yi : optional ground-truth labels (length-T array); used for loss
            augmentation when is_training is True.
        is_training : add loss-augmented unaries before inference.
        output_latent : keep latent states instead of reducing to classes.
        inference : optional override of model.inference_type
            ("framewise", "filtered", or a string containing "segmental",
            optionally also "normalized").
        known_order : optional known segment ordering for segmental
            inference.

        Returns
        -------
        Array of per-timestep label indices (a list of such arrays when
        Xi is a list).
        """
        # Recurse over a batch of sequences.
        if type(Xi) is list:
            out = []
            for i in range(len(Xi)):
                Yi_ = None if Yi is None else Yi[i]
                out += [model.predict(Xi[i], Yi_, is_training, output_latent, inference, known_order)]
            return out

        # Ensure Xi is FxT. BUG FIX: the original compared shape[0] with
        # itself (always False), so the transpose could never fire.
        # Assumes T >= F — TODO confirm with callers.
        if Xi.shape[0] > Xi.shape[1]:
            Xi = Xi.T

        if Yi is not None:
            assert Xi.shape[1] == Yi.shape[0], "Error: Xi and Yi are of shapes {} and {}".format(Xi.shape[1], Yi.shape[0])

        _, n_timesteps = Xi.shape
        n_nodes = model.n_nodes

        # Initialize score
        score = np.zeros([n_nodes, n_timesteps], np.float64)

        # Loss-augmented inference (training only): bias the unaries so
        # margin violations are surfaced.
        if is_training:
            if model.is_latent:
                score += ssvm.latent_loss_augmented_unaries(score, Yi, model.n_latent)
            else:
                score += ssvm.loss_augmented_unaries(score, Yi)

        # Add every potential's contribution to the score.
        for key in model.potentials:
            score = model.potentials[key].compute(model, Xi, score)

        # Collapse latent states to class scores unless training or the
        # caller asked for the latent labels.
        if model.is_latent and (not is_training and not output_latent):
            score = ssvm.reduce_latent_states(score, model.n_latent, model.n_classes)

        # Get predictions. BUG FIX: compare strings with == — identity
        # (`is`) against a literal depends on interning and is unreliable.
        inference_type = inference if inference is not None else model.inference_type
        if inference_type == "framewise":
            path = score.argmax(0)

        elif inference_type == "filtered":
            assert hasattr(model, "filter_len"), "filter_len must be set"
            path = score.argmax(0)
            path = nd.median_filter(path, model.filter_len)

        elif "segmental" in inference_type:
            normalized = "normalized" in inference_type

            if known_order is not None:
                path = infer_known_ordering(score.T, known_order)
            else:
                assert hasattr(model, "max_segs"), "max_segs must be set"
                # Use the segmental pairwise weights if such a term exists.
                seg_term = [p.name for p in model.potentials.values() if type(p) is pw.segmental_pairwise]
                if len(seg_term) >= 1:
                    path = segmental_inference(score.T, model.max_segs, pw=model.ws[seg_term[0]], normalized=normalized)
                else:
                    path = segmental_inference(score.T, model.max_segs, normalized=normalized)

        else:
            # Previously fell through and raised an opaque UnboundLocalError.
            raise ValueError("Unknown inference type: {}".format(inference_type))

        return path
    def predict(model,
                Xi,
                Yi=None,
                is_training=False,
                output_latent=False,
                inference=None,
                known_order=None):
        """Compute the best label for each timestep of sequence Xi.

        Parameters
        ----------
        model : object exposing n_nodes, potentials, is_latent and
            inference_type (plus n_latent/n_classes, filter_len, max_segs,
            ws where the chosen inference path needs them).
        Yi : optional ground-truth labels (length-T array); used for loss
            augmentation when is_training is True.
        is_training : add loss-augmented unaries before inference.
        output_latent : keep latent states instead of reducing to classes.
        inference : optional override of model.inference_type
            ("framewise", "filtered", or a string containing "segmental",
            optionally also "normalized").
        known_order : optional known segment ordering for segmental
            inference.

        Returns
        -------
        Array of per-timestep label indices (a list of such arrays when
        Xi is a list).
        """
        # Recurse over a batch of sequences.
        if type(Xi) is list:
            out = []
            for i in range(len(Xi)):
                Yi_ = None if Yi is None else Yi[i]
                out += [
                    model.predict(Xi[i], Yi_, is_training, output_latent,
                                  inference, known_order)
                ]
            return out

        # Ensure Xi is FxT. BUG FIX: the original compared shape[0] with
        # itself (always False), so the transpose could never fire.
        # Assumes T >= F — TODO confirm with callers.
        if Xi.shape[0] > Xi.shape[1]:
            Xi = Xi.T

        if Yi is not None:
            assert Xi.shape[1] == Yi.shape[
                0], "Error: Xi and Yi are of shapes {} and {}".format(
                    Xi.shape[1], Yi.shape[0])

        _, n_timesteps = Xi.shape
        n_nodes = model.n_nodes

        # Initialize score
        score = np.zeros([n_nodes, n_timesteps], np.float64)

        # Loss-augmented inference (training only): bias the unaries so
        # margin violations are surfaced.
        if is_training:
            if model.is_latent:
                score += ssvm.latent_loss_augmented_unaries(
                    score, Yi, model.n_latent)
            else:
                score += ssvm.loss_augmented_unaries(score, Yi)

        # Add every potential's contribution to the score.
        for key in model.potentials:
            score = model.potentials[key].compute(model, Xi, score)

        # Collapse latent states to class scores unless training or the
        # caller asked for the latent labels.
        if model.is_latent and (not is_training and not output_latent):
            score = ssvm.reduce_latent_states(score, model.n_latent,
                                              model.n_classes)

        # Get predictions. BUG FIX: compare strings with == — identity
        # (`is`) against a literal depends on interning and is unreliable.
        inference_type = inference if inference is not None else model.inference_type
        if inference_type == "framewise":
            path = score.argmax(0)

        elif inference_type == "filtered":
            assert hasattr(model, "filter_len"), "filter_len must be set"
            path = score.argmax(0)
            path = nd.median_filter(path, model.filter_len)

        elif "segmental" in inference_type:
            normalized = "normalized" in inference_type

            if known_order is not None:
                path = infer_known_ordering(score.T, known_order)
            else:
                assert hasattr(model, "max_segs"), "max_segs must be set"
                # Use the segmental pairwise weights if such a term exists.
                seg_term = [
                    p.name for p in model.potentials.values()
                    if type(p) is pw.segmental_pairwise
                ]
                if len(seg_term) >= 1:
                    path = segmental_inference(score.T,
                                               model.max_segs,
                                               pw=model.ws[seg_term[0]],
                                               normalized=normalized)
                else:
                    path = segmental_inference(score.T,
                                               model.max_segs,
                                               normalized=normalized)

        else:
            # Previously fell through and raised an opaque UnboundLocalError.
            raise ValueError("Unknown inference type: {}".format(inference_type))

        return path