Example #1
import os
import pickle
import warnings

import numpy as np

# Pipeline, Center_surround_diff and seizure_type_data are project-specific
# names assumed to be importable from the surrounding codebase.
def create_s2(window_length, window_step, fft_min_freq, fft_max_freq,
              sampling_frequency, file_path):

    warnings.filterwarnings("ignore")
    with open(file_path, 'rb') as f:
        type_data = pickle.load(f)
    pipeline = Pipeline([Center_surround_diff()])
    time_series_data = type_data.data
    start, step = 0, int(np.floor(window_step * sampling_frequency))
    stop = start + int(np.floor(window_length * sampling_frequency))
    s2_data = []

    while stop < time_series_data.shape[1]:
        signal_window = time_series_data[:, start:stop]
        window = pipeline.apply(signal_window)
        s2_data.append(window)
        start, stop = start + step, stop + step

    s2_data = np.array(s2_data)
    named_data = seizure_type_data(patient_id=type_data.patient_id,
                                   seizure_type=type_data.seizure_type,
                                   data=type_data.data,
                                   s1=type_data.s1,
                                   s2=s2_data)

    return named_data, os.path.basename(file_path)
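A possible invocation, as a sketch only; the parameter values and the pickle
path below are hypothetical, not taken from the original code:

# Hypothetical values: 1 s windows stepped by 0.5 s at 250 Hz sampling.
s2_record, fname = create_s2(window_length=1.0, window_step=0.5,
                             fft_min_freq=1, fft_max_freq=24,
                             sampling_frequency=250,
                             file_path='data/sample_seizure.pkl')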
Example #2
import os
import pickle
import warnings

import numpy as np

# Pipeline, Substract_average_plus_P_2, IFFT, Smooth_Gaussian and
# seizure_type_data are project-specific names assumed to be importable
# from the surrounding codebase.
def create_s1(window_length, window_step, fft_min_freq, fft_max_freq,
              sampling_frequency, file_path):

    warnings.filterwarnings("ignore")
    with open(file_path, 'rb') as f:
        type_data = pickle.load(f)
    pipeline = Pipeline(
        [Substract_average_plus_P_2(),
         IFFT(), Smooth_Gaussian()])
    time_series_data = type_data.data
    start, step = 0, int(np.floor(window_step * sampling_frequency))
    stop = start + int(np.floor(window_length * sampling_frequency))
    s1_data = []

    while stop < time_series_data.shape[1]:
        signal_window = time_series_data[:, start:stop]
        window = pipeline.apply(signal_window)
        s1_data.append(window)
        start, stop = start + step, stop + step

    s1_data = np.array(s1_data)
    named_data = seizure_type_data(patient_id=type_data.patient_id,
                                   seizure_type=type_data.seizure_type,
                                   data=type_data.data,
                                   s1=s1_data)

    return named_data, os.path.basename(file_path)
Example #3
import os
import pickle
import warnings

import numpy as np

# Pipeline, Normalise, Concatenation, RGB_0_255 and seizure_type_data are
# project-specific names assumed to be importable from the surrounding
# codebase.
def create_d(window_length, window_step, fft_min_freq, fft_max_freq,
             sampling_frequency, file_path):

    warnings.filterwarnings("ignore")
    with open(file_path, 'rb') as f:
        type_data = pickle.load(f)
    # Three separate pipelines are needed, because concatenation takes a
    # different kind of input (three feature maps).
    pipeline1 = Pipeline([Normalise()])
    pipeline2 = Pipeline([Concatenation()])
    pipeline3 = Pipeline([RGB_0_255()])

    # The three feature maps
    data_ft = type_data.data
    data_s1 = type_data.s1
    data_s2 = type_data.s2

    start, step = 0, int(np.floor(window_step * sampling_frequency))
    stop = start + int(np.floor(window_length * sampling_frequency))
    d_data = []

    while stop < data_ft.shape[1]:
        # Window definitions; the maps have the same size and shape, so
        # one loop serves all three
        window_ft = data_ft[:, start:stop]
        window_s1 = data_s1[:, start:stop]
        window_s2 = data_s2[:, start:stop]
        # Normalise each window
        window_ft_norm = pipeline1.apply(window_ft)
        window_s1_norm = pipeline1.apply(window_s1)
        window_s2_norm = pipeline1.apply(window_s2)
        # Concatenate the normalised maps
        d_norm = pipeline2.apply(window_ft_norm, window_s1_norm,
                                 window_s2_norm)
        # Convert the concatenated map to RGB values in the 0-255 range
        d_rgb = pipeline3.apply(d_norm)

        d_data.append(d_rgb)
        start, stop = start + step, stop + step

    d_data = np.array(d_data)
    named_data = seizure_type_data(patient_id=type_data.patient_id,
                                   seizure_type=type_data.seizure_type,
                                   data=d_data)

    return named_data, os.path.basename(file_path)
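Examples #1-#3 all rely on an apply-style Pipeline that chains transform
objects over each window. The class below is a minimal sketch of the
interface those calls imply; it is an illustration under that assumption,
not the project's actual implementation:

class Pipeline:
    # Illustrative sketch only, not the project's actual class: holds a
    # list of transform objects and applies each one in sequence.
    def __init__(self, transforms):
        self.transforms = transforms

    def apply(self, *inputs):
        # Extra positional inputs only reach the first transform
        # (e.g. Concatenation in example #3 receives three maps).
        data = self.transforms[0].apply(*inputs)
        for transform in self.transforms[1:]:
            data = transform.apply(data)
        return data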
Example #4
from utils.pipeline import Pipeline

# TrainModelPipeline, PlannerTranslatePipeline and DEFAULT_TEST_CONFIG are
# project-specific names assumed to be importable; the Pipeline import
# above follows the convention of examples #6-#8.
def model_pipeline(train_config):
    pipeline = Pipeline()
    pipeline.enqueue(
        "train-model", "Train Model",
        TrainModelPipeline.mutate({
            "train-config": train_config,
            "test-config": DEFAULT_TEST_CONFIG
        }))
    pipeline.enqueue(
        "translate-naive", "Translate Naive Plans",
        PlannerTranslatePipeline.mutate({"planner-name": "naive"}))
    pipeline.enqueue(
        "translate-neural", "Translate Neural Plans",
        PlannerTranslatePipeline.mutate({"planner-name": "neural"}))
    return pipeline
Example #5
import pprint

# remove_punctuation, card_type, regex_extractor, word_extractor and
# Pipeline are project-specific helpers assumed to be importable from the
# surrounding codebase.
def card_classifier(text, algorithm, parser):
    classifier = dict()
    text = remove_punctuation(text)
    print(text)
    types, prefix = card_type(text)
    lines = text.splitlines()

    if parser == 'regex':
        preds = regex_extractor(lines, types, prefix)
    else:
        clean = [word_extractor(line, prefix) for line in lines]
        clf = Pipeline(clean)
        preds = clf.predicts(model=algorithm)

    classifier['type'] = types
    classifier['data'] = preds

    pprint.pprint(classifier)

    return classifier
Example #6
from utils.pipeline import Pipeline

TrainPlannerPipeline = Pipeline()
TrainPlannerPipeline.enqueue(
    "planner", "Learn planner",
    lambda _, x: x["config"].planner.learn(x["pre-process"]["train"],
                                           x["pre-process"]["dev"]))
TrainPlannerPipeline.enqueue("out", "Expose the planner", lambda f, _: f["planner"])
Example #7
# NaivePlanner and NeuralPlanner, as well as TrainPlannerPipeline,
# TestingPreProcessPipeline, TrainingPreProcessPipeline, Config and
# WebNLGDataReader used below, are project-specific names assumed to be
# importable from the surrounding codebase.
from reg.naive import NaiveREG
from scorer.global_direction import GlobalDirectionExpert
from scorer.product_of_experts import WeightedProductOfExperts
from scorer.relation_direction import RelationDirectionExpert
from scorer.relation_transitions import RelationTransitionsExpert
from scorer.splitting_tendencies import SplittingTendenciesExpert
from utils.pipeline import Pipeline

naive_planner = NaivePlanner(
    WeightedProductOfExperts([
        RelationDirectionExpert, GlobalDirectionExpert,
        SplittingTendenciesExpert, RelationTransitionsExpert
    ]))
neural_planner = NeuralPlanner()

PlanPipeline = Pipeline()
PlanPipeline.enqueue("train-planner", "Train Planner", TrainPlannerPipeline)
PlanPipeline.enqueue("test-corpus", "Pre-process test data",
                     TestingPreProcessPipeline)

ExperimentsPipeline = Pipeline()
ExperimentsPipeline.enqueue("pre-process", "Pre-process training data",
                            TrainingPreProcessPipeline)

# Train all planners
# # Naive Planner
ExperimentsPipeline.enqueue(
    "naive-planner", "Train Naive Planner",
    PlanPipeline.mutate(
        {"config": Config(reader=WebNLGDataReader, planner=naive_planner)}))
# # Neural Planner
Example #8
from utils.pipeline import Pipeline

REGPipeline = Pipeline()
REGPipeline.enqueue(
    "reg", "Learn planner", lambda _, x: x["config"].reg(
        x["pre-process"]["train"], x["pre-process"]["dev"]))
REGPipeline.enqueue("out", "Expose the reg", lambda f, _: f["reg"])