Example #1
0
def extract_features_from_file(features, filename):
    """Extract the requested features from the recording in *filename*.

    The recording is split into sliding windows of ``WINDOW`` seconds
    advanced by ``WINDOW_SHIFT`` seconds, and each feature is extracted
    per window via ``Feature_Extractor.extract_<feature>()``.

    Parameters
    ----------
    features : iterable of str
        Feature names; each must have a matching ``extract_<name>``
        method on ``Feature_Extractor``.
    filename : str
        Path handed to ``read_data``.

    Returns
    -------
    dict
        Maps each feature name to its per-window results: the raw result
        for a single window, or the results row-stacked with
        ``np.vstack`` for multiple windows. Empty when there are no
        windows.
    """
    # Materialize once: the original re-iterated `features` every window,
    # which silently drops data if the caller passes a generator.
    features = list(features)

    data = read_data(filename)
    time_series = data['data']
    freq = data['sampling_frequency']
    # Template dict reused for every window; only 'data' changes per window.
    windowed_data = {
        'data': None,
        'sampling_frequency': freq,
        'sequence': data['sequence'],
        'channels': data['channels'],
    }
    freq_floor = np.floor(freq)
    num_windows = int((data['data_length_sec'] - WINDOW) / WINDOW_SHIFT) + 1

    # Collect per-window results and stack ONCE at the end: calling
    # np.vstack inside the loop re-copies the accumulated array every
    # iteration (O(n^2) total copying).
    per_window = {feature: [] for feature in features}
    t = 0
    for _ in range(num_windows):
        start, end = int(t * freq_floor), int((t + WINDOW) * freq_floor)
        windowed_data['data'] = time_series[:, start:end]
        fe = Feature_Extractor(windowed_data)
        t += WINDOW_SHIFT
        for feature in features:
            per_window[feature].append(getattr(fe, 'extract_' + feature)())

    res = {}
    for feature, values in per_window.items():
        if values:  # num_windows == 0 -> keep original behavior: omit key
            # Single window stays un-stacked, exactly as the original did.
            res[feature] = values[0] if len(values) == 1 else np.vstack(values)
    return res
Example #2
0
	def __init__(self):
		"""Set up the feature extractor and the CRF sequence tagger."""
		self.feature_extractor = Feature_Extractor.Feature_Extractor()
		# L-BFGS-trained CRF; hand-tuned hyperparameters (micro-F1: 52.13).
		self.crf = sklearn_crfsuite.CRF(
			algorithm='lbfgs',
			c1=0.05,
			c2=0.01,
			max_iterations=50,
			all_possible_transitions=True,
			min_freq=0,
		)
# configurations
# Toggle between a stock ImageNet-pretrained VGG19 feature extractor and a
# fine-tuned triplet network restored from `model_dict_path` (Stage 2 below).
pretrained_vgg = False
# `simplified` selects the reduced dataset directory; the "_20" suffix
# presumably means 20 samples per texture — TODO confirm against the dataset.
simplified = True
sample_per_texture = 5
data_sample_path = "dataset/texture_simplified_20" if simplified else "dataset/texture"
# Checkpoint for the triplet feature-extraction net (margin = 5, 150 epochs).
model_dict_path = "state_dict/features/(margin = 5) feature_extraction_dict_150_epochs_best_0-0357"

# Stage 1: Preprocess Images
# Validate the on-disk data, then load and preprocess images from
# `data_sample_path` (`sample_per_texture` images per texture class,
# judging by the constructor arguments — verify in Image_Preprocessor).
image_preprocessor = Image_Preprocessor(sample_per_texture, data_sample_path)
image_preprocessor.validate_data_in_file()
preprocessed_images = image_preprocessor.read_and_process_images_in_directory()

# Stage 2: Feature Extraction
# NOTE(review): `pretrained=True` is deprecated in newer torchvision in favour
# of the `weights=` argument — confirm the pinned torchvision still accepts it.
vgg19_model = models.vgg19(pretrained=True)
if pretrained_vgg:
    # Use the stock VGG19 directly as the feature extractor.
    feature_extraction_manager = Feature_Extractor(vgg19_model, train=True)
    print("pretrained model used")
else:
    # Wrap VGG19 in the triplet network and restore the fine-tuned weights.
    base_net = Base_VGG19_Features_Net(vgg19_model)
    model = Triplet_Net(base_net)
    model.load_state_dict(torch.load(model_dict_path))
    print("model path added")
    feature_extraction_manager = Feature_Extractor(model, train=False)

extracted_features = feature_extraction_manager.classify_features(preprocessed_images)

# Stage 3: Sanity check
# Presumably checks that embeddings of the same class are close and those of
# different classes are far apart — semantics live in Feature_Extractor; confirm.
feature_extraction_manager.sanity_check_for_same_class(5, extracted_features)
feature_extraction_manager.sanity_check_for_different_class(extracted_features, len(preprocessed_images))

# Stage 4: TSNE dimension reduction