# Example 1
 def run(self):
     """Worker loop: pull (signal, snr_idx, frame_idx) items off the queue
     forever and store the computed features in the shared `features` array.

     task_done() is guaranteed via try/finally even when feature
     computation raises, so queue.join() can never deadlock on this worker.
     """
     while True:
         work = self.queue.get()
         try:
             # work = (raw signal, SNR index, frame index)
             features[work[1], work[2], :] = ft.calculate_features(work[0])
         finally:
             self.queue.task_done()
# Example 2
 def go_horse():
     """Queue worker: compute features for queued (signal, snr_idx, frame_idx)
     tasks until a None sentinel arrives, then stop.

     Reads module-level globals: q, info_json, features, ft, nb_of_frames,
     modulation.
     """
     # SNR values actually in use, resolved through the info.json indices
     snr_array = [int(info_json['snr']['values'][i])
                  for i in info_json['snr']['using']]
     while True:
         task = q.get()  # This line gets values from queue to evaluate
         if task is None:  # None sentinel: queue drained, job done
             break
         features[task[1], task[2], :] = ft.calculate_features(task[0])
         # Announce completion once the last frame of an SNR level is done
         if task[2] == nb_of_frames - 1:
             print('Task done for SNR = {0} - Modulation = {1} - Process ID = {2}'.format(
                 snr_array[task[1]], modulation, os.getpid()))
         q.task_done()  # This line says "hey, I'm done, give-me more!"
# Example 3
def serial_communication():
    """Load the trained network, the standardization scaler and raw modulation
    frames onto the microcontroller over UART, then gather echoed samples,
    computed features and predictions and save one .mat file per modulation
    under ./arm-data.

    Relies on module-level globals: weights, biases, scaler, signals,
    data_mat, mat_info, number_of_used_features, frame_size, and the helpers
    receive_wandb / receive_scaler / receive_data.
    """
    # Setup UART COM on Windows
    ser = serial.Serial(port='COM3',
                        baudrate=115200,
                        parity='N',
                        bytesize=8,
                        stopbits=1,
                        timeout=1)

    snr_range = np.linspace(0, 9, 10,
                            dtype=np.int16)  # 2 4 6 8 10 12 14 16 18 20
    # NOTE(review): linspace(0, 500, 500) truncated to int16 is NOT 0..499 —
    # the fractional step makes some indices repeat/skip. np.arange(500) was
    # probably intended; left unchanged pending confirmation.
    frame_range = np.linspace(0, 500, 500, dtype=np.int16)

    # Write the network to UART: weights then biases as little-endian int16
    print('Transmitting Neural Network...')
    for point in range(0, len(weights)):
        binary = struct.pack('<h', weights[point])
        ser.write(binary)
    for point in range(0, len(biases)):
        binary = struct.pack('<h', biases[point])
        ser.write(binary)

    # Read back the echo and verify it bit-exactly. Bug fix: compare the
    # absolute error — np.max(err) == 0 wrongly reported success when every
    # error was <= 0 but some were non-zero.
    received_wandb = receive_wandb(ser, len(weights) + len(biases))
    err_weights = received_wandb[0:len(weights)] - weights
    if np.max(np.abs(err_weights)) == 0:
        print('Weights loaded successfully')
        print(weights)
    else:
        print('Error loading weights')
    err_biases = received_wandb[len(weights):len(weights) +
                                len(biases)] - biases
    if np.max(np.abs(err_biases)) == 0:
        print('Biases loaded successfully')
        print(biases)
    else:
        print('Error loading biases')

    print('Wait...')
    time.sleep(3)

    print('Transmit scaler for Standardization')
    # Write to UART: scaler means first, then scales, as little-endian float32
    for point in range(0, number_of_used_features):
        binary = struct.pack('<f', np.float32(scaler.mean_[point]))
        ser.write(binary)
    for point in range(0, number_of_used_features):
        binary = struct.pack('<f', np.float32(scaler.scale_[point]))
        ser.write(binary)

    received_scaler = receive_scaler(ser, number_of_used_features * 2)
    err_scaler = received_scaler - np.concatenate(
        (np.float32(scaler.mean_), np.float32(scaler.scale_)))
    # Bug fix: absolute error here as well (see weights check above)
    if np.max(np.abs(err_scaler)) == 0:
        print('Scaler loaded successfully')
        print(received_scaler)
    else:
        print('Error loading scaler')

    print('Wait...')
    time.sleep(3)

    # Expected classifier output tuple for each modulation name
    expected_prediction = {
        'BPSK': (0.0, ),
        'QPSK': (1.0, ),
        'PSK8': (2.0, ),
        'QAM16': (3.0, ),
        'QAM64': (4.0, ),
        'noise': (5.0, ),
    }

    print('Starting modulation signals transmission!')
    for mod in signals:
        # Per-modulation accumulators. The err_* vectors other than
        # err_features are only kept so the saved .mat layout is unchanged;
        # the instrumentation that filled them is currently disabled.
        err_abs_vector = []
        err_phase_vector = []
        err_unwrapped_phase_vector = []
        err_freq_vector = []
        err_cn_abs_vector = []
        err_features = []
        predictions = []
        gathered_data = []
        parsed_signal = data_mat[mat_info[mod]]
        for snr in snr_range:
            for frame in frame_range:
                print('Modulation = ' + mod)
                print('SNR = {}'.format(snr))
                print('Frame = {}'.format(frame))
                # Host-side instantaneous values; currently consumed only by
                # the disabled instrumentation mentioned above
                i_values = functions.InstValues(parsed_signal[snr, frame,
                                                              0:frame_size])
                ft = np.float32(
                    features.calculate_features(parsed_signal[snr, frame,
                                                              0:frame_size]))
                ft_scaled = (ft - np.float32(scaler.mean_)) / np.float32(
                    scaler.scale_)
                q_format = quantization.find_best_q_format(
                    np.min(ft_scaled), np.max(ft_scaled))
                q_ft = quantization.quantize_data(ft_scaled, q_format)
                print('Scaled features: {}'.format(ft_scaled))
                print('Quantized scaled features: {}'.format(q_ft))
                print('Transmitting...')
                # Stream the frame as interleaved real/imag float32 samples
                for point in range(0, 2048):
                    binary = struct.pack(
                        '<f', np.real(parsed_signal[snr, frame, point]))
                    ser.write(binary)
                    binary = struct.pack(
                        '<f', np.imag(parsed_signal[snr, frame, point]))
                    ser.write(binary)

                received_list = []
                for results in range(0, 3):
                    num_array, counter_array, real, imag = receive_data(ser)
                    if results == 0:
                        # First message is an echo of the transmitted frame:
                        # validate it sample by sample
                        err = False
                        for n in range(0, 2048):
                            if abs(real[n] -
                                   np.real(parsed_signal[snr, frame, n])) > 0:
                                err = True
                                print(
                                    'Error at real sample number {}'.format(n))
                            if abs(imag[n] -
                                   np.imag(parsed_signal[snr, frame, n])) > 0:
                                err = True
                                # Bug fix: this message previously said "real"
                                print('Error at imaginary sample number {}'
                                      .format(n))
                        if err:
                            received_list.append((0, 0))
                        else:
                            print('Echo ok - data validated')
                            received_list.append([real, imag])
                    else:
                        received_list.append([num_array, counter_array])

                # received_list[1] = (features, timings) from the target
                err_features.append(ft - received_list[1][0])
                print('Err features: {}'.format(np.max(err_features)))

                # received_list[2] = (prediction, timings) from the target
                predictions.append(received_list[2][0])
                # Running accuracy over all frames seen so far for this mod
                correct = sum(
                    1 for p in predictions if p == expected_prediction.get(mod))

                print('Last prediction = {}'.format(received_list[2][0]))
                print('Accuracy = {}'.format(correct * 100 / len(predictions)))

                print('Wait...')
                gathered_data.append(received_list)
                time.sleep(2.5)

        save_dict = {
            'Data': gathered_data,
            'err_abs_vector': err_abs_vector,
            'err_phase_vector': err_phase_vector,
            'err_unwrapped_phase_vector': err_unwrapped_phase_vector,
            'err_freq_vector': err_freq_vector,
            'err_cn_abs_vector': err_cn_abs_vector,
            'err_features': err_features,
            'snr_range': snr_range,
            'frame_range': frame_range,
            'modulation': mod,
            'predictions': predictions
        }
        scipy.io.savemat(
            pathlib.Path(join(os.getcwd(), 'arm-data', mod + '.mat')),
            save_dict)
# Example 4
def print_timer(purpose_message=""):
    """Print seconds elapsed since the last checkpoint, then reset it.

    Uses the module-level ``start_time`` checkpoint, which must be
    initialised before the first call.

    Bug fix: ``time.clock()`` was deprecated since Python 3.3 and removed in
    3.8; ``time.perf_counter()`` is the drop-in high-resolution replacement.
    """
    global start_time
    print("%s timer: %ss" % (purpose_message, int(time.perf_counter() - start_time)))
    start_time = time.perf_counter()

""" Reading Data """

svg_parser = SvgParser.SvgParser("ground-truth/locations/", "images/", "task/train.txt", "task/valid.txt")
svg_parser.get_cropped_images()
print_timer("Cropping")
training_samples, validation_samples = svg_parser.get_binarized_word_images(50)

print("binarized training sample size: %s" % len(training_samples))
print_timer("Binarizing")

""" Calculating Features """

features = features.calculate_features(training_samples)
print("Features of trainingset[0]: %s" % features[0])
print_timer("Feature computation")

""" Searching for Keyword """
keyword = svg_parser.binarize("cropped/train/270_32_out.png", 50)
dtw = DTW(keyword)

results = [dtw.calculate_cost_and_matrix(sample) for sample in training_samples]
print_timer("Computing String Distance")

print([costs[0] for costs in sorted(results)][:30])
print("Word found in image numbers: &s" % [i for i, x in enumerate(results[:, 0]) if x < 6000])
# Example 5
def sort_by_cost(signature):
    """Sort key: the DTW matching cost stored on *signature*."""
    return getattr(signature, "cost")


def apply_dtw(template):
    """Compute *template*'s DTW result against its own user's enrollment set.

    Only enrollment signatures belonging to the same user as *template* are
    compared; the minimum result is stored on ``template.cost``.
    """

    dtw = DTW(template)

    # NOTE(review): if calculate_cost_and_matrix returns (cost, matrix)
    # tuples, min() compares whole tuples (matrices break ties), and it
    # raises ValueError when no enrollment entry matches the user —
    # confirm both behaviors are intended.
    results = [dtw.calculate_cost_and_matrix(enr) for enr in enrollment if enr.get_user() == template.get_user()]
    template.cost = min(results)


""" Parsing """


enrollment = sorted(features.calculate_features(Parser.parse_files_in_directory(enrollment_path)), key=sort_by_name)
verification = sorted(features.calculate_features(Parser.parse_files_in_directory(verification_path)), key=sort_by_name)
# dev verification
# verification_gt = dict(Parser.parse_validation_file(verification_gt_path))
print_timer("parsing")


""" Applying DTW """

output = [["0%s" % (i+31)] for i in range(70)]
print("applying dtw. this might take a while")
for template in verification:
    apply_dtw(template)
    output[int(template.get_user())-31].append(template)

def prep_column(column):
# Example 6
def assess_model_accuracies(df_tr,
                            df_te,
                            models_and_feats,
                            feature_scaling=False,
                            get_conf_mats=False):
    """
    Train each given model on df_tr and evaluate it on df_te.
    :param df_tr: training data
    :param df_te: test data
    :param models_and_feats: models with train() and predict method(), and feature sets
    they require. Feature sets is a list of strings, each is a suffix indicating a feature group
    in feat_names below
    :type models_and_feats: dict, Model -> str
    :param feature_scaling: whether to do zero-mean, unit variance scaling on feats (not for NB!)
    :type feature_scaling: bool
    :param get_conf_mats: whether to return confusion matrices for test predictions
    :return: either dict: model name -> f1 scores or
    tuple: (dict: model name -> f1, dict: model_name -> confusion matrix)
    """
    # Stance labels as unicode arrays (unicode is more robust than bytes)
    stance_tr = df_tr['Stance'].values.astype('U')
    stance_te = df_te['Stance'].values.astype('U')

    # Target columns (needed both for feature extraction and multi-target models)
    tgt_tr = df_tr['Target']
    tgt_te = df_te['Target']

    # Bag-of-words style feature matrices plus the feature-name list
    x_tr, x_te, feat_names = features.calculate_features(
        df_tr['Tweet'], df_te['Tweet'], tgt_tr, tgt_te, **utils.BOW_KWARGS)

    if feature_scaling:
        # Zero-mean, unit-variance scaling
        x_tr, x_te = features.scale_inputs(x_tr, x_te)

    f1_by_model = {}
    cm_by_model = {}

    for model, feat_sets in models_and_feats.items():
        print('Evaluating performance for model: {}'.format(model.name))

        # Column indices whose feature-name suffix belongs to the model's sets
        cols = [idx for idx, fname in enumerate(feat_names)
                if fname.split('_')[-1] in feat_sets]

        # Copies so one model's training can't mutate another's inputs
        train_args = [x_tr.copy()[:, cols], stance_tr.copy()]
        eval_args = [x_te.copy()[:, cols], stance_te.copy()]

        # Multi-target models additionally receive the target arrays
        if isinstance(model, multitarget.StanceDetectorMultiTarget):
            train_args.append(tgt_tr.values)
            eval_args.append(tgt_te.values)

        model.train(*train_args)

        # Held-out metrics
        f1 = model.f1_score(*eval_args)
        conf_mat = model.confusion_matrix(*eval_args)

        print('F1 score: {}'.format(f1))
        print('Confusion matrix:\n{}\n\n'.format(conf_mat))

        f1_by_model[model.name] = f1
        cm_by_model[model.name] = conf_mat

    if get_conf_mats:
        return f1_by_model, cm_by_model
    return f1_by_model
# Example 7
def worker_job(data, list_to_append, signals_list):
    """Compute features for each named signal in *data* and append them.

    Each appended element is a single-key dict mapping the signal name to
    its computed features; progress is logged with a timestamp.
    """
    for name in signals_list:
        feats = calculate_features(data[name])
        list_to_append.append({name: feats})
        print("calculated ", name, datetime.now().time())
# Example 8
        self.plt_output(plt, 'feature_mapping')

    @staticmethod
    def plt_output(plt, name):
        """Display the given matplotlib plot in a popup window titled *name*."""
        plt.title(name)
        plt.show()  # shows results in popup
        # plt.savefig(name + '.png')    # saves results in file system


def sort_by_name(signature):
    """Sort key: the source filename recorded on *signature*."""
    return getattr(signature, "filename")


# An example that takes the first word and searches in the images for the same word
# Pictures need to be already cropped.
if __name__ == "__main__":
    # Parse, feature-extract and filename-sort both signature sets
    enrollment = sorted(
        features.calculate_features(
            Parser.parse_files_in_directory(enrollment_path)),
        key=sort_by_name)
    verification = sorted(
        features.calculate_features(
            Parser.parse_files_in_directory(verification_path)),
        key=sort_by_name)
    verification_gt = Parser.parse_validation_file(verification_gt_path)

    # Align the first two verification samples; element 0 is the DTW cost,
    # element 1 the cost matrix
    dtw = DTW(verification[0])
    alignment = dtw.calculate_cost_and_matrix(verification[1])
    print("cost: ", alignment[0])

    dtw.plot_matrix_cost(alignment[1])
# Example 9
def serial_communication(weights, biases):
    """Transmit the quantized network to the microcontroller over UART,
    stream modulation frames to it, and collect the returned features and
    predictions, saving per-frame and per-modulation .mat files under
    ./arm-data.

    :param weights: flat sequence of int16-compatible network weights
    :param biases: flat sequence of int16-compatible network biases
    """
    with open("./info.json") as handle:
        info_json = json.load(handle)

    modulations = info_json['modulations']['names']
    frameSize = info_json['frameSize']

    # Filename setup
    mat_file_name = pathlib.Path(
        join(os.getcwd(), 'mat-data', 'all_modulations_data.mat'))

    # Dictionary to access variable inside MAT file
    info = {
        'BPSK': 'signal_bpsk',
        'QPSK': 'signal_qpsk',
        'PSK8': 'signal_8psk',
        'QAM16': 'signal_qam16',
        'QAM64': 'signal_qam64',
        'noise': 'signal_noise'
    }

    # Load MAT file and parse data
    data_mat = scipy.io.loadmat(mat_file_name)
    print(str(mat_file_name) + ' file loaded...')

    # Setup UART COM on Windows
    ser = serial.Serial(port='COM3',
                        baudrate=115200,
                        parity='N',
                        bytesize=8,
                        stopbits=1,
                        timeout=1)

    snr_range = np.linspace(11, 15, 5, dtype=np.int16)  # 12 14 16 18 20
    frame_range = np.linspace(0, 4, 5, dtype=np.int16)

    # Write the network to UART as little-endian int16. Sizes are derived
    # from the inputs (previously hard-coded as 1700 weights + 82 biases),
    # matching the sibling serial_communication() implementation.
    print('Transmitting Neural Network...')
    for point in range(0, len(weights)):
        binary = struct.pack('<h', weights[point])
        ser.write(binary)
    for point in range(0, len(biases)):
        binary = struct.pack('<h', biases[point])
        ser.write(binary)

    received_wandb = receive_wandb(ser, len(weights) + len(biases))

    # Bug fix: validate on the absolute error — np.max(err) == 0 wrongly
    # reported success when every error was <= 0 but some were non-zero.
    err_weights = received_wandb[0:len(weights)] - weights
    if np.max(np.abs(err_weights)) == 0:
        print('Weights loaded successfully')
    else:
        print('Error loading weights')
    err_biases = received_wandb[len(weights):len(weights) + len(biases)] - biases
    if np.max(np.abs(err_biases)) == 0:
        print('Biases loaded successfully')
    else:
        print('Error loading biases')

    print('Wait...')
    time.sleep(3)
    print('Starting modulation signals transmission!')

    for mod in modulations:
        # Per-modulation accumulators. The err_* vectors other than
        # err_features are only kept so the saved .mat layout is unchanged;
        # the instrumentation that filled them is currently disabled.
        err_abs_vector = []
        err_phase_vector = []
        err_unwrapped_phase_vector = []
        err_freq_vector = []
        err_cn_abs_vector = []
        err_features = []
        predictions = []
        gathered_data = []
        parsed_signal = data_mat[info[mod]]
        print('Modulation = ' + mod)
        for snr in snr_range:
            print('SNR = {}'.format(snr))
            for frame in frame_range:
                print('Frame = {}'.format(frame))
                # Host-side reference values, saved alongside the target data
                i_values = functions.InstValues(parsed_signal[snr, frame,
                                                              0:frameSize])
                ft = np.float32(
                    features.calculate_features(parsed_signal[snr, frame,
                                                              0:frameSize]))
                # Write to UART: interleaved real/imag float32 samples
                print('Transmitting...')
                for point in range(0, 2048):
                    binary = struct.pack(
                        '<f', np.real(parsed_signal[snr, frame, point]))
                    ser.write(binary)
                    binary = struct.pack(
                        '<f', np.imag(parsed_signal[snr, frame, point]))
                    ser.write(binary)

                received_list = []
                for results in range(1, 3):
                    num_array, counter_array, real, imag = receive_data(
                        ser, frameSize)
                    # NOTE(review): with range(1, 3) the echo-validation
                    # branch below is unreachable — confirm whether the
                    # range or the branch reflects the current firmware.
                    if results == 0:
                        err = False
                        for n in range(0, 2048):
                            if abs(real[n] -
                                   np.real(parsed_signal[snr, frame, n])) > 0:
                                err = True
                                print(
                                    'Error at real sample number {}'.format(n))
                            if abs(imag[n] -
                                   np.imag(parsed_signal[snr, frame, n])) > 0:
                                err = True
                                # Bug fix: this message previously said "real"
                                print('Error at imaginary sample number {}'
                                      .format(n))
                        if err:
                            received_list.append((0, 0))
                        else:
                            print('Echo ok - data validated')
                            received_list.append([real, imag])
                    else:
                        received_list.append([num_array, counter_array])

                # received_list[0] = (features, timings) from the target;
                # timings are clock cycles at 200 MHz, hence / 200000 -> ms
                err_features.append(ft - received_list[0][0])
                print('Error list: {}'.format(err_features))
                print('Timings list: {}'.format(received_list[0][1]))
                print('Timings list (ms): {}'.format(received_list[0][1] /
                                                     200000))

                # received_list[1] = (prediction, timings) from the target
                predictions.append(received_list[1][0])
                print('Predictions for ' + mod + ': {}'.format(predictions))
                print('Wait...')
                gathered_data.append(received_list)
                # NOTE(review): savemat may not serialize the InstValues
                # object cleanly — confirm 'inst_values' round-trips.
                save_dict = {
                    'Modulation': mod,
                    'SNR': snr,
                    'Frame': frame,
                    'Data': received_list,
                    'inst_values': i_values,
                    'features': ft
                }
                file_name = mod + '_' + str(snr) + '_' + str(frame) + '.mat'
                scipy.io.savemat(
                    pathlib.Path(join(os.getcwd(), 'arm-data', file_name)),
                    save_dict)
                time.sleep(3)

        save_dict = {
            'Data': gathered_data,
            'err_abs_vector': err_abs_vector,
            'err_phase_vector': err_phase_vector,
            'err_unwrapped_phase_vector': err_unwrapped_phase_vector,
            'err_freq_vector': err_freq_vector,
            'err_cn_abs_vector': err_cn_abs_vector,
            'err_features': err_features,
            'snr_range': snr_range,
            'frame_range': frame_range,
            'modulation': mod
        }
        scipy.io.savemat(
            pathlib.Path(join(os.getcwd(), 'arm-data', mod + '.mat')),
            save_dict)