Example #1
def main():

    start = '2021-03-02'
    end = '2021-03-05'
    dates = (start, end)
    BTC = dr.DataReader('BTCUSDT', 'binance', dates, tick='1d')
    TSLA = dr.DataReader('TSLA', 'yahoo', dates)
    print('BTC:', BTC.Dates, '\n', 'TSLA:', TSLA.Dates)
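As with the later scripts in this listing, this snippet is presumably executed directly; a standard entry-point guard (not shown in the excerpt) would be:

if __name__ == '__main__':
    main()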
def plot_whole_signal_and_tasks_times(path, ident):
    """Plots signal, which was acquired wirelessly with GNU Radio, along with colored task timeframes."""
    dataread = datareader.DataReader(path, ident)  # initialize path to data
    data = dataread.read_grc_data()  # read from files
    data = dataread.unwrap_grc_data()  # unwrap phase. returns time and y values

    task_timestamps = dataread.get_data_task_timestamps()
    relax_timestamps = dataread.get_relax_timestamps()

    plt.figure(2)
    plt.clf()
    plt.plot(data[0], data[1])
    plt.xlabel('time (s)', fontsize=12)
    plt.ylabel('distance', fontsize=12)

    loc = plticker.MultipleLocator(base=50.0)
    for (start, stop) in task_timestamps:
        plt.axvspan(start, stop, alpha=0.4, color='r')

    for (start, stop) in relax_timestamps:
        plt.axvspan(start, stop, alpha=0.4, color='b')

    tasks = ['HP\nhigh', 'HP\nlow', 'HP\nmedium',
             'FA\nhigh', 'FA\nmedium', 'FA\nlow',
             'GC\nmedium', 'GC\nhigh', 'GC\nlow',
             'NC\nlow', 'NC\nhigh', 'NC\nmedium',
             'SX\nlow', 'SX\nmedium', 'SX\nhigh',
             'PT\nmedium', 'PT\nlow', 'PT\nhigh']

    task_i = 0
    for (start, stop) in task_timestamps:
        plt.text((start + stop) / 2, 0, tasks[task_i], horizontalalignment='center', fontsize=12, clip_on=True)
        task_i += 1

    plt.show()
Example #3
def do_validation(m_valid, sess, valid_path):

    # dataset reader setting #

    valid_dr = dr.DataReader(valid_path["valid_input_path"], valid_path["valid_output_path"],
                             valid_path["norm_path"], dist_num=config.dist_num, is_shuffle=False, is_val=True)

    valid_cost_list = []

    while True:

        valid_inputs, valid_labels, valid_inphase, valid_outphase = valid_dr.whole_batch(valid_dr.num_samples)

        feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                     m_valid.keep_prob: 1.0}

        valid_cost = sess.run(m_valid.cost, feed_dict=feed_dict)
        valid_cost_list.append(np.expand_dims(valid_cost, axis=1))

        if valid_dr.file_change_checker():

            valid_dr.file_change_initialize()
            if valid_dr.eof_checker():
                valid_dr.reader_initialize()
                print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break

    valid_cost_list = np.concatenate(valid_cost_list, axis=0)

    total_avg_valid_cost = np.asscalar(np.mean(valid_cost_list))

    return total_avg_valid_cost
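Both validation helpers in this example iterate over files through a small reader protocol: a batch method yields data, file_change_checker() reports that a file has been consumed, file_change_initialize() clears that flag, and eof_checker()/reader_initialize() end and reset the pass. The real dr.DataReader reads and normalizes feature files from disk; the stub below is only a hypothetical sketch of that control-flow contract, with random arrays standing in for real features.

import numpy as np

class MockReader:
    """Hypothetical stand-in that mimics the reader interface used above."""

    def __init__(self, num_files=3, feat_dim=8, num_samples=16):
        self.num_samples = num_samples
        self._feat_dim = feat_dim
        self._num_files = num_files
        self._file_idx = 0
        self._file_done = False

    def whole_batch(self, batch_size):
        # pretend one call consumes an entire file
        inputs = np.random.randn(batch_size, self._feat_dim)
        labels = np.random.randn(batch_size, self._feat_dim)
        self._file_done = True
        self._file_idx += 1
        return inputs, labels, None, None  # inputs, labels, in-phase, out-phase

    def file_change_checker(self):
        return self._file_done

    def file_change_initialize(self):
        self._file_done = False

    def eof_checker(self):
        return self._file_idx >= self._num_files

    def reader_initialize(self):
        self._file_idx = 0
        self._file_done = False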
def do_validation_G(m_valid, sess, valid_path):

    # dataset reader setting #

    valid_dr = dr.DataReader(valid_path["valid_input_path"],
                             valid_path["valid_output_path"],
                             valid_path["norm_path"],
                             dist_num=config.dist_num)

    avg_valid_accuracy = 0.
    avg_valid_cost = 0.
    itr_sum = 0.

    accuracy_list = [0 for i in range(valid_dr._file_len)]
    cost_list = [0 for i in range(valid_dr._file_len)]
    itr_file = 0

    while True:

        valid_inputs, valid_labels = valid_dr.next_batch(config.batch_size)

        if valid_dr.file_change_checker():
            # print(itr_file)
            accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
            cost_list[itr_file] = avg_valid_cost / itr_sum
            avg_valid_cost = 0.
            avg_valid_accuracy = 0.
            itr_sum = 0
            itr_file += 1
            valid_dr.file_change_initialize()

        if valid_dr.eof_checker():
            valid_dr.reader_initialize()
            print('Valid data reader was initialized!'
                  )  # initialize eof flag & num_file & start index
            break

        feed_dict = {
            m_valid.inputs: valid_inputs,
            m_valid.labels: valid_labels
        }

        # valid_cost, valid_softpred, valid_raw_labels\
        #     = sess.run([m_valid.cost, m_valid.softpred, m_valid.raw_labels], feed_dict=feed_dict)
        #
        # fpr, tpr, thresholds = metrics.roc_curve(valid_raw_labels, valid_softpred, pos_label=1)
        # valid_auc = metrics.auc(fpr, tpr)

        valid_cost, valid_accuracy = sess.run(
            [m_valid.C1_loss, m_valid.d1_loss], feed_dict=feed_dict)

        avg_valid_accuracy += valid_accuracy
        avg_valid_cost += valid_cost
        itr_sum += 1

    total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
    total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))

    return total_avg_valid_accuracy, total_avg_valid_cost
    def get_data(self, symbol):
        coin_data = dr.DataReader('{}USDT'.format(symbol),
                                  'binance',
                                  self.Dates,
                                  tick='1d')
        data = (coin_data.Dates, coin_data.Closes)

        return data
Example #6
 def __init__(self, parent=None, filename='default.csv'):
     super(ProcessorThread, self).__init__()
     self.file = filename
     self.rowBuffer = []
     self.changeZ = False
     self.running = True
     self.reader = datareader.DataReader(parent=self, filename=self.file)
     self.reader.lineRead.connect(self.newLineRead)
Example #7
    def enhance(self, wav_dir):

        noisy_speech = utils.read_raw(wav_dir)
        temp_dir = './temp/temp.npy'
        np.save(temp_dir, noisy_speech)

        test_dr = dr.DataReader(temp_dir,
                                '',
                                self.norm_path,
                                dist_num=config.dist_num,
                                is_training=False,
                                is_shuffle=False)
        mean, std = test_dr.norm_process(self.norm_path + '/norm_noisy.mat')

        while True:
            test_inputs, test_labels, test_inphase, test_outphase = test_dr.whole_batch(
                test_dr.num_samples)
            if config.mode != 'lstm' and config.mode != 'fcn':
                feed_dict = {
                    self.node_inputs: test_inputs,
                    self.node_labels: test_labels,
                    self.node_keep_prob: 1.0
                }
            else:
                feed_dict = {
                    self.node_inputs: test_inputs,
                    self.node_labels: test_labels
                }

            pred = self.sess.run(self.node_prediction, feed_dict=feed_dict)

            if test_dr.file_change_checker():
                print(wav_dir)

                lpsd = np.expand_dims(np.reshape(pred, [-1, config.freq_size]),
                                      axis=2)

                lpsd = np.squeeze((lpsd * std * config.global_std) + mean)

                recon_speech = utils.get_recon(np.transpose(lpsd, (1, 0)),
                                               np.transpose(
                                                   test_inphase, (1, 0)),
                                               win_size=config.win_size,
                                               win_step=config.win_step,
                                               fs=config.fs)

                test_dr.reader_initialize()

                break

        file_dir = self.save_dir + '/' + os.path.basename(wav_dir).replace(
            'noisy', 'enhanced').replace('raw', 'wav')
        librosa.output.write_wav(file_dir,
                                 recon_speech,
                                 int(config.fs),
                                 norm=True)

        return recon_speech
def speech_enhance(wav_dir, graph_name):

    noisy_speech = utils.read_raw(wav_dir)

    temp_dir = './temp/temp.npy'
    np.save(temp_dir, noisy_speech)
    graph = gt.load_graph(graph_name)
    norm_path = os.path.abspath('./data/train/norm')

    test_dr = dr.DataReader(temp_dir, '', norm_path, dist_num=config.dist_num, is_training=False, is_shuffle=False)

    node_inputs = graph.get_tensor_by_name('prefix/model_1/inputs:0')
    node_labels = graph.get_tensor_by_name('prefix/model_1/labels:0')
    node_keep_prob = graph.get_tensor_by_name('prefix/model_1/keep_prob:0')
    node_prediction = graph.get_tensor_by_name('prefix/model_1/pred:0')

    pred = []
    lab = []

    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True

    while True:

        test_inputs, test_labels = test_dr.next_batch(config.test_batch_size)

        feed_dict = {node_inputs: test_inputs, node_labels: test_labels, node_keep_prob: 1.0}

        with tf.Session(graph=graph, config=sess_config) as sess:
            pred_temp, lab_temp = sess.run([node_prediction, node_labels], feed_dict=feed_dict)

        pred.append(pred_temp)
        lab.append(lab_temp)

        # print(test_dr.file_change_checker())
        if test_dr.file_change_checker():
            print(wav_dir)
            phase = test_dr.phase[0]

            lpsd = np.expand_dims(np.reshape(np.concatenate(pred, axis=0), [-1, config.freq_size])[0:phase.shape[0], :], axis=2)

            mean, std = test_dr.norm_process(norm_path + '/norm_noisy.mat')

            lpsd = np.squeeze((lpsd * std) + mean)  # denorm

            recon_speech = utils.get_recon(np.transpose(lpsd, (1, 0)), np.transpose(phase, (1, 0)),
                                           win_size=config.win_size, win_step=config.win_step, fs=config.fs)

            # plt.plot(recon_speech)
            # plt.show()
            # lab = np.reshape(np.asarray(lab), [-1, 1])
            test_dr.reader_initialize()
            break

    return recon_speech
Example #9
 def __init__(self,
              symbols,
              period,
              start,
              end,
              username,
              password,
              cache=True):
     self.symbols = symbols
     self.start = start
     self.end = end
     self.period = period
     self.cache = cache
     self.datareader = datareader.DataReader(username, password, cache)
def main():

    start = '2020-02-05'
    end = '2021-03-05'
    dates = (start, end)
    BTC = dr.DataReader('BTCUSDT', 'binance', dates)
    BTCVariations = vr.Variations(BTC.Dates, BTC.Closes, normalized=True)

    plt.plot(BTCVariations.Dates, BTCVariations.Variations)
    plt.xlim(BTCVariations.Dates[0], BTCVariations.Dates[-1])
    plt.ylim(min(BTCVariations.Variations), max(BTCVariations.Variations))
    plt.show()
    plt.plot(BTCVariations.ShiftedDates, BTCVariations.ShiftedCloses)
    plt.xlim(BTCVariations.ShiftedDates[0], BTCVariations.ShiftedDates[-1])
    plt.show()
Example #11
    def read_data_from_prepared_datapath(self):
        """
        Called at the end of the process.
        :return:
        """

        reader = datareader.DataReader()

        self.datap = reader.Get3DData(self.datapath, dataplus_format=True)

        _set_label_text(self.text_dcm_dir, _make_text_short(self.datapath),
                        self.datapath)
        _set_label_text(self.text_dcm_data, self.get_data_info())
        if self.after_function is not None:
            self.after_function(self)
        self.__show_message('Data read finished')
Example #12
 def newFile(self, newfile):
     global DEP_DATA
     DEP_DATA = []
     self.rowBuffer = []
     if self.reader:
         self.reader.end()
     self.reader = datareader.DataReader(parent=self, filename=newfile)
     self.reader.lineRead.connect(self.newLineRead)
     self.reader.start()
     # re-initialize DATA_DICT column numbers used for data processing
     try:
         self.tcolnum = getCol('Src%d Motor Tilt Position' %int(filename_handler.FILE_INFO['Source']))
     except IndexError:
         self.srcError.emit(int(filename_handler.FILE_INFO['Source']))
     self.zcolnum = getCol('Platen Zshift Motor 1 Position')
     self.anglecolnum = getCol('Platen Motor Position')
Example #13
def show(data3d_a_path, sliver_seg_path, ourSegmentation):
    reader = datareader.DataReader()
    #data3d_a_path = os.path.join(path_to_script, data3d_a_path)
    datap_a = reader.Get3DData(data3d_a_path, dataplus_format=True)

    if 'orig_shape' in datap_a.keys():
        # pklz
        data3d_a = qmisc.uncrop(datap_a['data3d'], datap_a['crinfo'],
                                datap_a['orig_shape'])
    else:
        #dicom
        data3d_a = datap_a['data3d']

    if sliver_seg_path is not None:
        sliver_seg_path = os.path.join(path_to_script, sliver_seg_path)
        sliver_datap = reader.Get3DData(sliver_seg_path, dataplus_format=True)
        if 'segmentation' in sliver_datap.keys():
            sliver_seg = sliver_datap['segmentation']
            sliver_seg = qmisc.uncrop(sliver_datap['segmentation'],
                                      sliver_datap['crinfo'], data3d_a.shape)
        else:
            sliver_seg = sliver_datap['data3d']

        pyed = sed3.sed3(data3d_a, contour=sliver_seg)
        print("Sliver07 segmentation")
        pyed.show()

    if ourSegmentation is not None:
        ourSegmentation = os.path.join(path_to_script, ourSegmentation)
        datap_our = reader.Get3DData(ourSegmentation, dataplus_format=True)
        #data_our = misc.obj_from_file(ourSegmentation, 'pickle')
        #data3d_our = data_our['segmentation']
        our_seg = qmisc.uncrop(datap_our['segmentation'], datap_our['crinfo'],
                               data3d_a.shape)

    if ourSegmentation is not None:
        pyed = sed3.sed3(data3d_a, contour=our_seg)
        print("Our segmentation")
        pyed.show()

    if (ourSegmentation is not None) and (sliver_seg_path is not None):
        diff = (our_seg.astype(np.int8) - sliver_seg)
        diff[diff == -1] = 2
        #import ipdb; ipdb.set_trace() # BREAKPOINT
        pyed = sed3.sed3(data3d_a, contour=our_seg, seeds=diff)
        print("Sliver07 and our segmentation differences")
        pyed.show()
Example #14
def get_engagement_increase_vs_decrease_timeframes(path, ident, seconds):
    """Returns raw data from either engagement 'increase' or 'decrease' time frames and their class (0 or 1).
    TODO: join functions"""

    dataread = datareader.DataReader(path, ident)  # initialize path to data
    data = dataread.read_grc_data()  # read from files
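    # data[0] is the time axis in seconds and data[1] the samples, so samples / duration below approximates the sampling rate in Hz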
    samp_rate = int(round(len(data[1]) / max(data[0])))
    cog_res = dataread.read_cognitive_load_study(
        str(ident) + '-primary-extract.txt')

    tasks_data = np.empty((0, seconds * samp_rate))
    tasks_y = np.empty((0, 1))

    busy_n = dataread.get_data_task_timestamps(return_indexes=True)
    relax_n = dataread.get_relax_timestamps(return_indexes=True)

    for i in cog_res['task_number']:
        task_num_table = i - 225  # 0 - 17

        ### engagement increase / decrease
        if task_num_table == 0:
            continue
        mid = int(
            (relax_n[task_num_table][0] + relax_n[task_num_table][1]) / 2)
        length = int(samp_rate * 30)
        for j in range(10):
            new_end = int(mid - j * samp_rate)

            new_start2 = int(mid + j * samp_rate)

            dataextract_decrease = dataextractor.DataExtractor(
                data[0][new_end - length:new_end],
                data[1][new_end - length:new_end], samp_rate)

            dataextract_increase = dataextractor.DataExtractor(
                data[0][new_start2:new_start2 + length],
                data[1][new_start2:new_start2 + length], samp_rate)

            try:
                tasks_data = np.vstack((tasks_data, dataextract_increase.y))
                tasks_y = np.vstack((tasks_y, 1))
                tasks_data = np.vstack((tasks_data, dataextract_decrease.y))
                tasks_y = np.vstack((tasks_y, 0))
            except ValueError:
                print(ident)  # ignore short windows

    return tasks_data, tasks_y
Example #15
def read_data_orig_and_seg(inputdata, i):
    """ Loads data_orig and data_seg from yaml file
    """

    reader = datareader.DataReader()
    data3d_a_path = os.path.join(inputdata['basedir'],
                                 inputdata['data'][i]['sliverseg'])
    data3d_a, metadata_a = reader.Get3DData(data3d_a_path)

    data3d_b_path = os.path.join(inputdata['basedir'],
                                 inputdata['data'][i]['sliverorig'])
    data3d_b, metadata_b = reader.Get3DData(data3d_b_path)

    #import pdb; pdb.set_trace()
    data3d_seg = (data3d_a > 0).astype(np.int8)
    data3d_orig = data3d_b

    return data3d_orig, data3d_seg
Example #16
def get_task_complexities_timeframes(path, ident, seconds):
    """Returns raw data along with task complexity class.
    TODO: join functions. Add parameter to choose different task types and complexities"""

    dataread = datareader.DataReader(path, ident)  # initialize path to data
    data = dataread.read_grc_data()  # read from files
    samp_rate = int(round(len(data[1]) / max(data[0])))
    cog_res = dataread.read_cognitive_load_study(
        str(ident) + '-primary-extract.txt')

    tasks_data = np.empty((0, seconds * samp_rate))
    tasks_y = np.empty((0, 1))

    busy_n = dataread.get_data_task_timestamps(return_indexes=True)
    relax_n = dataread.get_relax_timestamps(return_indexes=True)

    for i in cog_res['task_number']:
        task_num_table = i - 225  # 0 - 17

        ### task complexity classification
        if cog_res['task_complexity'][task_num_table] == 'medium':
            continue
        # if cog_res['task_label'][task_num_table] == 'FA' or cog_res['task_label'][task_num_table] == 'HP':
        #     continue
        if cog_res['task_label'][task_num_table] != 'NC':
            continue
        map_compl = {'low': 0, 'medium': 2, 'high': 1}
        for j in range(10):
            new_end = int(busy_n[task_num_table][1] - j * samp_rate)
            new_start = int(new_end - samp_rate * 30)
            dataextract = dataextractor.DataExtractor(
                data[0][new_start:new_end], data[1][new_start:new_end],
                samp_rate)
            try:
                tasks_data = np.vstack((tasks_data, dataextract.y))
                tasks_y = np.vstack(
                    (tasks_y,
                     map_compl.get(
                         cog_res['task_complexity'][task_num_table])))
            except ValueError:
                print(ident)

    return tasks_data, tasks_y
Example #17
def get_busy_vs_relax_timeframes(path, ident, seconds):
    """Returns raw data from either 'on task' or 'relax' time frames and their class (0 or 1).
    TODO: join functions"""

    dataread = datareader.DataReader(path, ident)  # initialize path to data
    data = dataread.read_grc_data()  # read from files
    samp_rate = int(round(len(data[1]) / max(data[0])))
    cog_res = dataread.read_cognitive_load_study(
        str(ident) + '-primary-extract.txt')

    tasks_data = np.empty((0, seconds * samp_rate))
    tasks_y = np.empty((0, 1))

    busy_n = dataread.get_data_task_timestamps(return_indexes=True)
    relax_n = dataread.get_relax_timestamps(return_indexes=True)

    for i in cog_res['task_number']:
        task_num_table = i - 225  # 0 - 17

        ### task versus relax (1 sample each)
        dataextract = dataextractor.DataExtractor(
            data[0][busy_n[task_num_table][0]:busy_n[task_num_table][1]],
            data[1][busy_n[task_num_table][0]:busy_n[task_num_table][1]],
            samp_rate)

        dataextract_relax = dataextractor.DataExtractor(
            data[0][relax_n[task_num_table][0]:relax_n[task_num_table][1]],
            data[1][relax_n[task_num_table][0]:relax_n[task_num_table][1]],
            samp_rate)
        try:
            tasks_data = np.vstack(
                (tasks_data, dataextract.y[-samp_rate * seconds:]))
            tasks_y = np.vstack((tasks_y, 1))
            tasks_data = np.vstack(
                (tasks_data, dataextract_relax.y[-samp_rate * seconds:]))
            tasks_y = np.vstack((tasks_y, 0))
        except ValueError:
            print(ident)  # ignore short windows

    return tasks_data, tasks_y
Example #18
def do_validation(m_valid, sess, valid_path):

    # dataset reader setting #

    valid_dr = dr.DataReader(valid_path["valid_input_path"], valid_path["valid_output_path"],
                             valid_path["norm_path"], dist_num=config.dist_num, is_shuffle=False, is_val=True)

    valid_cost_list = []

    while True:

        # valid_inputs, valid_labels = valid_dr.next_batch(config.batch_size)
        valid_inputs, valid_labels = valid_dr.next_batch(config.batch_size)

        feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels, m_valid.keep_prob: 1.0}

        # valid_cost, valid_softpred, valid_raw_labels\
        #     = sess.run([m_valid.cost, m_valid.softpred, m_valid.raw_labels], feed_dict=feed_dict)
        #
        # fpr, tpr, thresholds = metrics.roc_curve(valid_raw_labels, valid_softpred, pos_label=1)
        # valid_auc = metrics.auc(fpr, tpr)

        valid_cost = sess.run(m_valid.raw_cost, feed_dict=feed_dict)
        valid_cost_list.append(np.expand_dims(valid_cost, axis=1))

        if valid_dr.file_change_checker():

            valid_dr.file_change_initialize()
            if valid_dr.eof_checker():
                valid_dr.reader_initialize()
                print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break

    valid_cost_list = np.concatenate(valid_cost_list, axis=0)
    total_avg_valid_cost = np.asscalar(np.mean(valid_cost_list))

    return total_avg_valid_cost
import time
import numpy as np
# ========================================================================== #
plt.style.use('classic')
size = 20
size_config = .8
legendfont = 10
thickness = 0.3
width = .5
resolution = 300
color_value = '#6b8ba4'
# ========================================================================== #
start = '2020-04-03'
end = '2021-04-03'
dates = (start, end)
BTC = dr.DataReader('BTCUSDT', 'binance', dates)
BTCVariations = vr.Variations(BTC.Dates, BTC.Closes, normalized=True)

STORJ = dr.DataReader('STORJUSDT', 'binance', dates)
STORJVariations = vr.Variations(STORJ.Dates, STORJ.Closes, normalized=True)

ETH = dr.DataReader('ETHUSDT', 'binance', dates)
ETHVariations = vr.Variations(ETH.Dates, ETH.Closes, normalized=True)
# ========================================================================== #
max_variations = [max(BTCVariations.Variations),
                  max(ETHVariations.Variations),
                  max(STORJVariations.Variations)]

min_variations = [min(BTCVariations.Variations),
                  min(ETHVariations.Variations),
                  min(STORJVariations.Variations)]
Example #20
def update_extraction_files(path, ident):
    dataread = datareader.DataReader(path, ident)  # initialize path to data
    dataread.extract_cognitive_load_study(save_path=path + '/' + str(ident) + '/' + str(ident) + '-primary-extract.txt')

    dataread.read_grc_data()
    dataread.fix_grc_data_and_save()
Example #21
def conv_net_model_train(learning_rate, train_dir, save_dir):
    """
    The feed forward convolutional neural network model

    Hyper parameters include learning rate, number of convolutional layers and
    fully connected layers. (Currently TBD)

    """
    # Reset graphs
    tf.reset_default_graph()

    # Create placeholders
    x = tf.placeholder(dtype=tf.float32,
                       shape=[
                           None, INPUT_IMAGE_DIMENSION, INPUT_IMAGE_DIMENSION,
                           INPUT_IMAGE_CHANNELS
                       ],
                       name="x")
    y = tf.placeholder(dtype=tf.float32,
                       shape=[None, OUTPUT_VECTOR_SIZE],
                       name="y")
    weight1 = tf.Variable(tf.truncated_normal([4, 4, 3, 16], stddev=0.1),
                          dtype=tf.float32,
                          name="W1")
    bias1 = tf.Variable(tf.constant(0.1, shape=[16]),
                        dtype=tf.float32,
                        name="B1")
    weight2 = tf.Variable(tf.truncated_normal([4, 4, 16, 32], stddev=0.1),
                          dtype=tf.float32,
                          name="W2")
    bias2 = tf.Variable(tf.constant(0.1, shape=[32]),
                        dtype=tf.float32,
                        name="B2")
    weight3 = tf.Variable(tf.truncated_normal([4608, 2], stddev=0.1),
                          dtype=tf.float32,
                          name="W3")
    bias3 = tf.Variable(tf.constant(0.1, shape=[2]),
                        dtype=tf.float32,
                        name="B3")

    # First convolutional layer
    conv1 = ly.conv_layer(x, weight1, bias1, False)

    # First pooling
    pool1 = ly.pool_layer(conv1)

    # Second convolutional layer
    conv2 = ly.conv_layer(pool1, weight2, bias2, True)

    # Second pooling
    pool2 = ly.pool_layer(conv2)

    # Flatten input
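    # after the two pooling stages the feature map is 12x12 with 32 channels, so 12 * 12 * 32 = 4608 matches weight3's first dimension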
    flattened = tf.reshape(pool2, shape=[-1, 12 * 12 * 32])

    # Create fully connected layer
    logits = ly.fully_connected_layer(flattened, weight3, bias3)

    # Create loss function
    with tf.name_scope("cross_entropy"):
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))

    # Create optimizer
    with tf.name_scope("train"):
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            cross_entropy)

    # Compute accuracy
    with tf.name_scope("accuracy"):
        # argmax gets the highest value in a given dimension (in this case, dimension 1)
        # equal checks if the label is equal to the computed logits
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
        # tf.reduce_mean computes the mean across the vector
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Run model
        sess.run(tf.global_variables_initializer())
        data_reader = dr.DataReader(sess, train_dir, INPUT_IMAGE_DIMENSION,
                                    OUTPUT_VECTOR_SIZE, INPUT_IMAGE_CHANNELS)

        coord = tf.train.Coordinator()

        # Train the model
        for i in range(STEP_SIZE):
            images, labels = data_reader.get_train_batch(coord, BATCH_SIZE)

            if i % 10 == 0:
                a = sess.run(accuracy, feed_dict={x: images, y: labels})

                print("step", i, "of ", STEP_SIZE)
                print("Acc: ", a)

            # Run the training step
            sess.run(train_step, feed_dict={x: images, y: labels})

        saver.save(sess, save_dir)

    coord.request_stop()
Example #22
def compare_extracted_hr_and_band(path, ident):
    """Compater heart rates acquired wirelessly and with Microfost Band.

    :param path: (str) main path to data, where user data is located in specific folders
    :param ident: (str) user identifier
    :return: MAE, MSE, CORRelation values of the aligned HR time series
    """

    dataread = datareader.DataReader(path, ident)  # initialize path to data
    data = dataread.read_grc_data()  # read from files
    data = dataread.unwrap_grc_data()  # unwrap phase. returns time and y values

    samp_rate = round(len(data[1]) / max(data[0]))

    dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)

    cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
    end_epoch_time = dataread.get_end_time_cognitive_load_study()  # end time

    extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
    extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
    extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
        6).mean()
    extracted_br_features_roll_avg['times'] = extracted_br_features['times']
    extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']

    extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
    extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
    extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
        10).mean()
    extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
    extracted_hr_features_roll_avg['hr_ok1'] = extracted_hr_features['hr_ok']

    bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
    band_data = bandread.load()
    band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
    band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
    band_data = [band_data[0][band_data_time_start:band_data_time_stop],
                 band_data[1][band_data_time_start:band_data_time_stop]]
    band_data_new_data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]

    plt.figure(1)
    plt.clf()
    plt.plot(extracted_hr_features_roll_avg['times'], extracted_hr_features_roll_avg['hr_rate'], color='orange',
             label='Wi-Mind heart rate')

    plt.plot(band_data_new_data[0], band_data_new_data[1], color='green', label='Microsoft Band heart rate')
    plt.xlabel('time (s)')
    plt.ylabel('heart rate')
    plt.legend()
    plt.show()

    hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
    hr_data['times'] = hr_data['times'].astype(int)
    band_data = pd.DataFrame()
    band_data['times'] = band_data_new_data[0]
    band_data['times'] = band_data['times'].astype(int)
    band_data['rate'] = band_data_new_data[1]
    band_data = band_data.drop_duplicates(subset=['times'])

    together_data = pd.merge(hr_data, band_data, on='times')
    together_data = together_data.dropna()

    # new_hr = res_ind[intersect]
    # new_band = band_data_new__data[1][intersect]
    mae = metrics.mean_absolute_error(together_data['rate'], together_data['hr_rate'])
    mse = metrics.mean_squared_error(together_data['rate'], together_data['hr_rate'])
    corr = stats.pearsonr(together_data['rate'], together_data['hr_rate'])  # (correlation coefficient, p-value)
    # print('mae amd mse: ', mae, mse)

    return mae, mse, corr
Example #23
        maxXPoints = statx[1][1]
        xpoints = numpy.linspace(0.0, maxXPoints)

        plt.plot(x, y, 'x', xpoints, poly(xpoints), '-')

    pdf.savefig(fig)
    print("---")


if __name__ == '__main__':

    start = int(round(time.time() * 1000))
    files = {}

    for filename in sys.argv[1:]:
        data = datareader.DataReader(filename)
        files[data.getBasename()] = data

    helper = {}
    for fileno, filename in enumerate(files.keys()):
        for attribute in files[filename].get_numerical_attributes():
            helper[str(fileno) + "." +
                   str(attribute['position'])] = (filename,
                                                  attribute['attr_name'])

    for combination in list(itertools.combinations(helper.keys(), 2)):

        filename1, attr1 = helper[combination[0]]
        filename2, attr2 = helper[combination[1]]

        data1 = files[filename1].getData(attr1)
Example #24
def full_signal_extract(path, ident):
    """Extract breathing and heartbeat features from one user and save features to file.

    :param path: (str) main path to data, where user data is located in specific folders
    :param ident: (str) user identifier
    :return: Nothing. It saves features (dataframe) to a .csv file
    """

    dataread = datareader.DataReader(path, ident)  # initialize path to data
    data = dataread.read_grc_data()  # read from files

    data = dataread.unwrap_grc_data()  # unwrap phase. returns time and y values

    samp_rate = round(len(data[1]) / max(data[0]))

    dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)

    cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
    end_epoch_time = dataread.get_end_time_cognitive_load_study()  # end time

    extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
    extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
    extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
        6).mean()
    extracted_br_features_roll_avg['times'] = extracted_br_features['times']
    extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']

    extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
    extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
    extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
        10).mean()
    extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
    extracted_hr_features_roll_avg['hr_ok'] = extracted_hr_features['hr_ok']
    extracted_hr_features2 = dataextract.raw_windowing_heartrate(100, 1)  # longer time to extract HRV frequency feat.
    extracted_hr_features2 = extracted_hr_features2[['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf', 'times']]
    extracted_hr_features2_roll_avg = extracted_hr_features2.loc[:, extracted_hr_features2.columns != 'times'].rolling(
        10).mean()
    extracted_hr_features2_roll_avg['times'] = extracted_hr_features2['times']

    all_features = extracted_br_features_roll_avg
    all_features = pd.merge(all_features, extracted_hr_features_roll_avg, on='times')
    all_features = pd.merge(all_features, extracted_hr_features2_roll_avg, on='times')

    task_timestamps = dataread.get_data_task_timestamps()
    relax_timestamps = dataread.get_relax_timestamps()

    bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
    band_data = bandread.load()
    band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
    band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
    band_data = [band_data[0][band_data_time_start:band_data_time_stop],
                 band_data[1][band_data_time_start:band_data_time_stop]]
    band_data_new__data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]

    hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
    hr_data['times'] = hr_data['times'].astype(int)
    band_data = pd.DataFrame()
    band_data['times'] = band_data_new__data[0]
    band_data['times'] = band_data['times'].astype(int)
    band_data['band_rate'] = band_data_new__data[1]
    band_data = band_data.drop_duplicates(subset=['times'])
    together_data = pd.merge(hr_data, band_data, on='times')
    together_data = together_data.dropna()

    for i in range(len(all_features['times'])):
        find_in_hr_data = bisect(together_data['times'], all_features['times'][i])
        all_features.ix[i, 'band_rate'] = together_data['band_rate'][find_in_hr_data]

    for i in range(len(cog_res)):
        all_feat_ind_task_start = bisect(all_features['times'], task_timestamps[i][0])
        all_feat_ind_task_end = bisect(all_features['times'], task_timestamps[i][1])
        for j in cog_res.columns:
            all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, j] = cog_res.iloc[i][j]
            if cog_res.iloc[i][j] == 'GC' or cog_res.iloc[i][j] == 'PT':
                all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = True
            elif cog_res.iloc[i][j] == 'HP' or cog_res.iloc[i][j] == 'FA' or cog_res.iloc[i][j] == 'NC' or \
                    cog_res.iloc[i][j] == 'SX':
                all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = False
        for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
            all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
        for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
            all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
        all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = True

    for i in range(len(relax_timestamps)):
        all_feat_ind_task_start = bisect(all_features['times'], relax_timestamps[i][0])
        all_feat_ind_task_end = bisect(all_features['times'], relax_timestamps[i][1])
        new_end = all_feat_ind_task_end + 30
        # if i==0:
        #     continue
        for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
            all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
            all_features.ix[k + all_feat_ind_task_start, 'consecutive_break'] = i
        for k in range(new_end - all_feat_ind_task_start + 1):
            all_features.ix[k + all_feat_ind_task_start, 'on_break_and_after_index'] = k
            if k <= 15:
                all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = False
            elif k <= 30:
                all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = np.nan
            else:
                all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = True
        for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
            all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
        all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = False

    all_features['person_id'] = cog_res['person_id'][0]
    all_features.to_csv(path_or_buf=path + ident + '/' + ident + '-data.csv', index=False)
Example #25
# -*-coding:utf-8 -*-
# @Author:king
# @time:2020/2/24 11:05
# @File:tiaojianlvbo.py
# @Software:PyCharm
# Conditional filtering

import datareader

x, y, z = datareader.DataReader()
with open("tiaojian.txt", 'w') as file2write:
    for i in range(len(x)):
        if -3000 < int(x[i]) < 8000 and 4000 < int(y[i]) < 10000 and 40 < int(z[i]) < 2000:
            file2write.write(str(x[i]) + " " + str(y[i]) + " " + str(z[i]) + "\n")
Example #26
def main(argv=None):

    assert argv is not None, "The project path must be provided."

    # set train path

    train_input_path = argv[0] + '/data/train/noisy'
    train_output_path = argv[0] + '/data/train/clean'
    norm_path = argv[0] + '/data/train/norm'

    # set valid path
    valid_input_path = argv[0] + '/data/valid/noisy'
    valid_output_path = argv[0] + '/data/valid/clean'
    logs_dir = argv[1]
    #                               Graph Part                               #

    print("Graph initialization...")

    global_step = tf.Variable(0, trainable=False)

    with tf.device(config.device):
        with tf.variable_scope("model", reuse=None):
            m_train = trnmodel.Model(is_training=True, global_step=global_step)
        with tf.variable_scope("model", reuse=True):
            m_valid = trnmodel.Model(is_training=False, global_step=global_step)

    print("Done")

    #                               Summary Part                             #

    tensor_names = [n.name for n in tf.get_default_graph().as_graph_def().node]

    with open(logs_dir + "/tensor_names.txt", 'w') as f:
        for t_name in tensor_names:
            f.write("%s\n" % str(t_name))

    print("Setting up summary op...")

    writer = SummaryWriter(log_dir=logs_dir + '/summary')

    print("Done")

    #                               Model Save Part                           #

    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    print("Done")

    #                               Session Part                              #

    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    if ckpt and ckpt.model_checkpoint_path:  # model restore

        print("Model restored...")

        saver.restore(sess, ckpt.model_checkpoint_path)

        print("Done")
    else:
        sess.run(tf.global_variables_initializer())  # if the checkpoint doesn't exist, do initialization

    # datareader initialization
    train_dr = dr.DataReader(train_input_path, train_output_path, norm_path, dist_num=config.dist_num, is_training=True, is_shuffle=False)

    valid_path = {'valid_input_path': valid_input_path, 'valid_output_path': valid_output_path, 'norm_path': norm_path}
    train_path = {'train_input_path': train_input_path, 'train_output_path': train_output_path, 'norm_path': norm_path}

    for itr in range(config.max_epoch):

        start_time = time.time()
        train_inputs, train_labels = train_dr.next_batch(config.batch_size)
        feed_dict = {m_train.inputs: train_inputs, m_train.labels: train_labels, m_train.keep_prob: config.keep_prob}
        sess.run(m_train.train_op, feed_dict=feed_dict)
        elapsed_time = time.time() - start_time

        # print("time_per_step:%.4f" % elapsed_time)

        if itr % 5 == 0 and itr >= 0:

            train_cost, train_lr = sess.run([m_train.cost, m_train.lr], feed_dict=feed_dict)

            # print("Step: %d, train_cost: %.4f, train_accuracy=%4.4f, train_time=%.4f"
            #       % (itr, train_cost, train_accuracy * 100, el_tim))
            print("Step: %d, train_cost: %.4f, learning_rate: %.7f" % (itr, train_cost, train_lr))

            writer.add_scalars('training_procedure', {'train': train_cost}, itr)

        if itr % config.val_step == 0 and itr > 0:

            saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
            print('validation start!')
            valid_cost = do_validation(m_valid, sess, valid_path)

            print("valid_cost: %.4f" % valid_cost)

            writer.add_scalars('training_procedure', {'train': train_cost, 'valid': valid_cost}, itr)

        if itr % config.summary_step == 0 and itr > 0:
            if itr == config.summary_step:
                train_summary = Summary(train_path, logs_dir, name='train')
                valid_summary = Summary(valid_path, logs_dir, name='valid')

                train_summary.do_summary(m_valid, sess, itr)
                valid_summary.do_summary(m_valid, sess, itr)

            else:

                train_summary.do_summary(m_valid, sess, itr)
                valid_summary.do_summary(m_valid, sess, itr)

    writer.close()
                        counter = 0
                if counter == optimizing_parameter:
                    backtester.buy(percent, i)

            elif positions > 0:
                for previous_value in range(1, 2 * optimizing_parameter + 1):
                    if current_close > self.Closes[i - previous_value]:
                        counter += 1
                    else:
                        counter = 0
                if counter == optimizing_parameter:
                    backtester.sell(i)


if __name__ == '__main__':
    start = '2021-03-02'
    end = '2021-04-09'
    symbol = 'DOGEUSDT'
    dates = (start, end)
    BTC = dr.DataReader(symbol, 'binance', dates, tick='1h')
    Strat = ExampleStrategy(BTC.Closes, BTC.Dates, symbol)
    # Strat.BackTester.runner()
    optimize_range = range(1, 10)
    for i in optimize_range:
        Strat.N = i
        Strat.BackTester.runner()

    plt.scatter(optimize_range, Strat.BackTester.Gains, marker='x')
    plt.plot(optimize_range, Strat.BackTester.Gains, lw=.5)
    Strat.BackTester.optimizer()
Example #28
print("Done")

#                               Model Save Part                            #
print("Setting up Saver...")
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(logs_dir)
print("Done")

#                               Session Part                               #
sess_config = tf.ConfigProto(allow_soft_placement=True,
                             log_device_placement=False)
sess_config.gpu_options.allow_growth = True
sess = tf.Session(config=sess_config)

if ckpt and ckpt.model_checkpoint_path:  # model restore
    print("Model restored...")
    print(logs_dir + ckpt_name)
    saver.restore(sess, logs_dir + ckpt_name)
    print("Done")
else:
    sess.run(tf.global_variables_initializer()
             )  # if the checkpoint doesn't exist, do initialization
test_dataset = dr.DataReader(test_in,
                             test_out,
                             max_len=max_len,
                             is_shuffle=False)

acc = utils.evaluation_last(m_valid, sess, test_dataset)
print('test_acc = %f' % acc)
Example #29
    def do_summary(self, m_summary, sess, itr):

        valid_path = self.valid_path
        clean_speech = self.clean_speech
        clean_speech = utils.identity_trans(clean_speech)

        noisy_speech = self.noisy_speech
        noisy_speech = utils.identity_trans(noisy_speech)

        temp_dir = self.temp_dir
        name = self.name
        logs_dir = self.logs_dir

        writer = SummaryWriter(log_dir=self.logs_dir + '/summary')

        summary_dr = dr.DataReader(temp_dir, '', valid_path["norm_path"], dist_num=config.dist_num, is_training=False,
                                   is_shuffle=False)
        pred = []

        while True:

            summary_inputs, summary_labels = summary_dr.next_batch(config.batch_size)

            feed_dict = {m_summary.inputs: summary_inputs, m_summary.labels: summary_labels, m_summary.keep_prob: 1.0}

            pred_temp = sess.run(m_summary.pred, feed_dict=feed_dict)

            pred.append(pred_temp)

            if summary_dr.file_change_checker():
                phase = summary_dr.phase[0]

                lpsd = np.expand_dims(
                    np.reshape(np.concatenate(pred, axis=0), [-1, config.freq_size])[0:phase.shape[0], :],
                    axis=2)

                mean, std = summary_dr.norm_process(valid_path["norm_path"] + '/norm_noisy.mat')

                lpsd = np.squeeze((lpsd * std * 1.18) + mean)  # denorm

                recon_speech = utils.get_recon(np.transpose(lpsd, (1, 0)), np.transpose(phase, (1, 0)),
                                               win_size=config.win_size, win_step=config.win_step, fs=config.fs)

                # plt.plot(recon_speech)
                # plt.show()
                # lab = np.reshape(np.asarray(lab), [-1, 1])
                summary_dr.reader_initialize()
                break

        # write summary

        if itr == config.summary_step:
            writer.close()
            self.noisy_measure = utils.se_eval(clean_speech,
                                          np.squeeze(noisy_speech), float(config.fs))
            summary_fname = tf.summary.text(name + '_filename', tf.convert_to_tensor(self.noisy_dir))

            if name == 'train':

                config_str = "<br>sampling frequency: %d</br>" \
                             "<br>window step: %d ms</br>" \
                             "<br>window size: %d ms</br>" \
                             "<br>fft size: %d</br>" \
                             "<br>learning rate: %f</br><br>learning rate decay: %.4f</br><br>learning" \
                             " rate decay frequency: %.4d</br>" \
                             "<br>dropout rate: %.4f</br><br>max epoch:" \
                             " %.4e</br><br>batch size: %d</br><br>model type: %s</br>"\
                             % (config.fs, (config.win_step/config.fs*1000), (config.win_size/config.fs*1000),
                                config.nfft, config.lr, config.lrDecayRate, config.lrDecayFreq, config.keep_prob,
                                config.max_epoch, config.batch_size, config.mode)

                summary_config = tf.summary.text(name + '_configuration', tf.convert_to_tensor(config_str))

                code_list = []
                read_flag = False

                with open('./lib/trnmodel.py', 'r') as f:
                    while True:
                        line = f.readline()
                        if "def inference(self, inputs):" in line:
                            read_flag = True

                        if "return fm" in line:
                            code_list.append('<br>' + line.replace('\n', '') + '</br>')
                            break

                        if read_flag:
                            code_list.append('<br>' + line.replace('\n', '') + '</br>')

                code_list = "<pre>" + "".join(code_list) + "</pre>"

                summary_model = tf.summary.text('train_model', tf.convert_to_tensor(code_list))

                summary_op = tf.summary.merge([summary_fname, summary_config, summary_model])
            else:
                summary_op = tf.summary.merge([summary_fname])

            with tf.Session() as sess:
                summary_writer = tf.summary.FileWriter(logs_dir + '/summary/text')
                text = sess.run(summary_op)
                summary_writer.add_summary(text, 1)
            summary_writer.close()

            writer = SummaryWriter(log_dir=logs_dir + '/summary')

            writer.add_audio(name + '_audio_ref' + '/clean', clean_speech
                             /np.max(np.abs(clean_speech)), itr,
                             sample_rate=config.fs)
            writer.add_audio(name + '_audio_ref' + '/noisy', noisy_speech
                             /np.max(np.abs(noisy_speech)), itr,
                             sample_rate=config.fs)
            clean_S = get_spectrogram(clean_speech)
            noisy_S = get_spectrogram(noisy_speech)

            writer.add_image(name + '_spectrogram_ref' + '/clean', clean_S, itr)  # image_shape = (C, H, W)
            writer.add_image(name + '_spectrogram_ref' + '/noisy', noisy_S, itr)  # image_shape = (C, H, W)

        enhanced_measure = utils.se_eval(clean_speech, recon_speech, float(config.fs))
        writer.add_scalars(name + '_speech_quality' + '/pesq', {'enhanced': enhanced_measure['pesq'],
                                                                'ref': self.noisy_measure['pesq']}, itr)
        writer.add_scalars(name + '_speech_quality' + '/stoi', {'enhanced': enhanced_measure['stoi'],
                                                                'ref': self.noisy_measure['stoi']}, itr)
        writer.add_scalars(name + '_speech_quality' + '/lsd', {'enhanced': enhanced_measure['lsd'],
                                                               'ref': self.noisy_measure['lsd']}, itr)
        writer.add_scalars(name + '_speech_quality' + '/ssnr', {'enhanced': enhanced_measure['ssnr'],
                                                                'ref': self.noisy_measure['ssnr']}, itr)

        writer.add_audio(name + '_audio_enhanced' + '/enhanced', recon_speech/np.max(np.abs(recon_speech)),
                         itr, sample_rate=config.fs)
        enhanced_S = get_spectrogram(recon_speech)
        writer.add_image(name + '_spectrogram_enhanced' + '/enhanced', enhanced_S, itr)  # image_shape = (C, H, W)
        writer.close()
Example #30
        # self.resnet = mobilenet.MobileFaceHead([2, 8, 16, 4])
        self.resnet = Inception.Inception3(512)
        self.classifier = losspart.MarginalCosineLayer(num_classes)

    def forward(self, x, label):
        feat = self.resnet(x)
        # feat = tf.nn.dropout(feat, 0.4)
        logits = self.classifier(feat, label, 1.0, 0.5, 0.0)
        logits = logits * 64
        return logits


BSIZE = 2
print(BSIZE)
EPOCH = 30
data_reader = datareader.DataReader('imglist_iccv.txt', BSIZE)
tf.keras.backend.set_learning_phase(True)


def lr_decay(step):
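    # piecewise-constant decay: lr = 0.1 * 0.1 ** floor(step / 30000), i.e. 0.1 for the first 30k steps, then 0.01, 0.001, ...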
    lr = 0.1
    step = step / 30000
    step = tf.math.floor(step)
    step = tf.math.pow(0.1, step)
    lr = lr * step
    return lr


def grad_loss(x, model):
    data, label = x
    with tf.GradientTape() as tape: