Example #1
    def __init__(self):
        # Loading matrices from given data
        data = data_handler()
        #data = data_handler("../data/ciao/rating_with_timestamp.mat", "../data/ciao/trust.mat")
        self.R_train, self.R_test, self.W, self.PF, self.mu = data.load_matrices()
        # Getting unique users and products used in Training data
        self.prod = np.unique(self.R_train[:, 1])
        self.users = np.unique(self.R_train[:, 0])
        self.n_users, self.n_prod, self.n_cat = data.get_stats()
        common_users = np.intersect1d(self.R_test[:, 0], self.users)
        common_prod = np.intersect1d(self.R_test[:, 1], self.prod)
        self.R_test = self.R_test[np.in1d(self.R_test[:, 0], common_users)]
        self.R_test = self.R_test[np.in1d(self.R_test[:, 1], common_prod)]
        # Creating R_train and R_test dictionaries
        self.R_train_ui = dict(
            zip(zip(self.R_train[:, 0], self.R_train[:, 1]), self.R_train[:, 2]))
        self.R_test_ui = dict(
            zip(zip(self.R_test[:, 0], self.R_test[:, 1]), self.R_test[:, 2]))
        # Initializing parameters to be estimated
        self.A = np.zeros((self.n_users + 1, self.n_users + 1, self.n_cat))
        for u, v in self.W:
            self.A[v, u, :] = np.random.rand()
        self.B = np.random.rand(self.n_users + 1, self.n_cat)
        self.C = np.random.rand(self.n_prod + 1)
        self.E = copy.deepcopy(self.R_train_ui)
        self.V = {}
        self.getNui()
        #self.getNuiFromData()
        self.alpha = 0.3
        self.l = 0.1
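Neither example shows data_handler itself. Below is a minimal stub of the interface the constructors above rely on, with every name, shape, and column position inferred from the call sites, so treat them as assumptions (Example #1 reads ratings from column 2 while Example #2 reads column 3, so the real row layout presumably carries extra category/timestamp columns):

```python
import numpy as np

class data_handler:
    """Hypothetical stub matching the interface used in Examples #1 and #2."""

    def __init__(self, rating_path=None, trust_path=None,
                 n_users=100, n_items=200, n_cat=6):
        rng = np.random.default_rng(0)
        # Rows of (user, item, rating); the real class loads these from .mat files.
        ratings = np.column_stack([
            rng.integers(1, n_users + 1, 1000),
            rng.integers(1, n_items + 1, 1000),
            rng.integers(1, 6, 1000),
        ])
        split = int(0.8 * len(ratings))
        self._train, self._test = ratings[:split], ratings[split:]
        # Trust edges as (truster, trustee) pairs.
        self._W = np.column_stack([
            rng.integers(1, n_users + 1, 500),
            rng.integers(1, n_users + 1, 500),
        ])
        self._stats = (n_users, n_items, n_cat)

    def load_matrices(self):
        mu = self._train[:, 2].mean()  # global mean rating
        PF = None                      # product features; unspecified in the snippets
        return self._train, self._test, self._W, PF, mu

    def get_stats(self):
        # (n_users, n_products, n_categories)
        return self._stats
```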
Example #2
    def __init__(self):
        # Loading matrices from given data
        data = data_handler(CURRENT_DIR + "data/rating_with_timestamp.mat",
                            CURRENT_DIR + "data/trust.mat")
        #data = data_handler("data/ciao/rating_with_timestamp.mat", "data/ciao/trust.mat")
        self.R_train, self.R_test, self.W, self.PF, self.mu = data.load_matrices()
        # Getting unique users and products used in Training data
        self.prod = np.unique(self.R_train[:, 1])
        self.users = np.unique(self.R_train[:, 0])
        #print self.prod
        self.n_users, self.n_prod, self.n_cat = data.get_stats()
        common_users = np.intersect1d(self.R_test[:, 0], self.users)
        common_prod = np.intersect1d(self.R_test[:, 1], self.prod)
        self.R_test = self.R_test[np.in1d(self.R_test[:, 0], common_users)]
        self.R_test = self.R_test[np.in1d(self.R_test[:, 1], common_prod)]
        # Creating R_train and R_test dictionaries
        self.R_train_ui = dict(
            zip(zip(self.R_train[:, 0], self.R_train[:, 1]), self.R_train[:, 3]))

        # Leftover scaffolding: prefill a dense dict with zeros (not used below)
        x_R_train_ui = {}
        for i in range(1000):
            for j in range(1000):
                x_R_train_ui[i, j] = 0
        for i in range(100):
            for j in range(100):
                pass
                #print i,j
                #x_R_train_ui[(R_train[:,0][i],x_R_train[:,1][j])]=x_R_train[:,3][i]

        self.R_test_ui = dict(
            zip(zip(self.R_test[:, 0], self.R_test[:, 1]), self.R_test[:, 3]))

        x_R_test_ui = {}
        for i in range(1000):
            for j in range(1000):
                x_R_test_ui[(i, j)] = 0
        for i in range(100):
            pass
            #print i,j
            #self.R_test_ui[(self.R_test[:,0][i],self.R_test[:,1][j])]=self.R_test[:,3][i]

        #print self.R_train_ui
        self.A = np.zeros((self.n_users + 1, self.n_users + 1, self.n_cat))
        for u, v in self.W:
            #print str(u) + " " + str(v)
            self.A[v, u, :] = 1
        print(self.n_users)

        self.C = np.random.rand(1001)
        self.E = copy.deepcopy(self.R_train_ui)
        self.B = np.random.rand(1001, self.n_cat)

        self.V = {}
        #self.getNui()
        self.Nui()
        self.alpha = float(sys.argv[1])
        self.l = float(sys.argv[2])
Example #3
def main():

    config = tf.ConfigProto(log_device_placement=True)
    config.gpu_options.allow_growth = True
    K.tensorflow_backend.set_session(tf.Session(config=config))

    options = get_options()
    mean = options.mean_name
    path = options.path
    fold_test = options.fold_test - 1 
    table_name = options.table
    inner_cross_validation_number = options.inner_cross_validation_number
    batch_size = options.batch_size
    class_type = options.class_type
    seed = options.seed
    

    ### data business
    data = data_handler(path, fold_test, 
                        table_name, options.n_fold, 
                        batch_size, mean, options)
    if options.y_variable in ["RCB_class", "ee_grade"]:
        columns = ['loss', 'acc', 'recall', 'precision', 'f1', 'val_loss', 
                   'val_acc', 'val_recall', 'val_precision', 'val_f1', 
                   'test_loss', 'test_acc', 'test_recall', 'test_precision', 
                   'test_f1', 'hidden_btleneck', 'hidden_fcn', 'drop_out', 
                   'validation_fold', 'learning_rate', 'weight_decay', 
                   'gaussian_noise', 'k', 'model', 'pooling', 'batch_size', 
                   'size', 'input_depth', 'fold_test', 'run_number']
    else:
        columns = ['loss', 'mean_squared_error', 'val_loss', 
                   'val_mean_squared_error', 'test_loss', 'test_mean_squared_error', 
                   'hidden_btleneck', 'hidden_fcn', 'drop_out', 
                   'validation_fold', 'learning_rate', 'weight_decay', 
                   'gaussian_noise', 'k', 'model', 'pooling', 'batch_size', 
                   'size', 'input_depth', 'fold_test', 'run_number']

    results_table = DataFrame(index=range(options.repeat*(options.n_fold - 1)), columns=columns) # Link with the previous table ? Or just result_table ?
    options.run = 0
    for i in range(options.repeat):
        for j in range(options.n_fold - 1): # Defines which fold will be the validation fold
            parameter_dic = sample_hyperparameters(options, j)
            model = load_model(parameter_dic, options)
            print("begin training", flush=True)
            model, history = train_model(model, data, parameter_dic, options)
            scores, predictions = evaluate_test(model, data, options)
            results_table = fill_table(history, scores, results_table, parameter_dic, options)

            K.clear_session()
            del model
            results_table.to_csv(options.output_name, index=False)
            predictions.to_csv("predictions_run_{}_fold_test_{}.csv".format(options.run, options.fold_test))
            options.run += 1
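sample_hyperparameters is not shown in this snippet. Judging purely from the column names written into results_table, it plausibly draws one random configuration per (repeat, validation fold) pair; a hypothetical sketch, with all value ranges invented for illustration:

```python
import random

def sample_hyperparameters(options, validation_fold):
    """Hypothetical: draw one random configuration for this validation fold."""
    return {
        'hidden_btleneck': random.choice([32, 64, 128]),
        'hidden_fcn': random.choice([64, 128, 256]),
        'drop_out': random.uniform(0.0, 0.5),
        'validation_fold': validation_fold,
        'learning_rate': 10 ** random.uniform(-5, -3),
        'weight_decay': 10 ** random.uniform(-6, -3),
        'gaussian_noise': random.uniform(0.0, 0.1),
        'k': random.choice([1, 5, 10]),
        'pooling': random.choice(['max', 'average']),
    }
```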
Example #4
    def __init__(self):
        
        print "Code started"
        # Load trust and rating matrices
        data = data_handler("../data/rating.txt", "../data/trust.txt")
        self.trusts, self.ratings = data.load()
        print "Initializing"
        self.n_users, self.n_items, self.n_trusts, self.n_ratings = data.get_stats()
        self.n = self.n_users
        self.m = self.n_trusts
        self.agents = []
        self.W = np.zeros((self.n_users, self.n_users), dtype=np.float32)
        self.T = np.zeros((self.n_users, self.n_users), dtype=np.float32)
        self.R = np.zeros((self.n_users, self.n_users), dtype=np.float32)
        self.n_it = 5 * self.n
        self.communities = np.arange(self.n_users)
        self.Labels = []
        self.alpha = 0.5
        self.centers1 = []
        self.centers2 = []
        self.centers3 = []
        self.centers4 = []
        self.centers5 = []
        self.centers6 = []

        # Making agents where each agent has its own label
        for i in range(self.n_users):
            a = agent()
            # Each user has its own label
            a.L.append(i)
            self.Labels.append([i])
            a.trustor = sum(self.trusts[:, i])
            a.trustee = sum(self.trusts[i, :])
            a.deg = a.trustee + a.trustor
            a.neighbors = np.union1d(np.where(self.trusts[i, :] == 1)[0], np.where(self.trusts[:, i] == 1)[0])
            self.agents.append(a)

        # Calculating Wij i.e. the number of common neighbors i and j have
        for i in range(self.n_users):
            for j in range(i, self.n_users):
                self.W[j, i] = self.W[i, j] = np.intersect1d(self.agents[i].neighbors, self.agents[j].neighbors).size
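The double loop above makes O(n²) np.intersect1d calls. Since each agent's neighbor set is just the union of its in- and out-edges in self.trusts, the same W can be obtained from a single matrix product; a sketch, assuming self.trusts is the dense 0/1 array the code above implies:

```python
import numpy as np

def common_neighbor_counts(trusts):
    """W[i, j] = number of users adjacent (in either direction) to both i and j."""
    adj = ((trusts + trusts.T) > 0).astype(np.float32)  # symmetrized 0/1 adjacency
    # (adj @ adj)[i, j] = sum_k adj[i, k] * adj[k, j] = |common neighbors of i and j|
    return adj @ adj
```

The result should match the loop entry for entry, including the diagonal, where both versions store each user's own neighbor count.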
Example #5
    def graph_XY(self):
        SIZE = 5

        dl = data_handler(self.file_path)
        XYZ = dl.get_channel_locations()
        channel_names = dl.get_channel_names()

        for band in self.bands:
            QEEG = np.zeros((SIZE * 2 + 1, SIZE * 2 + 1))

            X = np.array(XYZ[0])
            X = (X + abs(np.amin(X)))
            X = (X / np.amax(X)) * SIZE * 2

            Y = np.array(XYZ[1])
            Y = (Y + abs(np.amin(Y)))
            Y = (Y / np.amax(Y)) * SIZE * 2

            for x, y, channel_name in zip(X[:64], Y[:64], channel_names[:64]):
                QEEG[int(x)][int(y)] = self.bands[band][channel_name]

            plt.imshow(QEEG, interpolation='bessel')
            plt.show()
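One caveat in graph_XY: shifting by abs(np.amin(X)) only moves the minimum to 0 when the coordinates contain negative values; for all-positive coordinates the grid indices drift. A plain min-max scaling sketch that covers both cases (to_grid is a hypothetical helper, not part of data_handler):

```python
import numpy as np

def to_grid(coords, size=5):
    """Scale a 1-D coordinate array onto [0, 2 * size] regardless of sign."""
    coords = np.asarray(coords, dtype=float)
    span = np.ptp(coords)  # max - min
    if span == 0:
        return np.zeros_like(coords)
    return (coords - coords.min()) / span * size * 2
```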
Example #6
import random
import ibmiotf.device, time, json
import minimalmodbus
from data_handler import data_handler
DateString = "%Y-%m-%d"
TimeString = "%H:%M:%S"
DATA = data_handler()
form = {}


def send_ACK(msg):
    print(msg)
    if client.publishEvent("status", "json", json.dumps(msg), 2):
        print("ACK sended to cloud")
    else:
        print("Can't send ACK")


def get_registers(Registers):
    temp = Registers.split(",")
    return temp


def myCommandCallback(cmd):
    print("Command received: %s" % cmd.data)

    if (cmd.data["MSG_FOR"] == "COMMUNICATION"):
        DATA.update_COMMUNICATION_CONFIG(cmd.data)
        send_ACK(cmd.data)

    if (cmd.data["MSG_FOR"] == "NODE"):
Example #7
from data_handler import data_handler
from model1 import user2vec
import pdb
import numpy as np
#n = data.shape[0]
data = data_handler("rating_with_timestamp.mat", "trust.mat", "rating_with_timestamp.mat")
data.load_matrices()
n = data.n
i = data.i
h = 100
d = 50
n_epochs = 5
u2v = user2vec(n, h, d, i)
#u2v.model1()
u2v.model_batch_uu()
u2v.model_batch_ui()

# Training for batch mode
def training_batch(batch_size): 
    # U-U part
    ind = 0
    f = open('Trust.txt','r')
    batch = []
    print "u-U training started"

    for epoch in range(n_epochs):
        batch = []
        print "Initiating epoch %d"%epoch
        with open('train.txt', 'r') as f:
            for line in f:
                data1 = line.strip()
Example #8
    save = tf.train.Saver()

    save_dir = os.path.expanduser("~/documents/mdn_") + str(
        datetime.datetime.today()).replace(":", "-").replace(" ", "-")
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    writer = tf.summary.FileWriter(save_dir, graph=session.graph)

    #data_files = os.listdir(data_dir)
    #data_files = [os.path.join(data_dir, d) for d in data_files if ".csv" in d]
    data_file = data_dir

    if args.truncate_data:
        print("Using the truncated dataset.")
        # NOTE: data_file is a single path string here, so [0:100] slices
        # characters; it only makes sense with the commented-out file list above.
        dh = dh.data_handler(data_file[0:100], [.7, .15, .15])
    else:
        print("Using the full dataset.")
        dh = dh.data_handler(data_file, [.7, .15, .15])

    for i in range(args.num_iterations):
        start_time = datetime.datetime.now()
        train = dh.get_train_batch(32, 300)
        # loss, means, stdevs, mix
        things = mdn_model.train_batch(train["X"], train["y"])
        print("mixture evaluation max: ", np.amax(things[-1]), "min: ",
              np.amin(things[-1]))
        print("individual gaussian evaluations max: ", np.amax(things[-2]),
              "min: ", np.amin(things[-2]))
        if i % 100 == 0:
            print("  saving images", i)
Example #9
def prototypeTrainHuge(tipo=1, mean_volume=True, single_volume=0):

    if True:
        # Debug toggle: this single-subject list is immediately overwritten below.
        subjects = ['Subject3']

        subjects = [
            'Subject1', 'Subject2', 'Subject3', 'Subject4', 'Subject5'
        ]

    roi = 'ROI_VC'
    if tipo == 1:
        classification, encoder, decoder = makeCoders()
    else:
        classification, encoder, decoder = makeCoders(0)

    wb = Workbook()
    ws = wb['Sheet']
    ws.title = "Huge classes mean data"

    for ws in wb.worksheets:
        ws['B1'] = 'Subject 1'
        ws['C1'] = 'Subject 2'
        ws['D1'] = 'Subject 3'
        ws['E1'] = 'Subject 4'
        ws['F1'] = 'Subject 5'
        ws['G1'] = 'Mean'
        ws['H1'] = 'Standard Deviation'
        ws['I1'] = 'CI'

        # average
        ws['G2'] = '=AVERAGE(B2:F2)'
        ws['G3'] = '=AVERAGE(B3:F3)'
        ws['G4'] = '=AVERAGE(B4:F4)'
        ws['G5'] = '=AVERAGE(B5:F5)'

        ws['G8'] = '=AVERAGE(B8:F8)'
        ws['G9'] = '=AVERAGE(B9:F9)'
        ws['G10'] = '=AVERAGE(B10:F10)'
        ws['G11'] = '=AVERAGE(B11:F11)'

        # standard deviation
        ws['H2'] = '=STDEV(B2:F2)'
        ws['H3'] = '=STDEV(B3:F3)'
        ws['H4'] = '=STDEV(B4:F4)'
        ws['H5'] = '=STDEV(B5:F5)'

        ws['H8'] = '=STDEV(B8:F8)'
        ws['H9'] = '=STDEV(B9:F9)'
        ws['H10'] = '=STDEV(B10:F10)'
        ws['H11'] = '=STDEV(B11:F11)'

        # confidence interval
        ws['I2'] = '=CONFIDENCE(0.05, H2, 5)'
        ws['I3'] = '=CONFIDENCE(0.05, H3, 5)'
        ws['I4'] = '=CONFIDENCE(0.05, H4, 5)'
        ws['I5'] = '=CONFIDENCE(0.05, H5, 5)'

        ws['I8'] = '=CONFIDENCE(0.05, H8, 5)'
        ws['I9'] = '=CONFIDENCE(0.05, H9, 5)'
        ws['I10'] = '=CONFIDENCE(0.05, H10, 5)'
        ws['I11'] = '=CONFIDENCE(0.05, H11, 5)'

        # line names
        ws['A2'] = 'Top 1  Accuracy'
        ws['A3'] = 'Top 5  Accuracy'
        ws['A4'] = 'Top 10 Accuracy'
        ws['A5'] = 'Kamitani  Score'

        ws['A7'] = 'Imagined data'

        ws['A8'] = 'Top 1  Accuracy'
        ws['A9'] = 'Top 5  Accuracy'
        ws['A10'] = 'Top 10 Accuracy'
        ws['A11'] = 'Kamitani  Score'

    for sbj in subjects:
        # load prep file generated from bdPy files from Kamitani Lab
        print('')
        print('Processing %s' % sbj)
        print('Loading data from disk.')
        subjectFile = '%s/%s.h5' % (fmri_dir, sbj)
        handler = data_handler(subjectFile)
        x_train, x_test, x_test_avg, x_imag, x_imag_avg = handler.get_data(
            roi=roi, unityNormalization=1, imag_data=1)
        lbl_train, lbl_test, lbl_imag = handler.get_files(imag_data=1)
        _, lbl_test_idx, lbl_imag_idx = handler.get_indices(imag_data=1)

        lbl_test_avg = []
        lbl_imag_avg = []
        for i in range(50):
            idx = np.where(lbl_test_idx == i)[0][0]
            lbl_test_avg.append(lbl_test[idx])

            idx = np.where(lbl_imag_idx == i)[0][0]
            lbl_imag_avg.append(lbl_imag[idx])

        print('Min x_train : ', np.min(x_train))
        print('Max x_train : ', np.max(x_train))

        print('Min x_test : ', np.min(x_test))
        print('Max x_test : ', np.max(x_test))

        print('Min x_imag : ', np.min(x_imag))
        print('Max x_imag : ', np.max(x_imag))

        decode_id = 'teste'
        testData = x_test
        testLabels = lbl_test

        featuresTrain, featuresTest = createFeatures(encoder, tipo, img_dir,
                                                     lbl_train, testLabels)
        y_train = torch.cat(featuresTrain)
        y_test = torch.cat(featuresTest)

        column = getColumn(sbj)

        minTreino = np.min(y_train.cpu().data.numpy())
        maxTreino = np.max(y_train.cpu().data.numpy())

        pred_y, train_pred, transNet, lossCurve = run_decode_cnn(
            decode_id, x_train, y_train, testData, y_test, minTreino,
            maxTreino)

        print('Accuracy with all test data')
        if single_volume:
            ws = wb.worksheets[0]
            printResults(ws, column, 1,
                         'Accuracy Huge classes (prototype mean)', pred_y,
                         testLabels, 5, tipo)

        print('')
        print('Accuracy with average test data')
        testData = x_test_avg
        testLabels = lbl_test_avg

        testData = torch.from_numpy(testData).float()
        pred_y = []
        for i in range(testData.shape[0]):
            prediction = transNet(testData[i].cuda())
            pred_y.append(prediction)

        if mean_volume:
            ws = wb.worksheets[0]
            printResults(ws, column, 1,
                         'Accuracy Huge classes (prototype mean)', pred_y,
                         testLabels, 5, tipo)

        testData = x_imag
        testLabels = lbl_imag

        testData = torch.from_numpy(testData).float()
        pred_y = []
        for i in range(testData.shape[0]):
            prediction = transNet(testData[i].cuda())
            pred_y.append(prediction)

        print('Accuracy with all imag data')

        if single_volume:
            ws = wb.worksheets[0]
            printResults(ws, column, 7,
                         'Accuracy Huge classes (prototype mean)', pred_y,
                         testLabels, 5, tipo)

        print('')
        print('Accuracy with average imag data')
        testData = x_imag_avg
        testLabels = lbl_imag_avg

        testData = torch.from_numpy(testData).float()
        pred_y = []
        for i in range(testData.shape[0]):
            prediction = transNet(testData[i].cuda())
            pred_y.append(prediction)

        if mean_volume:
            ws = wb.worksheets[0]
            printResults(ws, column, 7,
                         'Accuracy Huge classes (prototype mean)', pred_y,
                         testLabels, 5, tipo)

    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
    if tipo == 1:
        timestamp = '4096_' + timestamp
    else:
        timestamp = '25088_' + timestamp

    filename = os.path.join(resultDir,
                            'resultados_huge_' + timestamp + '.xlsx')
    wb.save(filename=filename)

    # opening and saving excel files as openpyxl does not calculate formulae.
    # Need excel installed
    openExcelFile(filename)

    return filename
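As the comment says, openpyxl writes formulas without evaluating them, which is why the script shells out to Excel. For completeness, a small sketch of the read-side caveat: load_workbook(data_only=True) can only return values that a spreadsheet application has already computed and cached:

```python
from openpyxl import load_workbook

# data_only=True yields cached formula results; the cache only exists after the
# file has been opened and re-saved by a spreadsheet application such as Excel.
wb = load_workbook(filename, data_only=True)
ws = wb.worksheets[0]
print(ws['G2'].value)  # None until the formulas have been evaluated externally
```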
Example #10
def plot_psds_topomap(
        psds, freqs, pos, agg_fun=None, vmin=None, vmax=None, bands=None,
        cmap=None, dB=True, normalize=False, cbar_fmt='%0.3f', outlines='head',
        axes=None, show=True, sphere=None):
    """Plot spatial maps of PSDs.
    Parameters
    ----------
    psds : np.ndarray of float, shape (n_channels, n_freqs)
        Power spectral densities
    freqs : np.ndarray of float, shape (n_freqs)
        Frequencies used to compute psds.
    pos : numpy.ndarray of float, shape (n_sensors, 2)
        The positions of the sensors.
    agg_fun : callable
        The function used to aggregate over frequencies.
        Defaults to np.sum if normalize is True, else np.mean.
    vmin : float | callable | None
        The value specifying the lower bound of the color range.
        If None, np.min(data) is used. If callable, the output equals
        vmin(data).
    vmax : float | callable | None
        The value specifying the upper bound of the color range.
        If None, the maximum absolute value is used. If callable, the output
        equals vmax(data). Defaults to None.
    bands : list of tuple | None
        The lower and upper frequency and the name for that band. If None,
        (default) expands to:
            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
    cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
        Colormap to use. If tuple, the first value indicates the colormap to
        use and the second value is a boolean defining interactivity. In
        interactive mode the colors are adjustable by clicking and dragging the
        colorbar with left and right mouse button. Left mouse button moves the
        scale up and down and right mouse button adjusts the range. Hitting
        space bar resets the range. Up and down arrows can be used to change
        the colormap. If None (default), 'Reds' is used for all positive data,
        otherwise defaults to 'RdBu_r'. If 'interactive', translates to
        (None, True).
    dB : bool
        If True, transform data to decibels (with ``10 * np.log10(data)``)
        following the application of `agg_fun`. Only valid if normalize is
        False.
    normalize : bool
        If True, each band will be divided by the total power. Defaults to
        False.
    cbar_fmt : str
        The colorbar format. Defaults to '%%0.3f'.
    %(topomap_outlines)s
    axes : list of axes | None
        List of axes to plot consecutive topographies to. If None the axes
        will be created automatically. Defaults to None.
    show : bool
        Show figure if True.
    %(topomap_sphere)s
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        Figure distributing one image per channel across sensor topography.
    """
    import matplotlib.pyplot as plt

    if bands is None:
        bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
                 (12, 30, 'Beta'), (30, 45, 'Gamma')]

    if agg_fun is None:
        agg_fun = np.sum if normalize is True else np.mean

    if normalize is True:
        psds /= psds.sum(axis=-1)[..., None]
        assert np.allclose(psds.sum(axis=-1), 1.)

    n_axes = len(bands)
    if axes is not None:
        fig = axes[0].figure
    else:
        fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
        if n_axes == 1:
            axes = [axes]

    for ax, (fmin, fmax, title) in zip(axes, bands):
        try:
            freq_mask = (fmin < freqs) & (freqs < fmax)
            if freq_mask.sum() == 0:
                raise RuntimeError('No frequencies in band "%s" (%s, %s)'
                                   % (title, fmin, fmax))
            # Aggregate the PSD over the frequencies belonging to this band
            data = agg_fun(psds[:, freq_mask], axis=1)
            if dB is True and normalize is False:
                data = 10 * np.log10(data)
                unit = 'dB'
            else:
                unit = 'power'
        except Exception:
            print('fail')

    fig.canvas.draw()
    plt_show(show)
    return fig

def plt_show(show=True, fig=None, **kwargs):
    """Show a figure while suppressing warnings.
    Parameters
    ----------
    show : bool
        Show the figure.
    fig : instance of Figure | None
        If non-None, use fig.show().
    **kwargs : dict
        Extra arguments for :func:`matplotlib.pyplot.show`.
    """
    from matplotlib import get_backend
    import matplotlib.pyplot as plt
    if show and get_backend() != 'agg':
        (fig or plt).show(**kwargs)
############################

def is_MDD(filename):
	val = [1,2,50,99]
	subject = filename[:3]
	for fields in MDD_list:
		if subject == fields[0] and fields[1] != '50':
			return np.eye(len(val))[val.index(int(fields[1]))]  # possible values are 1, 2, 50, 99
	return [0,0,0,0]

def len_check(sliced_channel):
	# Pad with zeros until the slice reaches LENGTH samples
	while len(sliced_channel) < LENGTH:
		sliced_channel.append(0)
	return sliced_channel

def balance():
	classes = [0, 0, 0, 0]  # counts per class, indices for labels [1, 2, 50, 99]
	for _, one_hot in band_amps:
		classes[np.argmax(one_hot)] += 1
	min_class = min([classes[0], classes[-1]])
	classes = [0, 0, 0, 0]  # reset and re-count while collecting
	balanced = []

	for data in band_amps:
		if classes[np.argmax(data[1])] < min_class:
			balanced.append(data)
			classes[np.argmax(data[1])] += 1
	return balanced

LENGTH = 15000
ELECTRODES = 64
MAX_Hz = 60
PATH = "C:\OneDrive - Cumberland Valley School District\EEG ScienceFair\database\depression\Matlab Files"
MDD_list = np.genfromtxt("C:\OneDrive - Cumberland Valley School District\EEG ScienceFair\database\depression\Data_4_Import_REST.csv", delimiter=',', dtype=str)
band_amps = []

for filename in tqdm(os.listdir(PATH)):
	class_ = is_MDD(filename)
	if max(class_) == 0:
		continue
	file_path = f"{PATH}\{filename}"
	dl = data_handler(file_path)		# Add inheritance between data_handler & channel_bands_amp
	channel_data = dl.get_EEG()[:ELECTRODES]
	channel_locations = dl.get_channel_locations()
	PSDS = []
	sliced_channels = []
	POS = []
	for channel, name, pos in zip(channel_data, dl.get_channel_names(), channel_locations):
		fft = fast_fourier_transform(channel)
		FFT = fft.FFT(MAX_Hz)
		# periodogram returns (freqs, Pxx); [0] keeps the frequency axis here
		PSDS.append([periodogram(FFT, 500)[0]])
		POS.append([pos[0], pos[1]])

	plot_psds_topomap(np.array(PSDS), 500, np.array(POS))
	break
Example #11
pg.setConfigOption('background', (33, 33, 33))
pg.setConfigOption('foreground', (197, 198, 199))
# Interface variables
app = QtGui.QApplication([])
view = pg.GraphicsView()
Layout = pg.GraphicsLayout()
view.setCentralItem(Layout)
view.show()
view.setWindowTitle('Flight monitoring')
view.resize(1200, 700)

# declare object for serial Communication
ser = Communication()
# declare object for storage in CSV
data_base = data_handler()
# Fonts for text items
font = QtGui.QFont()
font.setPixelSize(90)

# Title at top
text = """
Flight monitoring interface.
"""
Layout.addLabel(text, col=1, colspan=21)
Layout.nextRow()

# Put vertical label on left side
Layout.addLabel('LIDER - ATL research hotbed', angle=-90, rowspan=3)

Layout.nextRow()
Example #12
import numpy as np
import os
import data_handler as dh

if __name__ == "__main__":
    data_dir = "data_clean/data_2018-03-25-16-15-56.863776"
    data_files = os.listdir(data_dir)
    data_files = [os.path.join(data_dir, d) for d in data_files if ".csv" in d]

    dh = dh.data_handler(data_files, [.7, .15, .15])

    for i in range(100000000):
        train = dh.get_train_batch(32, 300)
Example #13
LENGTH = 15000
ELECTRODES = 64
MAX_Hz = 60
PATH = "C:\OneDrive - Cumberland Valley School District\EEG ScienceFair\database\depression\Matlab Files"
MDD_list = np.genfromtxt("C:\OneDrive - Cumberland Valley School District\EEG ScienceFair\database\depression\Data_4_Import_REST.csv", delimiter=',', dtype=str)
band_amps = []

for filename in tqdm(os.listdir(PATH)):
	class_ = is_MDD(filename)
	if max(class_) == 0:
		continue
	file_path = f"{PATH}\{filename}"
	dl = data_handler(file_path)		# Add inheritance between data_handler & channel_bands_amp
	channel_data = dl.get_EEG()[:ELECTRODES]
	channel_locations = dl.get_channel_locations()

	sliced_channels = []
	for channel in channel_data:
		fft = fast_fourier_transform(channel, MAX_Hz)
		sliced_channels.append(fft.get_bands()['Delta'])
	#break
	#print(sliced_channels)

	x = channel_locations[0][:ELECTRODES]
	y = channel_locations[1][:ELECTRODES]
	
Example #14
sfreq = 500
LEN_SEC = 4
BUFF_SEC = 0.5

LENGTH = int(LEN_SEC * sfreq)        # samples per window: 4 s at 500 Hz = 2000
BUFFER_ZONE = int(BUFF_SEC * sfreq)  # samples skipped at each end: 250
PATH = r"C:\OneDrive - Cumberland Valley School District\EEG ScienceFair\database\ADHD\ADH0"

mat_files = []
for fn in os.listdir(PATH):
    if fn.endswith('.mat') and fn != 'chan.mat':
        mat_files.append(fn)

BATCH = []

for filename in tqdm(mat_files):
    dl = data_handler(os.path.join(PATH, filename), data_name=filename[:-4])
    # Synthetic stand-in data; np.arange avoids building an 84-million-element list
    fake_data = np.arange(300 * 56 * 5000).reshape((300, 56, 5000))
    print(fake_data.shape)
    for trial in tqdm(fake_data):
        data = np.transpose(trial)  # shape: (5000, 56)
        for i in range(BUFFER_ZONE,
                       len(data) - BUFFER_ZONE + LENGTH,
                       LENGTH):  # step by LENGTH; the final slice clips at the array end
            batcher([np.transpose(data[i:i + LENGTH]), [CLASS_VALUE]
                     ])  # before shape (len, 56) : after shape (56, len)
    break
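The stop bound len(data) - BUFFER_ZONE + LENGTH lets the last window start past the trailing buffer, relying on the slice to clip silently. A bounds-checked variant of the same windowing as a standalone sketch (batcher and CLASS_VALUE from the snippet above are left out):

```python
import numpy as np

def windows(data, length, buffer_zone):
    """Yield full windows of `length` samples, skipping `buffer_zone` at each end.

    data: array of shape (n_samples, n_channels).
    """
    stop = len(data) - buffer_zone - length + 1  # last admissible start index + 1
    for i in range(buffer_zone, stop, length):
        yield np.transpose(data[i:i + length])   # shape (n_channels, length)

# With 5000 samples, buffer_zone=250 and length=2000 this yields starts 250 and
# 2250 only; a window starting at 4250 would cross into the trailing buffer.
```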
Example #15
    def __init__(self, file_path):
        self.names = dict()
        dl = data_handler(file_path)
        list_names = dl.get_channel_names()
        for name in list_names[:64]:
            self.names[name] = None