Example #1
def task():  # show a "loading..." message while data is fetched
    global root
    print(flag)
    # code
    root.after(1000, task)  # 1000 is the loop interval in milliseconds
    if flag == 1:
        get_data.get()
        NextMenu()
Example #2
# imports assumed for this snippet: MicroPython's machine and time modules and the
# standard ssd1306 driver; the get() and upload() helpers are defined elsewhere
from machine import Pin, I2C
from ssd1306 import SSD1306_I2C
import time


def main_loop():
    # sensor I2C bus on GPIO14 (SCL) / GPIO12 (SDA)
    gpio_scl = Pin(14)
    gpio_sda = Pin(12)
    i2c = I2C(scl=gpio_scl, sda=gpio_sda, freq=400000)
    print(i2c.scan())
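    # configure the sensor at I2C address 87 (0x57) by writing its control
    # registers (the values suggest a MAX3010x-style SpO2/heart-rate front end,
    # but the exact chip is an assumption)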
    i2c.writeto_mem(87,1,b'\xf0')
    i2c.writeto_mem(87,3,b'\x02')
    i2c.writeto_mem(87,33,b'\x01')
    i2c.writeto_mem(87,10,b'\x43')
    i2c.writeto_mem(87,12,b'\xff')
    i2c.writeto_mem(87,13,b'\xff')
    i2c.writeto_mem(87,9,b'\x0b')
    i2c.writeto_mem(87,8,b'\x0f')  
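    # a second I2C bus on GPIO5/GPIO4 drives the 128x64 SSD1306 OLED, which is
    # filled and then cleared once before the main loop starts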
    i2c2 = I2C(scl=Pin(5), sda=Pin(4))  
    oled = SSD1306_I2C(128, 64, i2c2)
    oled.fill(1)
    oled.show()
    oled.fill(0)
    oled.show() 
    while True:
        oo2 = []
        IRdata1 = []
        Rdata1 = []
        l = 0
        while l < 1:
            # poll a status register until the expected value 0x1f is read back
            if i2c.readfrom_mem(87, 4, 1) == b'\x1f':
                # read 192 bytes (32 samples); each 6-byte sample is split into
                # two 3-byte big-endian values, stored in IRdata1 and Rdata1
                oo2 = i2c.readfrom_mem(87, 7, 192)
                for i in range(1, 33):
                    base = 6 * (i - 1)
                    IRdata1.append(oo2[base] * 65536 + oo2[base + 1] * 256 + oo2[base + 2])
                    Rdata1.append(oo2[base + 3] * 65536 + oo2[base + 4] * 256 + oo2[base + 5])
                l = l + 1
                oled.text('SPO2&HR detector', 0, 0)
                oled.text('Place ur finger', 4, 20)
                oled.show()
        # a sufficiently large reading is treated as a finger being on the sensor
        if Rdata1[10] >= 3500:
            oled.fill(0)
            oled.show()
            oled.text('Hold on for 8s', 4, 20)
            oled.show()
            time.sleep(1)
            oled.fill(0)
            oled.show()
            HR, PO = get(oled)
            if HR > 20:
                oled.fill(0)
                oled.show()
                oled.text('Uploading data', 4, 20)
                oled.show()
                upload(HR, PO)
                oled.fill(0)
                oled.show()
                oled.text('successful!', 4, 20)
                oled.show()
                time.sleep(1)
                oled.fill(0)
                oled.show()
Example #3
def thread_function(input_address, output_address):
    global done
    global is_simulating
    max_number_of_tasks = 50_000_000  # maximum number of tasks
    number_of_warm_up_task = 5_000  # after this number of tasks we begin to collect the statistics

    input_data = get_data.get(input_address)  # getting data from an input file
    task_generator = get_initial_data_generator.get_task_generator(
        input_data)  # make a task generator
    system = System(input_data)  # make a main system
    AccuracyChecker.initial(system)

    simulation.simulate(number_of_warm_up_task, max_number_of_tasks,
                        task_generator, system, StatisticalData(input_data))
    Report.print_report(output_address)
    window.close()
    exit()
Example #4
if __name__ == '__main__':

    model = LLLNet()("train")
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4, decay=1e-5),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    filepath = "model.h5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=1,
                                 mode='min',
                                 period=1)
    callbacks_list = [checkpoint]

    train_images, train_spectrograms, train_labels = get_data.get(
        './dataset', 'train_dataset.csv')
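    # get_data.get returns images, spectrograms, and labels; the model consumes
    # the spectrogram and the image as two separate inputs (see model.fit below)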

    model.fit([train_spectrograms, train_images],
              train_labels,
              epochs=5,
              callbacks=callbacks_list)
    # model = keras.models.load_model('model.h5')
    # test_images, test_spectrograms, test_labels = get_data.get('./dataset', 'test_dataset.csv')
    # pred = model.predict([test_spectrograms, test_images])
    # print(pred)
Example #5
import sys
import get_data

if __name__ == '__main__':

    class_, weekday = get_data.get(sys.argv)

    with open(class_, 'r') as inp:

        FILE = inp.readlines()
        access_indexes = list(range(len(FILE)))
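        # start with every line index, then drop the ones whose first token
        # matches the requested weekday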

        for item in range(len(FILE)):
            try:
                if FILE[item].split()[0] == weekday:
                    access_indexes.remove(item)
            except IndexError:
                # blank lines have no tokens to compare
                pass
    with open(class_, 'w') as ouf:
        for item in access_indexes:
            # readlines() keeps the trailing '\n', so write the line back unchanged
            ouf.write(FILE[item])
Example #6
    plot_model(model,
               to_file="model.png",
               show_layer_names=True,
               show_shapes=True)
    model.save("model.h5")

    print("Testing...")
    print(f"Inp : {x_test}")
    print(f"pred : {model.predict(x_test)}; >> true : {y_test}")
    print(f"Result (loss, acc, mae): {model.evaluate(x_test, y_test)}")

    print("\nPredicting...")
    pred = model.predict(x_sample)
    last = x_sample[0][tr_size - 1][obv_size - 1]
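    # the model predicts a relative change, so apply it to the last observed
    # value to turn it into an absolute forecast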
    forward = last + (last * pred[0][0])
    print(f">> Inp : {x_sample};\n>> Out : {pred}")
    print(
        f"Prediction Normalized : {last}+({last}*{pred[0][0]}) => {forward} new cases"
    )


if __name__ == "__main__":
    # get_data is imported here because it is not imported at module level
    import get_data

    obv_size = 2
    tr_size = 5
    data = get_data.get(obv_size, use_increase=True)
    train(data, 1, obv_size, tr_size)
Example #7
import sklearn
from sklearn.model_selection import train_test_split
import pickle
import argparse
from crf_ner import *
import get_data

parser = argparse.ArgumentParser(description='Predicting with classifier')
parser.add_argument("-i", '--input_file', type=str, default='AA/wiki_00')
parser.add_argument("-o", "--output_file", type=str, default='output.txt')

args = parser.parse_args()

# getting sentences from 20 .pkl files
X = get_data.get(20)

# extracting features
train_sents, test_sents = train_test_split(X, test_size=0.33, random_state=42)
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]

# loading classifier
with open("classifier.pkl", "rb") as f:
    clf = pickle.load(f)

labels = list(clf.classes_)
labels.remove('O')
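# 'O' marks tokens outside any named entity, so it is excluded from the label list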
#print("Labels: ", labels)

# prediction
Example #8
import create_orgs
import create_persons
import create_users
import get_data
import json

with open('config.json', 'r') as f:
    config = json.load(f)

# Parse the CSV data:
data = get_data.get(config=config)

# Combine uni, faculty and dept data to send to org creation function:
org_data = {**data["areas"], **data["depts"]}

# Create org data:
create_orgs.create(config=config, data=org_data)

# Create person data:
create_persons.create(config=config, data=data["persons"])

# Create user data:
create_users.create(config=config, data=data["persons"])
Example #9
import get_data
import get_initial_data_generator
import simulation
from SystemClass import System
from StatisticalData import StatisticalData
import Report
import AccuracyChecker

max_number_of_tasks = 50_000_000  # maximum number of tasks
number_of_warm_up_task = 5_000  # after this number of tasks we begin to collect the statistics

input_data = get_data.get(
    input("input file address: "))  # getting data from an input file
task_generator = get_initial_data_generator.get_task_generator(
    input_data)  # make a task generator
system = System(input_data)  # make a main system
AccuracyChecker.initial(system)

simulation_result = simulation.simulate(number_of_warm_up_task,
                                        max_number_of_tasks, task_generator,
                                        system, StatisticalData(input_data))

Report.print_report(input("output file address: "))
Example #10
def main():
    ## Import packages and functions
    import os, argparse, time
    import numpy as np
    import matplotlib.pyplot as plt
    import get_data, get_model, snr_acc, overall_acc
    import torch
    from torch import nn
    from torch import optim

    # Handle input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        type=str,
                        required=False,
                        default='RML2016')
    parser.add_argument('--arch', type=str, required=True)
    parser.add_argument('--train_pct', type=int, required=False, default=50)
    parser.add_argument('--load_weights', type=int, required=False, default=0)
    parser.add_argument('--trial', type=int, required=False, default=0)
    parser.add_argument('--epochs', type=int, required=False, default=100)
    args = parser.parse_args()

    # Extract the data
    data_set = get_data.get(args.dataset, args.train_pct / 100, BATCH_SIZE=256)
    train_dataloader = data_set['train_dataloader']
    val_dataloader = data_set['val_dataloader']

    # If loading weights
    load_weights = args.load_weights

    # Specify file tag to ID the results from this run
    tag = args.dataset+'_train_-20_18_test_-20_18'+'_arch_'+args.arch+\
                         '_trial_'+str(args.trial)

    # Setup directories to organize results if training
    if args.load_weights == 0:
        sub_folders = ['Figures', 'Computed_Values']
        for sub_folder in sub_folders:
            path = os.path.join(os.getcwd(), tag + '/' + sub_folder)
            os.makedirs(path, exist_ok=True)

    # Get the model
    model = get_model.get(args.arch)
    model_path = os.path.join(os.getcwd(), './Weights/' + tag +
                              '.pth')  # Where to save weights

    # Hardware Check, Loss Function, and Optimizer
    criterion = nn.CrossEntropyLoss()
    if args.arch == 'Complex':  # to match the settings of Krzyston et al., 2020
        optimizer = optim.Adam(params=model.parameters(),
                               lr=0.001,
                               betas=(0.9, 0.999),
                               eps=1e-07)
    else:
        optimizer = optim.SGD(params=model.parameters(), lr=0.1, momentum=0.9)
    # use the GPU when available; define use_cuda unconditionally so the device
    # selection below also works on CPU-only machines
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        print('CUDA is available')
    device = torch.device("cuda:0" if use_cuda else "cpu")
    torch.backends.cudnn.benchmark = False
    model = model.to(device)
    criterion = criterion.to(device)

    # Train model or load saved model
    if args.load_weights == 1:
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['state_dict'])
        for parameter in model.parameters():
            parameter.requires_grad = False
        model.to(device)
        model.eval()
        print('Model Loaded')
    else:  # Train the model

        # Setup early stopping
        patience_counter = 0
        patience = 5

        # Setup Training
        epochs = args.epochs
        train_losses = []
        valid_losses = []
        val_best = np.inf
        best_ep = 0

        # Train
        start_all = time.time()
        for e in range(args.epochs):
            start_ep = time.time()
            running_loss = 0
            rl = 0
            model.train()
            for data, labels in train_dataloader:
                data = data.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                output = model(data)
                loss = criterion(output, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()

            with torch.no_grad():
                for dv, lv in val_dataloader:
                    dv = dv.to(device)
                    lv = lv.to(device)
                    model.eval()
                    op = model(dv)
                    ll = criterion(op, lv)
                    rl += ll.item()

            train_loss = running_loss / len(train_dataloader)
            val_loss = rl / len(val_dataloader)
            train_losses.append(train_loss)
            valid_losses.append(val_loss)

            # save a checkpoint whenever validation loss improves; otherwise
            # count toward early stopping
            if val_loss < val_best:
                val_best = val_loss
                checkpoint = {'state_dict': model.state_dict()}
                torch.save(checkpoint, model_path)
                best_ep = e
                patience_counter = 0
            else:  #early stopping
                patience_counter += 1
                if patience_counter == patience - 1:
                    end_ep = time.time()
                    print('Epoch: ' + str(e))
                    print(' - ' + str(round(end_ep-start_ep,3)) + 's - train_loss: '+\
                          str(round(running_loss/len(train_dataloader),4))+' - val_loss: '\
                          +str(round(rl/len(val_dataloader),4)))
                    break

            end_ep = time.time()
            print('Epoch: ' + str(e))
            print(' - ' + str(round(end_ep-start_ep,3)) + 's - train_loss: '+\
              str(round(running_loss/len(train_dataloader),4))+' - val_loss: '\
              +str(round(rl/len(val_dataloader),4)))
        end_all = time.time()
        print('Total training time = ' +
              str(round((end_all - start_all) / 60, 3)) + ' minutes')

        # Plot training and validation losses
        plt.plot(train_losses, label='Train')
        plt.plot(valid_losses, label='Valid')
        plt.legend()
        plt.xlabel('Epoch #')
        plt.ylabel('Loss')
        plt.savefig(
            os.path.join(os.getcwd(), tag + '/Figures/Train_Valid_Losses.png'))

        # Load best performing weights for inference
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['state_dict'])
        for parameter in model.parameters():
            parameter.requires_grad = False
        model.to(device)
        model.eval()
        print('Weights Loaded for Inference')

    # Overall accuracy and speed test
    overall_acc.eval(data_set['test_snrs'], model, tag, data_set['mods'],
                     data_set['snr_dataloader'])

    # Accuracy by SNR
    snr_acc.eval(data_set['test_snrs'], model, tag, data_set['mods'],
                 data_set['snr_dataloader'])