Example 1
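Constructor for a Leap Motion reader: it creates a Leap.Controller and empty m.Sequence buffers for processed and raw data, plus the bookkeeping fields used by the capture loop.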
    def __init__(self, fps, timeout):
        self.fps = fps
        self.controller = Leap.Controller()
        self.timeout = timeout
        self.data = m.Sequence()
        self.raw_data = m.Sequence()
        self.frames = []
        self.timeout_start = None
        self.previous_palm_position = None
        self.done_reading = False
Example 2
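Buffers frames at the configured frame rate until fps * timeout frames have been collected, then converts each buffered frame and returns both the processed and raw sequences.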
    def read_data_from_leap(self):

        self.done_reading = False
        self.data = m.Sequence()
        self.raw_data = m.Sequence()
        self.frames = []
        while len(self.frames) < self.fps * self.timeout:
            self.read()
            time.sleep(1.0 / self.fps)

        for frame in self.frames:
            self.compute_frame(frame)
        self.done_reading = True

        return self.data, self.raw_data
Example 3
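Semantic action for a grammar's SEQUENCE rule: an empty term list becomes model.Empty(), a single term is returned unwrapped, and longer lists are wrapped in model.Sequence.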
    def SEQUENCE(self, ast):
        if not ast.terms:
            return model.Empty()
        elif len(ast.terms) < 2:
            return ast.terms[0]
        else:
            return model.Sequence(ast.terms)
Example 4
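A PyQt4 (Python 2) slot that prompts for a two-character sequence name, creates a model.Sequence under that name, registers it with the UI and the current project, and saves.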
    def makeNewSeq(self):
        seq, ok = QtGui.QInputDialog.getText(self, "QInputDialog.getText()",
                "Sequence name:", QtGui.QLineEdit.Normal, "")
        if ok and len(str(seq)) == 2:
            #self.textLabel.setText(text)
            newseq = model.Sequence(Name=unicode(string.upper(str(seq))))
            self.ui.SequComboBox.addItem(string.upper(str(seq)))
            self.curProject.Seqs.append(newseq)
            model.saveData()
Example 5
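Loads classified gesture sequences from a JSON file, rebuilding each m.Sequence from stored vectors via discretized hand models.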
    def load_file(self, file_name):
        classified_sequence_list = []
        with open('../data/' + file_name + '.json') as f:
            data = json.load(f)
            for classified_sequence in data:
                gesture_code = classified_sequence['gesture']
                seq = classified_sequence['sequence']
                sequence = m.Sequence()

                gesture = m.Gesture.gesture_from_code(gesture_code)

                for vector in seq:
                    sequence.add_data(m.DiscretizedHandModel(m.HandModel(None, None, vector)))

                classified_sequence_list.append(m.ClassifiedSequence(sequence, gesture))

        return classified_sequence_list
Example 6
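Reads a binary recording of 16 classified sequences of 60 frames each: every serialized frame is copied into a Leap byte array with ctypes.memmove and deserialized back into a Leap.Frame.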
    def load_raw_data(self, file_name):
        classified_sequence_list = []
        with open("../data/raw/" + file_name + ".data", "rb") as data_file:
            for j in range(0, 16):
                frame_list = []
                for i in range(0, 60):
                    next_block_size = data_file.read(4)
                    size = struct.unpack('i', next_block_size)[0]
                    data = data_file.read(size)
                    leap_byte_array = Leap.byte_array(size)
                    address = leap_byte_array.cast().__long__()
                    ctypes.memmove(address, data, size)

                    frame = Leap.Frame()
                    frame.deserialize((leap_byte_array, size))
                    frame_list.append(frame)
                code = struct.unpack('i', data_file.read(4))[0]
                classified_sequence_list.append(
                    m.ClassifiedSequence(m.Sequence().load_data(frame_list), m.Gesture.gesture_from_code(code)))
        return classified_sequence_list
Example 7
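Exports rows from a SQLite database to RDF: each row returned by sequence.sql becomes a model.Sequence, and each row of drug_delivery.sql becomes a drug-delivery model.Event.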
conn = sqlite3.connect(args.file_db)

# Create the RDF factory
factory = FactoryRDF.FactoryRDF()
factory.open(True)

#region Sequence
query = open('./SQL/sequence.sql', 'r').read()
cursor = conn.cursor()
cursor.execute(query)

records = cursor.fetchall()

for row in records:
    factory.addSequence(model.Sequence(row[0], row[2], row[3], row[1], row[4]))

cursor.close()
#endregion

#region Drug Delivery
query = open('./SQL/drug_delivery.sql', 'r').read()
cursor = conn.cursor()
cursor.execute(query)

records = cursor.fetchall()

for row in records:
    factory.addEventDrugDelivery(
        model.Event(str(row[0]), model.Pharmacy(str(row[5])),
                    model.DrugDelevery(str(row[3]), row[4]), row[1], row[2]))
Example 8
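A complete training/evaluation driver for an SMFS event-detection network built on model.Sequence(2): argparse configuration, data loading, an Adam training loop with L1 losses on two regression heads, periodic checkpointing, and logging.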
def main():
    matplotlib.use('Agg')
    parser = argparse.ArgumentParser(description='SMFS Event Detect')
    parser.add_argument('--datafolder',
                        default="./input",
                        type=str,
                        help='folder for data (.np) files')
    parser.add_argument('--modelfile',
                        default="./report/model-latest.pt",
                        type=str,
                        help='folder for model (.pt) files')
    parser.add_argument('--cuda',
                        metavar='1 or 0',
                        default=1 if torch.cuda.is_available() else 0,
                        type=int,
                        help='use cuda')
    parser.add_argument(
        '--train',
        default=1,
        type=int,
        help='set 1 to train the model, set 0 to test the trained model')
    parser.add_argument('--predict_size',
                        default=300,
                        type=int,
                        help='predict_size for predicting')
    parser.add_argument('--minibatches_per_step',
                        default=10,
                        type=int,
                        help='minibatches_per_step for training')
    parser.add_argument('--minibatch_size',
                        default=300,
                        type=int,
                        help='minibatch_size for training')
    parser.add_argument('--epoch',
                        default=30,
                        type=int,
                        help='epochs for training')
    parser.add_argument('--learning_rate',
                        default=0.001,
                        type=float,
                        help='learning_rate for training')
    parser.add_argument('--data_split',
                        default="0,1.0",
                        type=str,
                        help='data_split for truncating dataset')
    parser.add_argument('--data_kept',
                        default="0,0",
                        type=str,
                        help='data_kept for truncating dataset')
    parser.add_argument(
        '--source_scale',
        default="400, 50",
        type=str,
        help=
        'source_scale in nm and pN for transforming input signals in the dataset'
    )
    parser.add_argument(
        '--source_bias',
        default="-1.6, -1.5",
        type=str,
        help=
        'source_bias after applying source_scale for transforming input signals in the dataset'
    )
    parser.add_argument(
        '--downsampling',
        default=1,
        type=int,
        help='perform downsampling using averaging filter on input data')
    parser.add_argument(
        '--noiselevel',
        default="0,0",
        type=str,
        help='add extra Gaussian noise (level in nm and pN) into input dataset'
    )
    parser.add_argument('--report',
                        default="./report/",
                        type=str,
                        help='folder for saving reports')
    parser.add_argument('--report_note',
                        default="train",
                        type=str,
                        help='add prefix to each report file')
    args = parser.parse_args()

    device = torch.device('cuda' if args.cuda else 'cpu')
    print(device)

    data_split = [float(item) for item in args.data_split.split(',')]
    source_scale = [float(item) for item in args.source_scale.split(',')]
    source_bias = [float(item) for item in args.source_bias.split(',')]
    data_kept = [int(item) for item in args.data_kept.split(',')]
    if args.cuda == 0:
        print("WARNING: run in debugging mode")
        args.epoch = 0

    if not args.train:
        args.report_note = "test"
        args.epoch = 1
    noiselevel = [float(item) for item in args.noiselevel.split(',')]
    print("using noise level " + str(noiselevel))

    spLoader = SpectrumLoader(datafolder=args.datafolder,
                              recpetive_field=model.receptive_field,
                              filename_suffix="",
                              AWGN=noiselevel,
                              downsampling=args.downsampling,
                              source_scale=source_scale,
                              source_bias=source_bias,
                              data_split=data_split,
                              data_kept=data_kept)
    print("DataSet Size: %d" % spLoader.size)
    total_steps_per_epoch = (
        spLoader.size + args.minibatches_per_step) // args.minibatches_per_step
    print("total_steps_per_epoch: %d" % total_steps_per_epoch)

    seq_predictor = model.Sequence(2).to(device)
    if not args.train:
        seq_predictor.load_state_dict(
            torch.load(args.modelfile, map_location=device))
        seq_predictor.eval()
        print("loading trained model")
    if args.cuda == 0:
        from torchsummary import summary
        summary(seq_predictor, (2, 224))

    seq_predictor.double()
    bcecriterion = nn.BCELoss()
    msecriterion = nn.L1Loss()
    if args.train:

        optimizer = optim.Adam(seq_predictor.parameters(),
                               lr=args.learning_rate)  # , momentum=0.9
    best_metric_epoch = Metric()
    best_epoch_id = 0

    for epoch in range(args.epoch):
        metric_epoch = Metric()
        spLoader.reset(randomize=True)

        for step in range(total_steps_per_epoch):
            if args.train:
                optimizer.zero_grad()
            total_loss = 0
            total_mse_loss = 0
            for _ in range(args.minibatches_per_step):
                source_minibatch, target_regress, target_invreg, _ = spLoader.get_samples(
                    device=device, sample_per_file=args.minibatch_size)

                predict_invreg, predict_regressor = seq_predictor(
                    source_minibatch)

                lossmse = msecriterion(predict_regressor, target_regress) * 0.5
                lossmse_inv = msecriterion(predict_invreg, target_invreg) * 0.5
                enable = (abs(predict_regressor - 0.5) +
                          abs(predict_invreg - 0.5))
                with torch.no_grad():
                    metric_epoch.update(enable, target_regress,
                                        predict_regressor)
                loss = lossmse + lossmse_inv
                total_loss += float(loss.item())
                total_mse_loss += float(lossmse.item())
                if args.train:
                    loss /= args.minibatches_per_step
                    loss.backward(retain_graph=True)
            if args.train:
                optimizer.step()

                report_man.progress_bar(
                    step, total_steps_per_epoch,
                    args.report_note + " Epoch: %d | Loss: %.6f/%.6f | " %
                    (epoch, total_loss, total_mse_loss) + str(metric_epoch))

        print("FINAL: " + args.report_note + " | Epoch: %d | total metric " %
              (epoch) + str(metric_epoch))
        if metric_epoch.get_score(F1_only=True) >= best_metric_epoch.get_score(
                F1_only=True):
            best_metric_epoch = metric_epoch
            best_epoch_id = epoch

        with torch.no_grad():
            if not args.train:
                print("No checkpointing to prevent trouble.")
            else:
                if epoch % 10 == 0:
                    print("Checkpoint ... | Epoch: %d" % (epoch))
                    torch.save(
                        seq_predictor.state_dict(),
                        report_man.get_report_folder(args.report) +
                        'model%d.pt' % epoch)

    with open(
            report_man.get_report_folder(args.report) + args.report_note +
            ".log", "a") as logfile:
        import sys
        logfile.write(' '.join(sys.argv) + "\n")
        logfile.write('at {}: {}\n'.format(best_epoch_id,
                                           str(best_metric_epoch)))

    if args.train:
        torch.save(seq_predictor.state_dict(), args.modelfile)
    else:
        predict_on_single(args, spLoader, device, seq_predictor, noiselevel)

    with open(
            report_man.get_report_folder(args.report) + args.report_note +
            ".log", "r") as logfile:
        print(logfile.read())
Example 9
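Trains a small fully connected network on MNIST using a custom model.Sequence container and Dense layers.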
import model
import layer
import optimizers
import pickle
import util
import numpy
import matplotlib.pyplot as plt

train_set, val_set, test_set = pickle.load(open("mnist.pkl", "rb"),
                                           encoding='latin1')

model = model.Sequence()
model.add(layer.Dense(300, input_dim=28 * 28, activation="Relu"))
#model.add(layer.Dense(300, activation="Relu"))
model.add(layer.Dense(10))

train_y = util.to_categorical(train_set[1])
idx = numpy.random.choice(train_set[0].shape[0], 50000)
train_set = train_set[0][idx]
train_y = train_y[idx]

model.init()
model.fit(input_data=train_set, output_data=train_y, epoch=500, batch_num=10)
model.compile(optimizer=optimizers.SGD(model, 0.1), loss="Mean_squared_error")
model.train()

id = 0
rightnum = 0
for now in val_set[0]:
    # plt.imshow(numpy.reshape(now, (28, 28)))
    # plt.show()
    pass  # evaluation body elided in the source
Example 10
Converts the raw Avola gesture dataset under ../train/ into classified m.Sequence objects and saves them in chunks (file_names, the per-channel input file list, is presumably defined earlier in the original script).
raw_data = {}
for filename in file_names:
    with open("../train/" + filename + ".txt", "r") as f:
        raw_data[filename] = []
        for line in f:
            raw_data[filename].append(list(map(float, line.split())))

gestures = []
with open("../train/y_train.txt", "r") as file_y:
    for line in file_y:
        gestures.append(int(line))

classified_sequence_list = []

for i in range(0, 810):
    sequence = m.Sequence()
    for j in range(0, 200):
        model = []
        for filename in file_names:
            model.append(round(raw_data[filename][i][j], 3))
        sequence.add_data(model)
    # one classified sequence per gesture sample
    classified_sequence_list.append(
        m.ClassifiedSequence(sequence,
                             m.Gesture.gesture_from_code(gestures[i])))

saver = fl.Saver()

saver.add_data_to_file(classified_sequence_list, "avola_dataset_all")

for i in range(10):
    saver.add_data_to_file(classified_sequence_list[i * 50:i * 50 + 50],
                           "2_avola_dataset" + str(i))
Example 11
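A yacc-style grammar production that parses WebIDL's sequence<Type> (with an optional Null suffix) into a model.Sequence node.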
def p_NonAnyType_sequence(p):
    """NonAnyType : sequence "<" Type ">" Null"""
    p[0] = model.Sequence(t=p[3], nullable=p[5])