# Example 1
def main(_):
    """Train one of several attention/feedback network variants on cluttered MNIST.

    The model variant, architecture and all hyper-parameters come from the
    module-level ``FLAGS`` object.  Runs an optional pre-training phase,
    then a standard training phase, and finally — for the stochastic
    attention variants — a feedback-training phase.  The network checkpoints
    itself via ``net.save()`` once per epoch.
    """
    # Load MNIST data
    mnist = load_mnist()
    pre_training = FLAGS.pre_train

    # Define the deep learning model
    # NOTE(review): this first test is a plain `if`, not part of the
    # elif-chain below; behaviour is unaffected only because the model names
    # are mutually exclusive.  If FLAGS.model matches none of the branches,
    # `net` and `kernlen` stay undefined and the code below raises
    # NameError — confirm that is intended.
    if FLAGS.model == 'Base':
        # Baseline model never pre-trains.
        pre_training = False
        kernlen = int(FLAGS.frame_size / 2)
        net = Baseline(directory=FLAGS.dir,
                       optimizer=FLAGS.optimizer,
                       learning_rate=FLAGS.learning_rate,
                       layer_sizes=FLAGS.arch,
                       num_features=FLAGS.num_features,
                       num_filters=FLAGS.num_filters,
                       frame_size=FLAGS.frame_size)
    if FLAGS.model == 'Cat':
        kernlen = int(FLAGS.frame_size / 2)
        net = Cat_Net(layer_sizes=FLAGS.arch,
                      optimizer=FLAGS.optimizer,
                      num_filters=FLAGS.num_filters,
                      num_features=FLAGS.num_features,
                      num_samples=FLAGS.num_samples,
                      frame_size=FLAGS.frame_size,
                      num_cat=FLAGS.num_cat,
                      learning_rate=FLAGS.learning_rate,
                      feedback_distance=FLAGS.feedback_distance,
                      directory=FLAGS.dir)
    elif FLAGS.model == 'Gumbel':
        kernlen = int(FLAGS.frame_size / 2)
        net = Gumbel_Net(layer_sizes=FLAGS.arch,
                         optimizer=FLAGS.optimizer,
                         num_filters=FLAGS.num_filters,
                         num_features=FLAGS.num_features,
                         frame_size=FLAGS.frame_size,
                         num_cat=FLAGS.num_cat,
                         learning_rate=FLAGS.learning_rate,
                         feedback_distance=FLAGS.feedback_distance,
                         directory=FLAGS.dir,
                         second_conv=FLAGS.second_conv,
                         initial_tau=FLAGS.initial_tau,
                         tau_decay=FLAGS.tau_decay,
                         reg=FLAGS.reg)
    elif FLAGS.model == 'RawG':
        # "Raw" variants operate on the full frame (frame_size**2 features)
        # and use a fixed heat-map kernel size.
        pre_training = False
        kernlen = 60
        net = Raw_Gumbel_Net(layer_sizes=FLAGS.arch,
                             optimizer=FLAGS.optimizer,
                             num_filters=FLAGS.num_filters,
                             num_features=FLAGS.frame_size**2,
                             frame_size=FLAGS.frame_size,
                             num_cat=FLAGS.num_cat,
                             learning_rate=FLAGS.learning_rate,
                             feedback_distance=FLAGS.feedback_distance,
                             directory=FLAGS.dir,
                             second_conv=FLAGS.second_conv,
                             initial_tau=FLAGS.initial_tau,
                             meta=None)
    elif FLAGS.model == 'RL':
        kernlen = int(FLAGS.frame_size / 2)
        net = Bernoulli_Net(layer_sizes=FLAGS.arch,
                            optimizer=FLAGS.optimizer,
                            num_filters=FLAGS.num_filters,
                            num_features=FLAGS.num_features,
                            num_samples=FLAGS.num_samples,
                            frame_size=FLAGS.frame_size,
                            learning_rate=FLAGS.learning_rate,
                            feedback_distance=FLAGS.feedback_distance,
                            directory=FLAGS.dir,
                            second_conv=FLAGS.second_conv)
    elif FLAGS.model == 'RawB':
        # NOTE(review): RawB force-enables pre-training, overriding
        # FLAGS.pre_train — confirm this is deliberate.
        pre_training = True
        kernlen = 60
        net = Raw_Bernoulli_Net(layer_sizes=FLAGS.arch,
                                optimizer=FLAGS.optimizer,
                                num_filters=FLAGS.num_filters,
                                num_features=FLAGS.frame_size**2,
                                num_samples=FLAGS.num_samples,
                                frame_size=FLAGS.frame_size,
                                learning_rate=FLAGS.learning_rate,
                                feedback_distance=FLAGS.feedback_distance,
                                directory=FLAGS.dir,
                                second_conv=FLAGS.second_conv)

    # Build the initial cluttered-MNIST training set and its patch coordinates.
    X_train, train_coords = convertCluttered(
        mnist.train.images,
        finalImgSize=FLAGS.frame_size,
        number_patches=FLAGS.number_patches)
    y_train = mnist.train.labels

    # Turn each (x, y) patch coordinate into a kernel map (gkern is
    # presumably a Gaussian kernel centred on the patch — confirm).
    train_coords = np.array(
        [gkern(coord[0], coord[1], kernlen=kernlen) for coord in train_coords])

    X_test, test_coords = convertCluttered(mnist.test.images,
                                           finalImgSize=FLAGS.frame_size,
                                           number_patches=FLAGS.number_patches)
    # test_coords = np.array([gkern(coord[0], coord[1], kernlen=20) for coord in test_coords])
    y_test = mnist.test.labels

    batch_size = FLAGS.batch_size
    if pre_training:
        print("Pre-training")
        for epoch in tqdm(range(FLAGS.epochs)):
            # Evaluate on the test set once per epoch, before training.
            # NOTE(review): the keyword 'pre_trainining' looks misspelled
            # ('pre_training'); confirm against net.evaluate's signature
            # before renaming — the method may carry the same typo.
            _x, _y = input_fn(X_test, y_test, batch_size=batch_size)
            net.evaluate(_x, _y, pre_trainining=True)
            # Regenerate a fresh cluttered training set each epoch.
            X_train, train_coords = convertCluttered(
                mnist.train.images,
                finalImgSize=FLAGS.frame_size,
                number_patches=FLAGS.number_patches)
            y_train = mnist.train.labels
            # print(net.confusion_matrix(_x, _y))
            net.save()
            X_train, y_train, train_coords = shuffle_in_unison(
                X_train, y_train, train_coords)
            # Mini-batch pass over the (re-generated, shuffled) training set.
            for i in range(0, len(X_train), batch_size):
                _x, _y = input_fn(X_train[i:i + batch_size],
                                  y_train[i:i + batch_size],
                                  batch_size=batch_size)
                net.pre_train(_x, _y, dropout=0.8)

    print("Training")
    for epoch in tqdm(range(FLAGS.epochs)):
        X_train, y_train, train_coords = shuffle_in_unison(
            X_train, y_train, train_coords)
        _x, _y = input_fn(X_test, y_test, batch_size=batch_size)
        net.evaluate(_x, _y)
        # Regenerate the cluttered training set each epoch (note: this
        # discards the shuffle performed just above — confirm intended).
        X_train, train_coords = convertCluttered(
            mnist.train.images,
            finalImgSize=FLAGS.frame_size,
            number_patches=FLAGS.number_patches)
        y_train = mnist.train.labels
        # print(net.confusion_matrix(_x, _y))
        net.save()
        for i in range(0, len(X_train), batch_size):
            _x, _y = X_train[i:i + batch_size], y_train[i:i + batch_size]
            net.train(_x, _y, dropout=FLAGS.dropout)

    # Feedback phase only applies to the stochastic-attention variants.
    if FLAGS.model == 'RL' or FLAGS.model == 'Gumbel' or FLAGS.model == 'Cat' or FLAGS.model == 'RawB' or FLAGS.model == 'RawG':
        print("Feedback Training")
        for epoch in tqdm(range(FLAGS.epochs)):
            _x, _y = input_fn(X_test, y_test, batch_size=batch_size)
            net.evaluate(_x, _y)
            X_train, train_coords = convertCluttered(
                mnist.train.images,
                finalImgSize=FLAGS.frame_size,
                number_patches=FLAGS.number_patches)
            y_train = mnist.train.labels
            train_coords = np.array([
                gkern(coord[0], coord[1], kernlen=kernlen)
                for coord in train_coords
            ])
            # print(net.confusion_matrix(_x, _y))
            net.save()
            X_train, y_train, train_coords = shuffle_in_unison(
                X_train, y_train, train_coords)
            for i in range(0, len(X_train), batch_size):
                # NOTE(review): the slice index `i` is unused — input_fn is
                # handed the FULL arrays, unlike the pre-training loop which
                # slices. Presumably input_fn samples a batch internally;
                # verify, otherwise this trains on repeated random batches.
                _x, _y, _train_coords = input_fn(X_train,
                                                 y_train,
                                                 train_coords,
                                                 batch_size=batch_size)
                net.feedback_train(_x,
                                   _y,
                                   _train_coords,
                                   dropout=FLAGS.dropout)
# Example 2
def getDayBaseline(meter, channel, day, data_type):
    """Return the always-on baseline value for *meter*/*channel* on *day*.

    Looks up a cached ``Baseline`` row first.  A cached value is considered
    valid when (a) *day* is today and the row was refreshed within the last
    hour, or (b) *day* is in the past and the row was modified after that
    day.  Otherwise the baseline is recomputed as the minimum of a
    rectangular-window moving average over the day's sensor readings,
    persisted, and returned.

    Args:
        meter: sensor whose readings are examined.
        channel: the sensor channel to filter on.
        day: datetime marking the day; only ``day.date()`` is used.
        data_type: unused here; kept for interface compatibility.

    Returns:
        The baseline value (0 when the day has no readings, or fewer
        readings than the averaging window).
    """
    import numpy as np  # hoisted from mid-function; stays local to avoid touching module imports

    day = day.date()
    # EAFP: fetch the cached row, or build an unsaved placeholder.
    try:
        baseline = Baseline.objects.get(date=day,
                                        sensor=meter,
                                        channel=channel)
        created = False
    except Baseline.DoesNotExist:
        baseline = Baseline(date=day,
                            sensor=meter,
                            channel=channel,
                            value=0.0)
        created = True

    logger.debug('getDayBaseline')
    #powerFactor = 60 * 60.0 / channel.reading_frequency

    valid = False
    if not created:
        lastModifiedDay = baseline.last_modified.date()
        if day == date.today():
            # Today's baseline goes stale after an hour.
            if (datetime.now() - baseline.last_modified) < timedelta(hours=1):
                # TODO: check me!
                valid = True
        else:  # day is not today
            # A past day's baseline is final once recomputed after that day.
            if lastModifiedDay > day:
                valid = True

    logger.debug('valid: ' + str(valid))

    if valid:
        return baseline.value

    # Recompute: all readings for this meter/channel inside the 24h window.
    filter_energy_objects = SensorReading.objects.filter(
                         sensor=meter, channel=channel).filter(
                         timestamp__gte=day).filter(
                         timestamp__lt=(day + timedelta(days=1)))

    logger.debug('filter_energy_objects.count(): ' +
                 str(filter_energy_objects.count()))

    if filter_energy_objects.count() > 0:
        energy = [x.value for x in filter_energy_objects]

        # hard-coded subset size for moving average calculation
        window_size = ALWAYS_ON_WINDOW_SIZE

        # Moving average via a rectangular window.  (A previous dead call to
        # moving_average() whose result was immediately overwritten by this
        # convolution has been removed.)
        window = (np.zeros(int(window_size)) + 1.0) / window_size
        mav = np.convolve(energy, window, 'valid')

        try:
            min_baseline = min(mav)
        except ValueError:
            # Fewer readings than window_size -> 'valid' convolution is empty.
            min_baseline = 0
    else:
        min_baseline = 0

    baseline.value = min_baseline
    try:
        baseline.save()
    except IntegrityError:
        # A concurrent request inserted the same row first; update that one.
        b2 = Baseline.objects.get(date=day,
                                  sensor=meter,
                                  channel=channel)
        b2.value = min_baseline
        b2.save()

    return min_baseline