Example #1
def main():
    #_data, _label = gen_data(filename='1cycle_iv_small.txt',input_list=['v(i)'], output_list=['v(pad)'])
    _data, _label = gen_data(filename='1cycle_iv_small_100p.txt',
                             input_list=['v(i)'],
                             output_list=['i(vi)'])
    #_data, _label = gen_data(filename='1cycle_iv_small_10p.txt',input_list=['v(i)'], output_list=['i(vpad)'])
    _data = np.concatenate([_data] * 60)
    _label = np.concatenate([_label] * 60)
    global data_scale, label_scale
    data_scale = normalize(_data, axis=1)
    label_scale = normalize(_label, axis=1)

    window_size = len(_data[0])  # actually the sequence length (num_steps)
    evaluate_timeseries_with_label(_data, _label, window_size)
Example #2
def main():

    h = 720
    w = 1280
    c = 3
    net = kerasModel.SN(h, w)
    net.build(input_shape=(None, h, w, c))
    net.load_weights(CP_Dir)
    # Run self-ensemble inference over every 10th validation image
    for i, f in zip(range(len(v_scan_blur)), v_scan_blur):
        if (i % 10) == 9:
            dir_num = str("%03d_" % (i // 100))
            name = str("%08d" % (i % 100))
            final = np.zeros((h, w, c))
            for mode in range(8):

                test = cv2.imread(f)
                test = self_ensemble(test, mode, 1)
                test = data.normalize(test)
                test = np.expand_dims(test, axis=0)
                pred = net.predict(test, batch_size=1)
                pred = pred.squeeze(0)
                pred = data.unnormalize(pred)
                pred = self_ensemble(pred, mode, 0)
                final = final + pred
            #cv2.imwrite(save_files[a], pred)
            cv2.imwrite(
                "D:/ntire2020/Deblur/ntire-2020-deblur-mobile-master/validation2/"
                + dir_num + name + ".png", final / 8)
            print("%s" % f)
Example #3
def train_composition(dataset, transformation_list):
    """
    Train a model on a dataset to which a sequence of transformations has been applied.
    :param dataset: the original dataset
    :param transformation_list: the sequence of transformations
    :return:
    """
    # Apply a sequence of transformations
    (X_train, Y_train), (X_test, Y_test) = load_data(dataset)
    X_train = transform(X_train, transformation_list)

    nb_examples, img_rows, img_cols, nb_channels = X_train.shape
    nb_classes = Y_train.shape[1]
    input_shape = (img_rows, img_cols, nb_channels)

    # Train a model and save
    model_name = 'model-{}-cnn-{}'.format(dataset, 'composition')
    require_preprocess = (dataset == DATA.cifar_10)

    model = models.create_model(dataset, input_shape, nb_classes)
    models.train(model, X_train, Y_train, model_name, require_preprocess)
    # save to disk
    models.save_model(model, model_name)

    # evaluate the new model
    loaded_model = models.load_model(model_name)
    X_test = transform(X_test, transformation_list)

    if require_preprocess:
        X_test = normalize(X_test)

    scores = loaded_model.evaluate(X_test, Y_test, verbose=2)
    print('*** Evaluating the new model: {}'.format(scores))
    del loaded_model
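

# Hypothetical usage of train_composition (a sketch; DATA.mnist and the
# transformation names below are placeholders, not values confirmed by the source):
if __name__ == '__main__':
    train_composition(DATA.mnist, ['rotate90', 'flip_horizontal'])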
Example #4
def do_draft(model, which):
  (trX, trY, teX) = draft(which)
  (trX, teX) = normalize(trX, teX)
  model.fit(trX, trY)                
  teY = model.predict_proba(teX)[:,1]
  print eval(teY)
  return model
Example #5
 def process_post(self, post):
     """
     Process post received from the message queue.
     """
     # is this a post matching one or more persons?
     post_add = False
     text = data.normalize(post['text']).lower()
     self.first_person = None
     # check post language
     if data.get_text_language(text) == 'fr':
         for person in self.persons:
             names = data.get_names(person)
             if data.check_names(names, text, person['words']) == 1:
                 # one more post for this person
                 if not post_add:
                     post_add = True
                     # get next post id
                     post_id = self.db.incr('nextPostId')
                 # add post to person's posts list
                 key = 'person:%d:posts:%d' % (person['id'],
                         self.stats_last_update)
                 self.db.rpush(key, post_id)
                 # update stats for this person
                 self.update_person_stats(person)
         if post_add:
             # add post to db
             self.db.set_post(int(post_id),
                 json.dumps(post))
             # add post id to current hour
             key = 'posts:%d' % (self.stats_last_update)
             self.db.rpush(key, post_id)
     else:
         logging.debug('found english word in %s', text)
def basic_model():
    """
    TODO:
        Initialize a classifier
        Suggested classifiers:
            * SVM
            * K-nn
            * Decision Tree
            * Random Forest
        Train a model and evaluate it
            You should use k-fold cross validation
        Return the accuracy of each fold in the scores variable
    """
    model = None
    """
        labels: a list with a subject-label for each feature-vector
        targets: the class labels
        features: the features
    """
    labels, targets, features = data.get_all("features.csv")
    features = data.normalize(features)

    scores = None

    return scores
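

# One possible completion of the TODO in basic_model above -- a sketch assuming
# scikit-learn is available; the SVM choice and cv=5 are illustrative values, not
# taken from the original source:
def basic_model_svm_sketch():
    from sklearn.svm import SVC
    from sklearn.model_selection import cross_val_score

    labels, targets, features = data.get_all("features.csv")
    features = data.normalize(features)
    model = SVC(kernel='rbf')
    # 5-fold cross validation: one accuracy score per fold
    scores = cross_val_score(model, features, targets, cv=5)
    return scores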
Example #7
def main():

    h = 360
    w = 256
    c = 3
    #val_psnr=0
    net = kerasModel2.SN(h, w)
    net.build(input_shape=(None, h, w, c))
    net.load_weights(CP_Dir)
    # Run inference over the validation images
    start = time.time()
    for i, f in zip(range(len(v_scan_blur)), v_scan_blur):
        #if (i % 10) == 9:
        dir_num = str("%03d_" % (i // 100))
        name = str("%08d" % (i % 100))
        test2 = cv2.imread(f)
        test2 = cv2.resize(test2, (256, 360), interpolation=cv2.INTER_CUBIC)
        test = data.normalize(test2)
        test = np.expand_dims(test, axis=0)
        pred = net.predict(test, batch_size=1)
        pred = pred.squeeze(0)
        pred = data.unnormalize(pred)
        #val_psnr = val_psnr + BB(test2, pred)
        #cv2.imwrite(save_files[a], pred)
        cv2.imwrite(
            "D:/ntire2020/Deblur/ntire-2020-deblur-mobile-master/jisu/" +
            dir_num + name + ".png", pred)
        print("%s" % f)
    print("%.4f sec took for testing" % (time.time() - start))
Example #8
def preprocess():
    """
    Returns
    ------------
    training_generator
    (x_train, y_train)
    test_generator
    (x_test, y_test)
    mean for normalization
    std for normalization
    """
    dataset = data.load("../train", im_size)
    x, y = zip(*dataset)
    r = data.onehot_label(y)
    y = list(map(lambda k: r[k], y))
    x, m, s = data.normalize(x)
    (x_train, y_train), (x_test, y_test) = data.train_val_test_split(
        (x, y), prc_test=.3, random_state=42)

    training_generator = [
        RotatingGenerator(angle=i * 90,
                          image_shape=x_train[0].shape,
                          prob_transfo=.5,
                          featurewise_center=False,
                          samplewise_center=False,
                          featurewise_std_normalization=False,
                          samplewise_std_normalization=False,
                          zca_whitening=False,
                          rotation_range=80,
                          width_shift_range=.3,
                          height_shift_range=.3,
                          horizontal_flip=True,
                          vertical_flip=True,
                          zoom_range=0.5,
                          shear_range=0.5,
                          fill_mode="reflect") for i in range(4)
    ]
    test_generator = [
        RotatingGenerator(angle=i * 90,
                          image_shape=x_train[0].shape,
                          prob_transfo=0,
                          featurewise_center=False,
                          samplewise_center=False,
                          featurewise_std_normalization=False,
                          samplewise_std_normalization=False,
                          zca_whitening=False,
                          rotation_range=0,
                          width_shift_range=.0,
                          height_shift_range=.0,
                          horizontal_flip=False,
                          vertical_flip=False,
                          zoom_range=0,
                          shear_range=0,
                          fill_mode="reflect") for i in range(4)
    ]

    training_generator = multipleInputGenerator(x_train, y_train,
                                                training_generator)
    test_generator = multipleInputGenerator(x_test, y_test, test_generator)
    return training_generator, (x_train,
                                y_train), test_generator, (x_test,
                                                           y_test), m, s
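

# Hypothetical call site (a sketch, not part of the original script) showing how
# the six return values of preprocess() unpack:
if __name__ == '__main__':
    train_gen, (x_tr, y_tr), test_gen, (x_te, y_te), mean, std = preprocess()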
Example #9
def do_proof(model, which):
  (trX, trY, teX) = proof(which)
  (trX, teX) = normalize(trX, teX)
  model.fit(trX, trY)
  teY = model.predict_proba(teX)[:,1]
  save(teY, 'pred.csv')
  return model
Example #10
    def process_post(self, post):
        """
        Process post received from the message queue.
        """
        # is this a post matching one or more persons?
        post_add = False
        text = data.normalize(post['text']).lower()
        self.first_person = None
        # check post language

        try:
            url = "https://translate.yandex.net/api/v1.5/tr.json/translate?key=trnsl.1.1.20160331T033639Z.8ac2657ae86c5f48.a00ba4924e8fc84e53a9521069702d599ebd3663"
            response = urllib2.urlopen(url + '&' + urllib.urlencode({'text': text.encode('ascii','ignore') }) +'&lang=tl-en')
            trans_data = json.loads(response.read())

            print 'translated: "%s"' % str(trans_data['text'][0])
            text = trans_data['text'][0]
            print('--------------------------------------------------------------------')
        except IOError, e:
            if hasattr(e, 'code'): # HTTPError
                print 'http error code: ', e.code
            elif hasattr(e, 'reason'): # URLError
                print "can't connect, reason: ", e.reason
            else:
                raise
Example #11
def do_knn(which = ''):
  (trX, trY, teX) = draft(which)
  (trX, teX) = normalize(trX, teX)

  clf = KNeighborsClassifier()  # KNeighborsClassifier has no 'probabilities' argument; predict_proba is available by default
  clf.fit(trX, trY)
  teY = clf.predict_proba(teX)[:,1]
  return teY
Example #12
def do_svm(which = ''):
  (trX, trY, teX) = draft(which)
  (trX, teX) = normalize(trX, teX)

  clf = svm.SVC(probability=True)
  clf.fit(trX, trY)
  teY = clf.predict_proba(teX)[:,1]
  return teY
def basic_model():
    model = RandomForestClassifier(n_estimators=100)
    labels, targets, features = data.get_all("features.csv")
    features = data.normalize(features)

    scores = cross_val_score(model, features, targets, cv=5)

    return scores
Example #14
def preprocess():
    """
    Returns
    ------------
    training_generator
    (x_train, y_train)
    test_generator
    (x_test, y_test)
    mean for normalization
    std for normalization
    """
    base = "../../train/"
    dirs = [base + "Loose Silky-bent", base + "Black-grass"]
    dataset = data.load_specific(dirs, im_size)

    x, y = zip(*dataset)
    x, m, s = data.normalize(x)
    r = data.one_label(y)
    y = list(map(lambda k: r[k], y))
    (x_train, y_train), (x_test, y_test) = data.train_val_test_split(
        (x, y), prc_test=.2, random_state=42)
    training_generator = CustomImageDataGenerator(
        x_train[0].shape,
        .5,
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=80,
        width_shift_range=.3,
        height_shift_range=.3,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=0.5,
        shear_range=0.5,
        fill_mode="reflect")
    test_generator = CustomImageDataGenerator(
        x_train[0].shape,
        0,
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=.0,
        height_shift_range=.0,
        horizontal_flip=False,
        vertical_flip=False,
        zoom_range=0,
        shear_range=0,
        fill_mode="reflect")
    return training_generator, (x_train,
                                y_train), test_generator, (x_test,
                                                           y_test), m, s
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--image', type=str, default='example/input.png')
    parser.add_argument('-m',
                        '--model_file',
                        type=str,
                        default='models/deblur.tflite')
    parser.add_argument('-q', '--quantized', action='store_true')
    args = parser.parse_args()

    interpreter = tf.lite.Interpreter(model_path=args.model_file)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # check the type of the input tensor
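    # A sketch of such a check (not in the original code): the TFLite interpreter
    # reports the expected input dtype, which could be used instead of the
    # --quantized flag; this variable is unused below and shown only as an illustration.
    floating_model = input_details[0]['dtype'] == np.float32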

    # NxHxWxC, H:1, W:2
    img = imageio.imread(args.image)

    save_dir = 'example'
    os.makedirs(save_dir, exist_ok=True)

    if args.quantized:
        img = img.astype(np.float32)
        img = img - 128
    else:
        img = data.normalize(img)

    input_data = np.expand_dims(img, axis=0)
    interpreter.set_tensor(input_details[0]['index'], input_data)

    # Note that we only measure the invoke time
    time_begin = time.time()
    interpreter.invoke()
    time_end = time.time()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    results = np.squeeze(output_data)
    results = results.astype(np.float32)

    if args.quantized:
        results = results + 128
    else:
        results = 127.5 * (results + 1)

    results = results.round().clip(min=0, max=255)
    results = results.astype(np.uint8)

    imageio.imwrite(path.join(save_dir, 'output.png'), results)
    time_total = time_end - time_begin
    print('Time: {:.3f}s {:.1f}fps'.format(time_total, 1 / time_total))
Example #16
def _main_():
    config_path = args.conf
    input_path = args.input
    weights_path = args.weights

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2,
                                allow_growth=True)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    K.set_session(sess)

    classes = data.get_classes(config['train']['cache_name'])

    if not classes:
        print('Failed to get train classes')

    infer_model = load_model(weights_path)

    image_paths = utils.get_impaths_from_path(input_path)

    processing_count = 0
    sum_time = 0

    # the main loop
    for image_path in tqdm(image_paths):
        src_image = cv2.imread(image_path)
        # print(image_path)

        start_time = time.time()

        net_input_shape = (config['model']['input_side_sz'],
                           config['model']['input_side_sz'])

        image = cv2.resize(src_image, net_input_shape)

        image = data.normalize(image)
        image = np.expand_dims(image, axis=0)
        result = infer_model.predict(image)[0]

        sum_time += time.time() - start_time
        processing_count += 1

        max_idx = np.argmax(result)
        print(classes[max_idx], max_idx)

        cv2.imshow('1', src_image)
        if 27 == cv2.waitKey(0):
            break

    fps = processing_count / sum_time
    print('Result: {}'.format(fps))
Example #17
 def stream_filter(self):
     """
     Start listening based on a list of persons names.
     """
     # add names to stream filter
     track_list = [data.normalize(p['name']) for p in self.persons]
     logging.debug('track_list: %s', track_list)
     while True:
         try:
             self.stream.filter(track=track_list)
         except Exception:
             logging.exception('stream filter')
             time.sleep(10)
Example #18
def encode_partial(state, var=False):
    if state.dtype == np.float64:  # np.float was removed from NumPy; float64 is the intended dtype check
        state = normalize([state], [stat[0]])[0]
    else:
        state = normalize([state], [stat[0]], var=True)[0]

    Rr_idx, Rs_idx, values, Ra = construct_fully_connected_rel(state.size(1), args.relation_dim)
    d = [state, Rr_idx, Rs_idx, values, Ra]
    d = [x.cuda() if use_gpu else x for x in d]
    state, Rr_idx, Rs_idx, values, Ra = d

    Rr = torch.sparse.FloatTensor(
        Rr_idx, values, torch.Size([state.size(1), Ra.size(0)]))
    Rs = torch.sparse.FloatTensor(
        Rs_idx, values, torch.Size([state.size(1), Ra.size(0)]))
    Ra = Ra[None, :, :]

    with torch.set_grad_enabled(var):
        encode = model.encode([state, Rr, Rs, Ra], args.pstep_encode)
        latent = model.to_latent(encode)

    return latent
Example #19
 def stream_filter(self):
     """
     Start listening based on a list of persons names.
     """
     # add names to stream filter
     track_list = [data.normalize(p['name']) for p in self.persons]
     logging.debug('track_list: %s', track_list)
     while True:
         try:
             self.stream.filter(track=track_list)
         except Exception:
             logging.exception('stream filter')
             time.sleep(10)
def personalized_model():
    model = RandomForestClassifier(n_estimators=100)
    labels, targets, features = data.get_all("features.csv")

    logo = LeaveOneGroupOut()
    scores = []
    for rest, user in logo.split(features, targets, groups=labels):
        normalized = data.normalize(features[user])

        user_scores = cross_val_score(model, normalized, targets[user], cv=5)

        scores = scores + user_scores.tolist()

    return np.array(scores)
Example #21
def predict(params):
    """
    From a set of parameters, loads a network (model and weights), builds a
    prediction vector, which is returned together with the number of tendency
    errors found
    """
    raw = data.read(params, params['pred_dataset'])
    normalized = data.normalize(raw, params)
    adjusted = parameters.adjust(normalized, params)
    # prepare test data
    _, _, X_test, Y_test = data.prepare(adjusted, params)
    # Perform the prediction.
    model1 = model.prediction_setup(params)
    print('Feeding X_test (shape=', X_test.shape, ')')
    (yhat, rmse, num_errors) = range_predict(model1, X_test, Y_test, params)
    return (params, model1, Y_test, yhat, rmse, num_errors)
Example #22
def preprocess():
    """
    Returns
    ------------
    training_generator
    (x_train, y_train)
    test_generator
    (x_test, y_test)
    mean for normalization
    std for normalization
    """
    dataset = data.load("../train", im_size)
    x, y = zip(*dataset)
    r = data.onehot_label(y)
    y = list(map(lambda k: r[k], y))
    x, m, s = data.normalize(x)
    (x_train, y_train), (x_test, y_test) = data.train_val_test_split((x, y))
    training_generator = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=80,
        width_shift_range=.3,
        height_shift_range=.3,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=0.5,
        shear_range=0.5,
        fill_mode="reflect")
    test_generator = ImageDataGenerator(featurewise_center=False,
                                        samplewise_center=False,
                                        featurewise_std_normalization=False,
                                        samplewise_std_normalization=False,
                                        zca_whitening=False,
                                        rotation_range=0,
                                        width_shift_range=.0,
                                        height_shift_range=.0,
                                        horizontal_flip=True,
                                        vertical_flip=True,
                                        zoom_range=0.5,
                                        shear_range=0.5,
                                        fill_mode="reflect")
    return training_generator, (x_train,
                                y_train), test_generator, (x_test,
                                                           y_test), m, s
Example #23
    def parse_data(self, data, bar):
        last_user = None
        movies, ratings = ([] for _ in range(2))
        for i, line in enumerate(data):
            # progress bar
            bar.next()
            user, movie, rating, _ = parse('{:d}::{:d}::{:g}:{}', line)
            if user != last_user:  # if we're on to a new user
                if last_user is not None:
                    return last_user, movies, ratings

                # clean slate for next user
                movies, ratings = ([] for _ in range(2))
                last_user = user

            movies.append(movie)
            ratings.append(normalize(rating))
        bar.next()
Example #24
def eval_single_model(model_name, testset_name, labels_name):
    """
    Evaluate model on test set
    :param model_name:
    :param testset_name:
    :return:
    """
    prefix, dataset, architect, trans_type = model_name.split('-')

    X_test = np.load('{}/{}.npy'.format(PATH.ADVERSARIAL_FILE, testset_name))
    labels = np.load('{}/{}.npy'.format(PATH.ADVERSARIAL_FILE, labels_name))

    if 'composition' in trans_type:
        trans_type = TRANSFORMATION.get_transformation_compositions()
        print(type(trans_type), trans_type)

    # apply transformation(s)
    X_test = transform(X_test, trans_type)

    # evaluate each of the composition
    if 'composition' in trans_type:
        for trans in trans_type:
            print(type(trans), trans)

            m_name = '{}-{}-{}-{}'.format(prefix, dataset, architect, trans)
            model = models.load_model(m_name)

            print('*** Evaluating ({}) on ({})...'.format(
                m_name, testset_name))
            scores = model.evaluate(X_test, labels, verbose=2)
            print(scores)
            del model

    # evaluate the model
    model = models.load_model(model_name)

    if (dataset == DATA.cifar_10):
        X_test = normalize(X_test)
    print('*** Evaluating ({}) on ({})...'.format(model_name, testset_name))
    scores = model.evaluate(X_test, labels, verbose=2)
    print(scores)
    return scores
Example #25
def load_case(normal=True):
    if normal:
        test_samples = np.load(os.path.join("dataset/demo/", "normal_samples.npy"))
    else:
        test_samples = np.load(os.path.join("dataset/demo/", "abnormal_samples.npy"))

    for i in range(test_samples.shape[0]):
        for j in range(1):
            test_samples[i][j] = normalize(test_samples[i][j][:])
    test_samples = test_samples[:, :1, :]
    print(test_samples.shape)
    if not normal :
        test_y=np.ones([test_samples.shape[0],1])
    else:
        test_y = np.zeros([test_samples.shape[0], 1])
    test_dataset = TensorDataset(torch.Tensor(test_samples), torch.Tensor(test_y))

    return DataLoader(dataset=test_dataset,  # torch TensorDataset format
                      batch_size=64,
                      shuffle=False,
                      num_workers=0,
                      drop_last=False)
dataset = data.load()
features = dataset["train_data"]
targets = dataset["train_labels"]
test_features = dataset["test_data"]
test_targets = dataset["test_labels"]
m,n = features.shape
replace_value_with_definition("inputs_N",n)
replace_value_with_definition("weights_L",-1.0/n)
replace_value_with_definition("weights_H",1.0/n)
print initial_setting
batchsizes = [(50,'g'),(100,'r'),(500,'g'),(1000,'r')]

results = []
for minibatch in batchsizes:
    NN = network.NetworkFrame(initial_setting)
    features_normalized,mean,std = data.normalize(features)
    test_normalized,_,_ = data.normalize(test_features,mean,std)
    NN.Train(features_normalized, targets, test_normalized, test_targets, 10e-5, minibatch[0], 200, 0.001)
    testing_record = NN.GetTestingRecord()
    testing_indecs = np.array([x[0] for x in testing_record])
    testing_errors = np.array([x[1] for x in testing_record])
    results.append([testing_indecs,testing_errors,minibatch[1]])

plot_config = []
legends= []
for i in range(len(results)):
    x,=plt.plot(results[i][0],results[i][1])
    legends.append(x)
plt.legend(legends,['50','100','500','1000'])
plt.title('Test error curve of different batch size')
plt.show()
Example #27
                         input_list=['v(i)'],
                         output_list=['i(vi)'])
num_samples, sequence_length, num_channel = _data.shape
print("Sequence_len: ", sequence_length)
num_hidden = sequence_length  # too big will hang
num_hidden = 512
num_layers = 1  # multi-layer
batch_size = 8

#data_scale = 1.0*np.max(_data)
#label_scale = 1.0*np.max(_label)

_data = np.concatenate([_data] * 1000)
_label = np.concatenate([_label] * 1000)
indexes = np.arange(_data[0].size)
data_scale = normalize(_data, axis=1)
label_scale = normalize(_label, axis=1)
plt.plot(indexes, _label[0, :, 0], label='normalized')
plt.plot(indexes, _label[0, :, 0] * label_scale[0][0], label='recovered')
plt.legend(loc='upper right')
plt.show()

print("data: ", _data.shape)
print("scale: ", data_scale.shape)

data_len = len(_data)
m = int(0.9 * data_len)
m += m % batch_size
idx = np.random.choice(data_len, size=data_len, replace=False)
train_idx = idx[:m]
test_idx = idx[m:]
Example #28
def train(model, X, Y, model_name, need_augment=False, is_BB=False, **kwargs):
    """
    Train a model on the given dataset.
    :param model: the model to train
    :param X: training examples
    :param Y: corresponding desired labels
    :param model_name: name in the form prefix-dataset-architecture-transformation
    :param need_augment: a flag - whether to augment the data before training
    :param is_BB: a flag - whether to use the fixed 9000-sample validation split
    :param kwargs: for optimizer, loss function, and metrics
    :return: the trained model
    """
    print('INFO: model name: {}'.format(model_name))
    learning_rate = 0.001
    validation_rate = 0.2

    prefix, dataset, architect, trans_type = model_name.split('-')

    optimizer = kwargs.get('optimizer',
                           keras.optimizers.Adam(lr=learning_rate))
    loss_func = kwargs.get('loss', keras.losses.categorical_crossentropy)
    metrics = kwargs.get('metrics', 'default')

    print('INFO: compiler')
    print('>>> optimizer: {}'.format(optimizer))
    print('>>> loss function: {}'.format(loss_func))
    print('>>> metrics: {}'.format(metrics))

    nb_examples, img_rows, img_cols, nb_channels = X.shape

    if (DATA.cifar_10 == dataset):
        """
        mean-std normalization
        """
        X = data.normalize(X)

    if not is_BB:
        nb_training = int(nb_examples * (1. - validation_rate))
        train_examples = X[:nb_training]
        train_labels = Y[:nb_training]
        val_examples = X[nb_training:]
        val_labels = Y[nb_training:]

    else:
        num_val = 9000
        train_examples = X[num_val:]
        train_labels = Y[num_val:]
        val_examples = X[:num_val]
        val_labels = Y[:num_val]
    """
    augment data
    """
    datagen = None
    if (DATA.cifar_10 == dataset):
        # normalize data (has been handled when loading the data)
        # data augmentation
        datagen = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
        )
        datagen.fit(train_examples)
    """
    compile data
    """
    if ('default' == metrics):
        model.compile(optimizer=optimizer,
                      loss=loss_func,
                      metrics=['accuracy'])
    else:
        model.compile(optimizer=optimizer,
                      loss=loss_func,
                      metrics=['accuracy', metrics])
    """
    train the model
    """
    if (DATA.cifar_10 == dataset):
        history = model.fit_generator(
            datagen.flow(train_examples,
                         train_labels,
                         batch_size=MODEL.BATCH_SIZE),
            steps_per_epoch=nb_training // MODEL.BATCH_SIZE,
            epochs=MODEL.EPOCHS,
            verbose=2,
            validation_data=(val_examples, val_labels),
            callbacks=[LearningRateScheduler(lr_schedule)])

    else:  # do not need to augment data
        history = model.fit(train_examples,
                            train_labels,
                            batch_size=MODEL.BATCH_SIZE,
                            epochs=MODEL.EPOCHS,
                            verbose=2,
                            validation_data=(val_examples, val_labels))
    """
    evaluate the model
    """
    scores_train = model.evaluate(train_examples,
                                  train_labels,
                                  batch_size=128,
                                  verbose=0)
    scores_val = model.evaluate(val_examples,
                                val_labels,
                                batch_size=128,
                                verbose=0)
    """
    report
    """
    print('\t\t\t loss, \tacc, \tadv_acc')
    print('Evaluation score on training set: {}'.format(scores_train))
    print('Evaluation score on validation set: {}'.format(scores_val))
    file_name = 'checkpoints-{}-{}-{}-{}.csv'.format(prefix, dataset,
                                                     architect, trans_type)
    file.dict2csv(history.history, '{}/{}'.format(PATH.RESULTS, file_name))
    plotTrainingResult(history, model_name)

    return model
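

# Hypothetical usage sketch: the name passed to train() must follow the
# prefix-dataset-architecture-transformation pattern expected by model_name.split('-').
# The name components below are placeholders, not values confirmed by the source.
def train_example_sketch(model, X_train, Y_train):
    return train(model, X_train, Y_train, 'model-cifar_10-cnn-clean')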
Example #29
def build_iters(filename, input_list, output_list, splits, batch_size):
    """
    Load & generate training examples from multivariate time series data
    :return: data iters & variables required to define network architecture
    """
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['v(pad)'], shape='ncw')
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vpad)'])
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vi)'])
    #_data, _label = gen_data(filename='1cycle_iv_small_100p.txt',input_list=['v(i)'], output_list=['v(pad)'], shape='ncw')
    _data, _label = gen_data(filename='1cycle_iv_small_100p.txt',
                             input_list=['v(i)'],
                             output_list=['i(vpad)'],
                             shape='ncw')
    global num_samples, sequence_length, num_channel
    num_samples, num_channel, sequence_length = _data.shape
    _data = np.concatenate([_data] * 200)
    _label = np.concatenate([_label] * 200)
    data_scale = normalize(_data, axis=2)
    label_scale = normalize(_label, axis=2)

    _data = np.atleast_3d(_data)
    _label = np.atleast_3d(_label)
    data_len = len(_data)
    print("Shape: ", _data.shape)  # (samples, seq_len, features)
    #sys.exit(0)

    m = int(splits[0] * data_len)
    m += m % batch_size
    k = int(splits[1] * data_len)
    k += k % batch_size

    idx = np.random.choice(data_len, size=data_len, replace=False)
    train_idx = idx[:m]
    val_idx = idx[m:m + k]
    test_idx = idx[m + k:]

    #X = _data[:m]
    #y = _label[:m]
    X = _data[train_idx, :]
    y = _label[train_idx, :]
    train_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #train_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("train_data shape: ", X.shape, y.shape)

    #X = _data[m:m+k]
    #y = _label[m:m+k]
    X = _data[val_idx, :]
    y = _label[val_idx, :]
    val_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #val_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("val_data shape: ", X.shape, y.shape)

    #X = _data[m+k:]
    #y = _label[m+k:]
    X = _data[test_idx, :]
    y = _label[test_idx, :]
    global eval_data_scale, eval_label_scale
    eval_data_scale = data_scale[test_idx, :]
    eval_label_scale = label_scale[test_idx, :]
    test_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #test_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=1, shuffle=False)
    print("test_data shape: ", X.shape, y.shape)
    return train_iter, val_iter, test_iter
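
# A hypothetical call of build_iters (a sketch; the split fractions and batch size
# are illustrative, and the filename mirrors the one used inside the function):
def build_iters_example():
    return build_iters('1cycle_iv_small_100p.txt',
                       input_list=['v(i)'],
                       output_list=['i(vpad)'],
                       splits=(0.8, 0.1),
                       batch_size=8)

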
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-l',
                        '--load_from',
                        type=str,
                        default='models/deblur.hdf5')
    parser.add_argument('-s',
                        '--save_to',
                        type=str,
                        default='models/deblur.tflite')
    parser.add_argument('-d', '--depth', type=int, default=4)
    parser.add_argument('-t', '--test', type=str, default='example/input.png')
    parser.add_argument('-o', '--optimize', type=str, default='')
    parser.add_argument('-q', '--quantize', type=str, default='')
    cfg = parser.parse_args()

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            tf.config.experimental.set_memory_growth(gpus[0], True)
        except RuntimeError as e:
            print(e)

    # Prepare the test input
    test_input = imageio.imread(cfg.test)
    test_input = data.normalize(test_input)
    test_input = np.expand_dims(test_input, axis=0)
    _, h, w, c = test_input.shape

    representative = 'REDS/{}/train_blur'
    if h == 256 and w == 256:
        print('hi')
        representative = representative.format('train_crop')
    else:
        representative = representative.format('train')

    if cfg.depth == 4:
        net = model.Baseline(h, w)
    else:
        net_class = getattr(model, 'Small{}'.format(cfg.depth))
        net = net_class(h, w)

    net.build(input_shape=(None, h, w, c))
    net.load_weights(cfg.load_from)
    # Make a dummy prediction to get the input shape
    net.predict(test_input, batch_size=1)
    net.summary()

    # Convert to the TFLite model
    converter = lite.TFLiteConverter.from_keras_model(net)
    if cfg.optimize == 'weight':
        converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
    elif 'integer' in cfg.quantize:
        converter.optimizations = [tf.lite.Optimize.DEFAULT]

        # Dataset for tuning
        def gen_rep():
            list_dir = os.listdir(representative)
            list_dir.sort()
            for d in tqdm.tqdm(list_dir, ncols=80):
                imgs = glob.glob(path.join(representative, d, '*.png'))
                img = random.choice(imgs)
                x = imageio.imread(img)
                hh, ww, _ = x.shape
                py = random.randrange(0, hh - h + 1)
                px = random.randrange(0, ww - w + 1)
                x = x[py:(py + h), px:(px + w)]
                x = np.expand_dims(x, axis=0)
                x = x.astype(np.float32)
                x = x - 128
                yield [x]

        converter.representative_dataset = gen_rep
        if 'full' in cfg.quantize:
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS_INT8
            ]
            converter.inference_input_type = tf.uint8
            converter.inference_output_type = tf.uint8
    '''
    elif 'fp16' in cfg.quantize:
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float16]
    '''
    lite_model = converter.convert()
    with open(cfg.save_to, 'wb') as f:
        f.write(lite_model)
Example #31

# generate init and goal positions
print("Prepare initial and goal configurations")
if args.env == 'Rope':
    state_goal, scene_ckp = generate_Rope_goal(args)
    state_goal_v = to_var(state_goal, use_gpu=use_gpu)[None, :, :]

    engine = RopeEngine(args.dt, args.state_dim, args.action_dim)
    engine.reset_scene(args.n_particle, ckp=scene_ckp)

    # construct attributes
    attr = gen_attr_Rope(engine, args)

    # normalize attr and change to torch variable
    attr_normalized = normalize([attr], [stat[0]])[0]
    attr_normalized = to_var(attr_normalized, use_gpu)[None, :, :]

    states_roll = np.zeros((args.roll_step, args.n_particle + 2, args.state_dim))
    actions_roll = np.zeros((args.roll_step, args.n_particle + 2, args.action_dim))

    control_v = to_var(actions_roll, use_gpu, requires_grad=True)

elif args.env == 'Box':
    state_goal, state_goal_full, vis_goal_full, scene_ckp = generate_Box_goal(args)
    state_goal_v = to_var(state_goal, use_gpu=use_gpu)[None, :, :]
    latent_goal = encode_partial(state_goal_v)

    engine = BoxEngine(args.dt, args.state_dim, args.action_dim)
    ckp = engine.reset_scene(args.n_particle, ckp=scene_ckp)
Example #32
from sklearn import svm
from numpy import arange
from data import draft, proof, eval, save, normalize

#(trX, trY, teX) = proof()
#clf = svm.SVC(probability=True, kernel='rbf', C = 5.0, gamma = 0.1)
#clf.fit(trX, trY)
#teY_svm01 = clf.predict_proba(teX)[:,1]
#save(teY_svm01)

def do_svm(which = ''):
  (trX, trY, teX) = draft(which)
  (trX, teX) = normalize(trX, teX)

  clf = svm.SVC(probability=True)
  clf.fit(trX, trY)
  teY = clf.predict_proba(teX)[:,1]
  return teY
#save(teY)

print eval(do_svm('_f03'))

(trX, trY, teX) = proof('_f03')
(trX, teX) = normalize(trX, teX)

clf = svm.SVC(probability=True)
clf.fit(trX, trY)
teY = clf.predict_proba(teX)[:,1]

save(teY)
Example #33
def main():
    # Load the CSV dataframe
    print("------Loading Dataframe---------")
    data_df = data.load(DATA_FILE)
    print("------DataFrame Loading DONE--------")
    # Convert to numpy and normalize
    print("------Normalizing Data---------")
    train_data = data.normalize(data_df, NORMALIZE)
    print("------Normalizing Data DONE---------")
    # Create Pytorch dataloader
    dataloader = data.create_dataloader(train_data, BATCH_SIZE, DEVICE, NOISE,
                                        NOISE_PARAM)
    print("------Created Dataloader---------")
    N, ninp = train_data.shape
    # CREATE MODEL
    if BOTTLENECK:
        net = model.DACBottle(ninp, NHID, NBOT, NHLAYERS, BOTTLENECK,
                              RESNET_TRICK, RANDOM_SEED).to(DEVICE)
    else:
        net = model.DAC(ninp, NHID, NHLAYERS, RESNET_TRICK,
                        RANDOM_SEED).to(DEVICE)
    print("------Loaded Model---------")
    N, ninp = train_data.shape
    if ((USE_EXISTING_CHECKPOINT or MODE == 'generate')
            and os.path.isfile(CHECKPOINT_FILE)):
        print("----------Loading CHECKPOINT-----------------")
        checkpoint = torch.load(CHECKPOINT_FILE)
        net.load_state_dict(checkpoint['model_state_dict'])
        print("----------Loading CHECKPOINT DONE-----------------")
    if MODE == 'train':
        # GET NORM DATA WITH NOISE AND GENERATE PREDICTIONS
        print("-----------Starting training--------------")
        trainer = train.Trainer(net, LR, LR_DECAY, REG)
        best_loss = np.inf
        for i in range(EPOCHS):
            for bx, by in dataloader:
                bx = bx.to(DEVICE)
                by = by.to(DEVICE)
                loss = trainer.step(bx, by)
            if i % PRINT_EVERY == 0:
                print(f"Epoch: {i}\t Training Loss: {loss}")
            if loss < best_loss:
                best_loss = loss
                torch.save({'model_state_dict': net.state_dict()},
                           CHECKPOINT_FILE)
        print("-----------Training DONE--------------")
    elif MODE == 'generate':
        # GET CLEAN NORM DATA AND GENERATE FEATURES FROM ACTIVATIONS
        print("----------Generating FEATURES-----------------")
        net.eval()  # put the network (not the model module) in eval mode
        with torch.no_grad():
            all_data = []
            for bx, by in dataloader:
                x = bx.to(DEVICE)
                out = net.generate(x)
                if len(all_data) == 0:
                    all_data = out
                else:
                    all_data = np.vstack((all_data, out))
        np.savetxt(OUT_FILE, all_data, delimiter=",")
        print("----------FEATURES generated and saved to file------------")
Example #34
#ignore the test header
testfile.next()
 
X_test = []
for line in testfile:
    splitted = line.rstrip().split(',')
    X_test.append([float(item) for item in splitted])

testfile.close()
from numpy import array
X_test = array(X_test)
y_train = array(y_train)
X_train = array(X_train)

from data import normalize
X_train, X_test = normalize(X_train, X_test)

###########################
# EXAMPLE BASELINE SOLUTION USING SCIKIT-LEARN
#
# using the scikit-learn LogisticRegression module
# to make it more interesting, instead of using the raw features we transform them logarithmically;
# the input to the classifier will be the difference between the transformed features of A and B
# the method roughly follows this procedure, except that we already start with pairwise data:
# http://fseoane.net/blog/2012/learning-to-rank-with-scikit-learn-the-pairwise-transform/
###########################
 
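# A minimal sketch of the logarithmic pairwise transform described above.
# Assumptions (not confirmed by the original snippet): each row concatenates
# A's features followed by B's features, and the features are non-negative.
import numpy as np
half = X_train.shape[1] // 2
X_train_log = np.log1p(X_train[:, :half]) - np.log1p(X_train[:, half:])
X_test_log = np.log1p(X_test[:, :half]) - np.log1p(X_test[:, half:])
# The baseline below still fits on the raw normalized features.
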
model = linear_model.LogisticRegression(fit_intercept=True, penalty='l2', C=25.1)
model.fit(X_train,y_train)
# compute AUC score on the training data (of limited value due to overfitting, but this is only an example solution)
p_train = model.predict_proba(X_train)
Example #35
import numpy as np
from tfHelper import tfHelper
import data

tfHelper.log_level_decrease()
# tfHelper.numpy_show_entire_array(28)
# np.set_printoptions(linewidth=200)


print ("Load data ...")
_, X_id, label = data.load_data_predict()

X_pred = tfHelper.get_dataset_with_one_folder('classed/.None', 'L')
X_pred = data.normalize(X_pred)

model = tfHelper.load_model("model_img")
# model = tfHelper.load_model("model")

######################### Predict #########################
predictions = model.predict(X_pred)

# print(predictions)
# exit (0)


# All features
with open("output_img_detailed", "w+") as file:
	# Head
	for line in label[:-1]:
		file.write(line + ",")
Example #36
args = parser.parse_args()

x, y = {'adult'    : data.load_adult,
        'cifar10'  : data.load_multi_cifar10,
        'cifar100' : data.load_multi_cifar100,
        'covtype'  : data.load_covtype,
        'kddcup08' : data.load_kddcup08,
        'letter'   : data.load_multi_letter,
        'mnist'    : data.load_multi_mnist}[args.ds]()
x, y = data.shuffle(x, y)
[[train_xx, train_yy],
 [val_xx,   val_yy],
 [test_xx,  test_yy]] = data.partition(x, y, args.ptt)
train_x, val_x, test_x = th.cat(train_xx), th.cat(val_xx), th.cat(test_xx)
train_y, val_y, test_y = th.cat(train_yy), th.cat(val_yy), th.cat(test_yy)
train_x, val_x, test_x = data.normalize([train_x, val_x, test_x])
train_xx = th.split(train_x, [len(x) for x in train_xx])
train_datasets = [D.TensorDataset(x) for x in train_xx]
train_loader = D.DataLoader(D.TensorDataset(train_x, train_y), args.bsi)
val_loader = D.DataLoader(D.TensorDataset(val_x, val_y), args.bsi)
test_loader = D.DataLoader(D.TensorDataset(test_x, test_y), args.bsi)
pclass_list = [len(y) / len(train_y) for y in train_yy]

n_classes = len(train_yy)
if len(args.bst) == n_classes:
    bs_list = args.bst
elif len(args.bst) == 1:
    bs_list = [args.bst[0]] * n_classes
else:
    raise RuntimeError()
train_loaders = [utils.cycle(D.DataLoader(ds, bs, shuffle=True)) \
Example #37
#!/usr/bin/python

import data as dt
import clusters as cl
    
data = dt.fill_data("in/src_data.txt")

print("=== INIT DATA ===")
dt.print_data(data)

data = dt.normalize(data)

print("=== NORMALIZED DATA ===")
dt.print_data(data)

k_clusters = cl.k_averages(data, [4,2,8])
print("=== K AVERAGE ===")
print(k_clusters)

dt.plot("out/k_average.png", k_clusters, data)

mm_clusters = cl.max_min(data)
print("=== MAXIMIN ===")
print(mm_clusters)

dt.plot("out/maximin.png", mm_clusters, data)
Example #38
def train_and_save(model_name, X, Y, validation_rate=0.2, need_augment=False):
    """
    Train a model on the given training set, then
    save the trained model under the given name.
    :param model_name: the name to save the model as
    :param X: training examples.
    :param Y: corresponding desired labels.
    :param validation_rate: fraction of the examples held out for validation.
    :param need_augment: a flag indicating whether to perform data augmentation before training.
    """
    warnings.warn(
        'This method is deprecated, it will be removed soon. '
        'Please use functions train() or train_model() to train a model, '
        'then save_model() to save the model.', DeprecationWarning)

    prefix, dataset, architect, trans_type = model_name.split('-')
    nb_examples, img_rows, img_cols, nb_channels = X.shape

    nb_validation = int(nb_examples * validation_rate)
    nb_training = nb_examples - nb_validation
    train_samples = X[:nb_training]
    train_labels = Y[:nb_training]
    val_sample = X[nb_training:]
    val_labels = Y[nb_training:]
    input_shape = (img_rows, img_cols, nb_channels)
    nb_classes = int(Y.shape[1])

    print('input_shape: {}; nb_classes: {}'.format(input_shape, nb_classes))
    print('{} training samples; {} validation samples.'.format(
        nb_training, nb_validation))

    # get corresponding model
    model = create_model(dataset,
                         input_shape=input_shape,
                         nb_classes=nb_classes)
    history = []
    scores = []
    if (need_augment):
        # normalize samples
        train_samples = data.normalize(train_samples)
        val_sample = data.normalize(val_sample)
        # data augmentation
        datagen = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
        )
        datagen.fit(train_samples)
        # define a optimizer
        opt_rms = optimizers.RMSprop(lr=0.001, decay=1e-6)
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt_rms,
                      metrics=['accuracy'])
        # perform training
        with tf.device('/device:GPU:0'):  # to run in google colab
            print("Found GPU:0")
            # train
            history = model.fit_generator(
                datagen.flow(train_samples,
                             train_labels,
                             batch_size=MODEL.BATCH_SIZE),
                steps_per_epoch=nb_training // MODEL.BATCH_SIZE,
                epochs=MODEL.EPOCHS,
                verbose=2,
                validation_data=(val_sample, val_labels),
                callbacks=[LearningRateScheduler(lr_schedule)])
            # test, this will be run with GPU
            # verbose: integer. 0 = silent; 1 = progress bar; 2 = one line per epoch
            # evaluate the model on the validation set silently
            scores = model.evaluate(val_sample,
                                    val_labels,
                                    batch_size=128,
                                    verbose=0)
    else:
        # compile model
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        with tf.device('/device:GPU:0'):
            # train
            history = model.fit(train_samples,
                                train_labels,
                                epochs=MODEL.EPOCHS,
                                batch_size=MODEL.BATCH_SIZE,
                                shuffle=True,
                                verbose=1,
                                validation_data=(val_sample, val_labels))
            # test
            # verbose: integer. 0 = silent; 1 = progress bar; 2 = one line per epoch
            # evaluate the model on the validation set silently
            scores = model.evaluate(val_sample,
                                    val_labels,
                                    batch_size=128,
                                    verbose=0)

    # save the trained model
    model.save('{}/{}.h5'.format(PATH.MODEL, model_name))
    keras.models.save_model(model, '{}/{}_2.h5'.format(PATH.MODEL, model_name))
    # report
    print('Trained model has been saved to data/{}'.format(model_name))
    print('Test accuracy: {:.4f}; loss: {:.4f}'.format(scores[1], scores[0]))
    file_name = 'CheckPoint-{}-{}-{}.csv'.format(dataset, architect,
                                                 trans_type)
    file.dict2csv(history.history, '{}/{}'.format(PATH.RESULTS, file_name))
    plotTrainingResult(history, model_name)
    # delete the model after it's been saved.
    del model
Example #39
def eval(idx_rollout, video=True):
    print(f'\n=== Forward Simulation on Example {idx_rollout} ===')

    seq_data = load_data(prepared_names, os.path.join(data_dir, str(idx_rollout) + '.rollout.h5'))
    attrs, states, actions, rel_attrs = [to_var(d.copy(), use_gpu=use_gpu) for d in seq_data]

    seq_data = denormalize(seq_data, stat)
    attrs_gt, states_gt, action_gt = seq_data[:3]

    param_file = os.path.join(data_dir, str(idx_rollout // args.group_size) + '.param')
    param = torch.load(param_file)
    engine.init(param)

    '''
    fit data
    '''
    fit_data = get_more_trajectories(idx_rollout)
    fit_data = [to_var(d, use_gpu=use_gpu) for d in fit_data]
    bs = args.fit_num

    ''' T x N x D (denormalized)'''
    states_pred = states_gt.copy()
    states_pred[1:] = 0

    ''' T x N x D (normalized)'''
    s_pred = states.clone()

    '''
    reconstruct loss
    '''
    attrs_flat = get_flat(fit_data[0])
    states_flat = get_flat(fit_data[1])
    actions_flat = get_flat(fit_data[2])
    rel_attrs_flat = get_flat(fit_data[3])

    g = model.to_g(attrs_flat, states_flat, rel_attrs_flat, args.pstep)
    g = g.view(torch.Size([bs, args.time_step]) + g.size()[1:])

    G_tilde = g[:, :-1]
    H_tilde = g[:, 1:]
    U_tilde = fit_data[2][:, :-1]

    G_tilde = get_flat(G_tilde, keep_dim=True)
    H_tilde = get_flat(H_tilde, keep_dim=True)
    U_tilde = get_flat(U_tilde, keep_dim=True)

    _t = time.time()
    A, B, fit_err = model.system_identify(
        G=G_tilde, H=H_tilde, U=U_tilde, rel_attrs=fit_data[3][:1, 0], I_factor=args.I_factor)
    _t = time.time() - _t

    '''
    predict
    '''

    g = model.to_g(attrs, states, rel_attrs, args.pstep)

    pred_g = None
    for step in range(0, args.time_step - 1):
        # prepare input data

        if step == 0:
            current_s = states[step:step + 1]
            current_g = g[step:step + 1]
            states_pred[step] = states_gt[step]
        else:
            '''current state'''
            if args.eval_type == 'valid':
                current_s = states[step:step + 1]
            elif args.eval_type == 'rollout':
                current_s = s_pred[step:step + 1]

            '''current g'''
            if args.eval_type in {'valid', 'rollout'}:
                current_g = model.to_g(attrs[step:step + 1], current_s, rel_attrs[step:step + 1], args.pstep)
            elif args.eval_type == 'koopman':
                current_g = pred_g

        '''next g'''
        pred_g = model.step(g=current_g, u=actions[step:step + 1], rel_attrs=rel_attrs[step:step + 1])

        '''decode s'''
        pred_s = model.to_s(attrs=attrs[step:step + 1], gcodes=pred_g,
                            rel_attrs=rel_attrs[step:step + 1], pstep=args.pstep)

        pred_s_np_denorm = denormalize([to_np(pred_s)], [stat[1]])[0]

        states_pred[step + 1:step + 2] = pred_s_np_denorm
        d = args.state_dim // 2
        states_pred[step + 1:step + 2, :, :d] = states_pred[step:step + 1, :, :d] + \
                                                args.dt * states_pred[step + 1:step + 2, :, d:]

        s_pred_next = normalize([states_pred[step + 1:step + 2]], [stat[1]])[0]
        s_pred[step + 1:step + 2] = to_var(s_pred_next, use_gpu=use_gpu)

    if video:
        engine.render(states_pred, seq_data[2], param, act_scale=args.act_scale, video=True, image=True,
                      path=os.path.join(args.evalf, str(idx_rollout) + '.pred'),
                      states_gt=states_gt)
Example #40
split = int(len(x_train) * 0.2)
x_test = x_train[:split]
x_train = x_train[split:]
y_test = y_train[:split]
y_train = y_train[split:]

input_size = len(x_train[0])
num_classes = 99

print(str(num_classes) + ' classes')
print(str(input_size) + ' features')
print(str(len(x_train)) + ' lines')

print(x_train.shape, 'train samples')

x_train = data.normalize(x_train)
x_test = data.normalize(x_test)

# model = tfHelper.load_model("model_img")

# model = k.models.Sequential()
# model.add(k.layers.Dense(300, input_dim=input_size, activation='tanh'))
# model.add(k.layers.Dense(200, activation='tanh'))
# model.add(k.layers.Dense(150, activation='tanh'))
# model.add(k.layers.Dense(num_classes, activation='softmax'))

model = k.models.Sequential()
model.add(
    k.layers.Conv2D(16, (5, 5),
                    activation='relu',
                    input_shape=(imgWidth, imgWidth, 1)))
Example #41
excluded = ('gpu', 'report_every', 'n_iterations')
run_id = 'ce-' + '-'.join('%s-%s' % (key, str(getattr(args, key)))
                          for key in keys if key not in excluded)
writer = tb.SummaryWriter('runs/' + run_id)

# In[ ]:

if args.gpu < 0:
    cuda = False
else:
    cuda = True
    th.cuda.set_device(args.gpu)

rbg = args.actor in ('lenet', 'resnet')
train_x, train_y, test_x, test_y = data.load_dataset(args.dataset, rbg)
train_x, test_x = data.normalize(train_x, test_x)
if args.post == '91-under':
    label2ratio = {0: 0.9, 1: 0.1}
    train_x, train_y, test_x, test_y = data.random_subset(
        train_x, train_y, test_x, test_y, label2ratio)
elif args.post == '91-over':
    label2label = {9: 1}
    label2label.update({i: 0 for i in range(9)})
    train_x, train_y, test_x, test_y = data.relabel(train_x, train_y, test_x,
                                                    test_y, label2label)
elif args.post == 'covtype':
    label2label = {0: 0, 1: 0, 2: 0, 3: 0, 4: 1, 5: 0, 6: 0}
    train_x, train_y, test_x, test_y = data.relabel(train_x, train_y, test_x,
                                                    test_y, label2label)

if args.sampler: