# Assumes these helpers/globals are defined elsewhere in the module:
# load_datasets, check_probabilities_for_f, Training_percent,
# STARTING_FRACTION and alpha.
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF


def calculate_t(dataset_no):
    print("Starting to find original labels for dataset no", dataset_no)
    X, y = load_datasets(dataset_no)
    rows, col = X.shape
    kernel = 1.0 * RBF(1.0)
    ROW = int(Training_percent * rows)
    ROW = 800  # overrides the fraction above with a fixed training-set size
    print("Starting training")
    gpc = GaussianProcessClassifier(kernel=kernel,
                                    random_state=0).fit(X[:ROW, :], y[:ROW])
    print("Successfully trained ", dataset_no)
    print("Starting predicting data for full length")
    original_probability = gpc.predict_proba(X[:ROW, :])
    print("Original probability array calculated for dataset_no", dataset_no)

    # Minimal number of feature columns found per label (labels 1-4 and 16).
    mrl = [None] * 5

    current_pos = int(STARTING_FRACTION * col)

    while True:
        gpc = GaussianProcessClassifier(kernel=kernel, random_state=0).fit(
            X[:ROW, :current_pos], y[:ROW])
        new_probability = gpc.predict_proba(X[:ROW, :current_pos])

        print("Probabilities calculated for current value of f = ",
              current_pos)

        for i in range(5):
            if mrl[i] is None:
                # Map list index to label id: indices 0-3 are labels 1-4,
                # index 4 is label 16.
                label = 16 if i == 4 else i + 1
                if check_probabilities_for_f(original_probability,
                                             new_probability, alpha, y,
                                             label):
                    mrl[i] = current_pos
                    print("F for label", label, "is", current_pos)
                    # print("Saving model")
                    # s = 'label_id' + str(i+1) + "component" + str(dataset_no)
                    # filename = 'models/' + s + '.sav'
                    # pickle.dump(gpc, open(filename, 'wb'))

        # Stop once a feature count has been found for every label.
        if all(value is not None for value in mrl):
            break

        current_pos += 5
    return mrl
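
# A hedged usage sketch (not part of the original snippet): run calculate_t
# over a few dataset ids and collect the per-label feature counts it returns.
# The dataset ids below are hypothetical placeholders.
def run_calculate_t(dataset_ids=(1, 2, 3)):
    return {dataset_no: calculate_t(dataset_no) for dataset_no in dataset_ids}
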
def apply_gaussian_classifier(feature, col_required, array_to_predict):
    # Assumes `kernel` and `ROWS` are module-level globals defined elsewhere.
    Main_X, Main_Y = load_datasets(feature)
    print("Starting Gaussian")
    gpc = GaussianProcessClassifier(kernel=kernel, random_state=0).fit(
        Main_X[:ROWS, :col_required], Main_Y[:ROWS])
    print("Successfully trained :)")
    original_probability = gpc.predict_proba(array_to_predict)
    # Most probable class index; classes 0-3 map to labels 1-4, class 4 to 16.
    index = int(original_probability[0].argmax())
    return 16 if index == 4 else index + 1
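
# A hedged usage sketch (not part of the original snippet): classify a single
# sample with apply_gaussian_classifier. The feature id, column count and
# zero-valued sample below are hypothetical placeholders.
def classify_example_sample():
    import numpy as np
    sample = np.zeros((1, 10))  # one sample with 10 feature columns
    return apply_gaussian_classifier(feature=1, col_required=10,
                                     array_to_predict=sample)
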
dir_path = os.path.join(path, 'dataset_agument')

#################################################
# Build pickle files from the raw tracklet data. Assumes these helpers and
# globals are defined elsewhere: make_dir, path, raw_data_path, version,
# load_datasets, classify_tracklets, TRtoTM, len_filter, detect_emptynum,
# fill_emptynum and save_datasets.
make_dir(dir_path, 'pickle_data')
data_path = os.path.join(dir_path, 'pickle_data')

for ver in version:

  make_dir(data_path, ver)
  file_path = os.path.join(raw_data_path, ver)
  for file in os.listdir(file_path):
    # Skip this particular recording.
    if file == '2018Y02M20D09H59m22s':
      continue

    loads = load_datasets(ver, file)
    dataset = loads.load_tracklets()
    datas = loads.load_INS()

    # Classify the tracklets, transform them using the INS data, and drop
    # tracklets that are too short.
    fd = classify_tracklets(dataset)
    xyz = TRtoTM(fd, datas, ver)
    fd = len_filter(xyz)

    # Detect and fill in missing entries, then filter again.
    emp = detect_emptynum(fd)
    ffd = fill_emptynum(fd, emp)
    ffd = len_filter(ffd, fp=True)

    # Save the processed tracklets as a pickle file.
    save = save_datasets(data_path, ver, file)
    save.save_as_pkl(ffd)
    
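# A hedged read-back sketch (not part of the original): the exact file name
# written by save_as_pkl is not shown above, so this only demonstrates
# reloading one of the generated pickle files with the standard library.
import pickle

def load_pickled_tracklets(pkl_path):
    with open(pkl_path, 'rb') as f:
        return pickle.load(f)
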
Example No. 4
from sklearn.model_selection import train_test_split

from load_datasets import load_datasets
import numpy as np

X, y = load_datasets()


def load_test_data():
    # Rows from index 650 onward are held out as the test set.
    X_test = X[650:]
    y_test = y[650:]
    return X_test, y_test


def load_train_valid_data(test_split):
    # Split the first 650 rows into training and validation sets.
    X_tmp = X[:650]
    y_tmp = y[:650]
    X_train, X_valid, y_train, y_valid = train_test_split(X_tmp,
                                                          y_tmp,
                                                          test_size=test_split,
                                                          random_state=1)

    return X_train, X_valid, y_train, y_valid
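
# A hedged usage sketch (not part of the original snippet): hold out 20% of
# the first 650 rows for validation, then fetch the fixed test split.
X_train, X_valid, y_train, y_valid = load_train_valid_data(test_split=0.2)
X_test, y_test = load_test_data()
print(len(X_train), len(X_valid), len(X_test))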
Example No. 5
import torch
from torch import nn, optim


# Assumes these project helpers are defined elsewhere: load_datasets,
# get_loaders, get_model_classifier, get_class_tot, validation and
# save_model_checkpoint.
def training(network, learning_rate, hidden_units, epochs, gpu):

    train_dir = './flowers/train/'
    valid_dir = './flowers/valid/'
    test_dir = './flowers/test/'

    train_data, valid_data, test_data = load_datasets(train_dir, valid_dir, test_dir)
    
    trainloader, validloader, testloader = get_loaders(train_data, valid_data, test_data)

    model, classifier = get_model_classifier(network, get_class_tot(train_dir, valid_dir, test_dir), hidden_units)

    train_losses, valid_losses = [], []
    
    # Train on the GPU when requested and available, otherwise on the CPU.
    device = torch.device('cuda' if gpu and torch.cuda.is_available() else 'cpu')


    criterion = nn.NLLLoss()

    # Use the learning_rate argument instead of a hard-coded value.
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)

    model.to(device)

    for epoch in range(epochs):
        epoch_train_run_loss = 0
        epoch_batches = 0

        print(f"Epoch for {epoch+1}/{epochs}..")
    
        for images, labels in trainloader:
            epoch_batches += 1
            
            images, labels = images.to(device), labels.to(device)
        
            optimizer.zero_grad()
            logps = model(images)
            loss = criterion(logps, labels)
            loss.backward()
            optimizer.step()        

            epoch_train_run_loss += loss.item()
            
        with torch.no_grad():
            model.eval()
            valid_loss, valid_accuracy = validation(model, validloader, criterion, device)
            valid_losses.append(valid_loss)
            
            model.train()
        
        train_losses.append(epoch_train_run_loss/epoch_batches)
            
        print(f"Final Train loss: {epoch_train_run_loss/epoch_batches:.4f}"
              f", Validation loss: {valid_loss:.4f} "
              f"and Validation accuracy: {valid_accuracy:.4f} for Epoch {epoch+1}/{epochs}")
    
    print("Validation Completed Successfully. Saving started")
    #saving checkpoint
    save_model_checkpoint(model, train_data,  optimizer, epochs)     
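
# A hedged usage sketch (not part of the original snippet): the architecture
# name and hyperparameters below are illustrative placeholders.
if __name__ == '__main__':
    training(network='vgg16', learning_rate=0.001, hidden_units=512,
             epochs=5, gpu=True)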
Example No. 6
import time

import numpy as np
import tensorflow as tf


# Assumes these project helpers are defined elsewhere: load_datasets,
# cnn_face_classification_fn and imgs_input_fn. Written against the
# TensorFlow 1.x Estimator API.
def main():
    start_time = time.time()
    print("Loading data...")
    train_dataset, valid_dataset, eval_dataset = load_datasets()
    print("Done loading")

    train_data, train_labels = train_dataset
    train_labels = np.asarray(train_labels, dtype=np.int32)
    valid_data, valid_labels = valid_dataset
    valid_labels = np.asarray(valid_labels, dtype=np.int32)

    config = tf.estimator.RunConfig(model_dir = "Models/gray_scale_6.0",
                                    keep_checkpoint_max = 25,
                                    save_checkpoints_steps = 1000
                                    )

    params = tf.contrib.training.HParams(
        learning_rate = 0.001,
        dense_layers = 1,
        dense_layer_units = 1024,
        dropout_rate = 0.3,
        optimizer = "Adam",
        cross_entropy_weight = 1.0,
        # Higher-level settings
        color_channels = 1
    )

    # Create the Estimator
    face_classifier = tf.estimator.Estimator(model_fn=cnn_face_classification_fn, config = config, params = params)

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=1000)

    my_train = True
    # Train with Estimator.train; the commented-out block below shows the
    # tf.estimator.train_and_evaluate alternative instead.
    if my_train:
        face_classifier.train(input_fn=lambda: imgs_input_fn(train_data, train_labels, perform_shuffle = True, batch_size = 25),
                                               hooks=[logging_hook],
                                               steps = 5000)

    train_results = face_classifier.evaluate(input_fn=lambda: imgs_input_fn(train_data, train_labels, perform_shuffle = False, batch_size = 25))
    valid_results = face_classifier.evaluate(input_fn=lambda: imgs_input_fn(valid_data, valid_labels, perform_shuffle = False, batch_size = 25))

    print("------------------------------------------")
    print(train_results)
    print(valid_results)


    """
    # the new way
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: imgs_input_fn(train_data,
                                       train_labels,
                                       perform_shuffle=True,
                                       batch_size=15),
        max_steps=50000,
        hooks=[logging_hook])
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: imgs_input_fn(valid_data,
                                       valid_labels,
                                       perform_shuffle=False,
                                       batch_size=len(valid_data)),
        steps=5000)


    tf.estimator.train_and_evaluate(face_classifier, train_spec, eval_spec)
    """
    print("--- %s seconds ---" % (time.time() - start_time))