Example #1
def _main(args):
    # Raw arguments from parser
    save = args.save
    data_path = args.data_path
    filename = args.file
    results_dir = args.results_dir
    training_path = args.training_path
    model_dir = args.model_dir
    classes = args.classes
    weights_name = args.weights

    # Computed arguments
    classes_path = os.path.join(model_dir, classes)
    dir_list = [x for x in sorted(os.listdir(data_path)) if x.endswith('.jpg')]

    # Creating config instance
    config = Config()

    # Extracting classes and anchors
    class_names = get_classes(classes_path)
    anchors = config.YOLO_ANCHORS

    # Loading the data dictionary
    data = np.load(os.path.join(data_path, filename), allow_pickle=True)

    # Extracting images and boxes
    image_data, boxes = process_data(data['images'], data['boxes'])

    # Extracting anchor boxes and masks
    detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)

    # Normalizing data
    normalized_data = normalize(image_data, training_path, train=False)

    # Creating model and printing summary
    model_body, model = create_model(anchors,
                                     class_names,
                                     freeze_body=config.FREEZE,
                                     load_pretrained=config.LOAD_PRETRAINED)

    # Call to predict function
    boxes_dict = predict(model_body,
                         class_names,
                         anchors,
                         normalized_data,
                         weights_name,
                         dir_list,
                         non_best_sup=config.NON_BEST_SUP,
                         results_dir=results_dir,
                         save=save)

    # Saving predictions
    save_annotation(boxes_dict,
                    os.path.join(results_dir, 'predictions', 'pred_boxes.p'))
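
Example #1 reads everything from an argparse namespace; a minimal sketch of the parser it seems to assume (the argument names come from the attribute accesses above, while types, defaults, and help text are guesses):

import argparse

def _build_parser():
    # Hypothetical parser matching the attributes read in _main above
    parser = argparse.ArgumentParser(description='Run YOLO predictions on a folder of images')
    parser.add_argument('--save', action='store_true', help='save annotated prediction images')
    parser.add_argument('--data_path', required=True, help='directory holding the .jpg images and the data file')
    parser.add_argument('--file', required=True, help='name of the numpy data file inside data_path')
    parser.add_argument('--results_dir', default='results', help='directory where predictions are written')
    parser.add_argument('--training_path', required=True, help='path used to recover normalization statistics')
    parser.add_argument('--model_dir', required=True, help='directory containing the classes file')
    parser.add_argument('--classes', default='classes.txt', help='classes file name inside model_dir')
    parser.add_argument('--weights', required=True, help='weights file passed to predict')
    return parser

if __name__ == '__main__':
    _main(_build_parser().parse_args())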
Example #2
def start_test():
    Tk().withdraw()
    filename = askopenfilename(filetypes=[("Audio files (.wav)", "*.wav")])
    if filename != '':
        print(filename)
        model = training.create_model(summary=False)
        model.load_weights(model_dir)
        fr, sound = training.read(filename)
        female_prob = model.predict(np.vstack([sound[:10000]]),
                                    batch_size=10)[0][0]
        male_prob = 1 - female_prob
        gender = "male" if male_prob > female_prob else "female"
        print("Result: ", gender)
        print(
            f"Probabilities: Male: {male_prob*100:.2f}%    Female: {female_prob*100:.2f}%"
        )
    else:
        print('No file selected')
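
start_test above relies on a training.read helper that returns a sample rate and the raw samples of the chosen WAV file; a minimal sketch of such a helper (an assumption about the project's API, using scipy):

from scipy.io import wavfile

def read(filename):
    # Return (sample_rate, samples), matching the (fr, sound) unpacking above
    sample_rate, samples = wavfile.read(filename)
    return sample_rate, samples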
def main(args):
    ''' Train and evaluate the COVID-19 image classifier.

    Args:
        - batch_size: mini-batch size for the data loaders
        - epochs: number of training epochs
        - flag_model: identifier of the pretrained backbone to load

    Returns:
        - None; saves the best checkpoint and writes the test predictions to a CSV file
    '''

    # Parameters
    batch_size = args.batch_size
    epochs = args.epochs

    flag_model = args.flag_model
    # Define your transforms for the training and testing sets
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            RandAugment(),
            ImageNetPolicy(),
            Cutout(size=16),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'test':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    # Load the datasets with ImageFolder
    image_datasets = {
        x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
        for x in ['train', 'test']
    }
    data_loader = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=4,
                                       pin_memory=True)
        for x in ['train', 'test']
    }
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']}
    class_names = image_datasets['train'].classes

    # ImageFolder gives class_to_idx; invert it so we can map indices back to class names
    class_to_idx = image_datasets['train'].class_to_idx
    cat_to_name = {idx: name for name, idx in class_to_idx.items()}

    # Run this to test the data loader
    images, labels = next(iter(data_loader['test']))

    # Load the pretrained backbone model
    model = create_model(flag_model=flag_model, flag_pretrained=True)

    # Fine-tune: make all backbone parameters trainable
    for param in model.parameters():
        param.requires_grad = True

    # Replacement classifier head for the 3-class COVID task.
    # BatchNorm1d is used because these layers see 2-D (batch, features) inputs,
    # and no final Softmax is added since nn.CrossEntropyLoss applies log-softmax itself.
    fc = nn.Sequential(
        OrderedDict([('fc1', nn.Linear(2048, 1000, bias=True)),
                     ('BN1', nn.BatchNorm1d(1000)),
                     ('dropout1', nn.Dropout(0.7)),
                     ('fc2', nn.Linear(1000, 512)),
                     ('BN2', nn.BatchNorm1d(512)),
                     ('swish1', Swish()),
                     ('dropout2', nn.Dropout(0.5)),
                     ('fc3', nn.Linear(512, 128)),
                     ('BN3', nn.BatchNorm1d(128)),
                     ('swish2', Swish()),
                     ('fc4', nn.Linear(128, 3))]))

    # Connect the pretrained model with the modified classifier head
    model.fc = fc
    criterion = nn.CrossEntropyLoss()

    # Optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          nesterov=True,
                          weight_decay=0.0001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
    # Send to GPU
    model.to(device)

    # Try to resume from an existing checkpoint
    try:
        checkpoint = torch.load(CHECK_POINT_PATH)
        print("checkpoint loaded")
    except Exception:
        checkpoint = None
        print("checkpoint not found")

    model, best_val_loss, best_val_acc = train_model(
        model,
        criterion,
        optimizer,
        scheduler,
        num_epochs=epochs,
        checkpoint=checkpoint)
    torch.save(
        {
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'best_val_loss': best_val_loss,
            'best_val_accuracy': best_val_acc,
            'scheduler_state_dict': scheduler.state_dict(),
        }, CHECK_POINT_PATH)

    # Load checkpoint path
    try:
        checkpoint = torch.load(CHECK_POINT_PATH)
        print("checkpoint loaded")
    except Exception:
        checkpoint = None
        print("checkpoint not found")

    load_model(model=model, checkpoint=checkpoint, path=CHECK_POINT_PATH)

    # Testing (no gradients needed; torch.autograd.Variable is deprecated)
    since = time.time()
    model.eval()
    y_test = []
    y_pred = []
    with torch.no_grad():
        for images, labels in data_loader['test']:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predictions = outputs.max(1)

            y_test.append(labels.cpu().numpy())
            y_pred.append(predictions.cpu().numpy())

    y_test = np.concatenate(y_test)
    y_pred = np.concatenate(y_pred)
    pd.DataFrame({
        'true_label': y_test,
        'predicted_label': y_pred
    }).to_csv('Modified_EfficienNet_B0_Covid-19_Test.csv', index=False)
    time_elapsed = time.time() - since

    print('Testing complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    sns.heatmap(confusion_matrix(y_test, y_pred))
    print('Accuracy:', accuracy_score(y_test, y_pred))

    report = classification_report(y_test, y_pred)
    print(report)
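
The classifier head above uses a Swish activation that is not part of torch.nn; a minimal sketch of what such a module is commonly defined as (an assumption about the example's own Swish):

import torch
import torch.nn as nn

class Swish(nn.Module):
    # Swish / SiLU activation: x * sigmoid(x)
    def forward(self, x):
        return x * torch.sigmoid(x)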

###########################################################################################################
## selected the weights learned at epoch #70 for my final model, which can now be loaded again with Keras:

start_new_session()

# load data and generate clusters
np.random.seed(42)
os.chdir('C:/ENEA_CAS_WORK/Taxi_destination_predictions')
data = load_data()
clusters = get_clusters(data.train_labels)

# load the model of the run #1
os.chdir('C:\\ENEA_CAS_WORK\\Taxi_destination_predictions\\cache')
model = create_model(data.metadata, clusters)
model.load_weights('mymodel-001-2.2026.hdf5')

WWW = model.weights
print(WWW[1].shape)
# Out[139]: TensorShape([7, 10])  # 7 features for each of the 10 lat/lon values (first and last coordinates)

processed = process_features(data.validation)
print(len(processed))
print(processed[6].shape)
# Out[155]: (16444, 20)  # lat, lon

# see the exact MEAN LOSS for our custom validation dataset:
validation_predictions = model.predict(process_features(data.validation))
print(validation_predictions.shape)
# (16444, 2)
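
The snippet stops at the predictions, even though the comment asks for the mean loss on the custom validation set; one way to finish the computation, assuming data.validation_labels holds the true destination coordinates (both the attribute name and the plain Euclidean distance are assumptions, not necessarily the project's actual loss):

import numpy as np

true_destinations = np.asarray(data.validation_labels)   # assumed shape (16444, 2)
mean_loss = np.mean(np.linalg.norm(validation_predictions - true_destinations, axis=1))
print('Mean validation loss:', mean_loss)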
Example #5
import os
# suppress TensorFlow INFO and WARNING log messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from tensorflow.keras import models
from training import create_model, evaluate_model

model = create_model()
model.summary()
evaluate_model(model, 5, 200, 55)
models.save_model(model, "./models/digits/1/")
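
Saving with models.save_model into a versioned directory such as ./models/digits/1/ writes a TensorFlow SavedModel, so the model can later be restored with, for example:

from tensorflow.keras import models

restored = models.load_model("./models/digits/1/")
restored.summary()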
Example #6
 def make_model(self):
     """ Create an untrained model with the instance's parameters """
     if self.model is None:
         self.model = create_model(**self.params)
     return self.model
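
Example #6 shows only the method, so the surrounding class is implied; a usage sketch under the assumption that the instance keeps its constructor kwargs in self.params and caches the built model in self.model (the class name and parameters below are hypothetical):

class ModelFactory:
    def __init__(self, **params):
        self.params = params  # kwargs forwarded to create_model
        self.model = None     # built lazily by make_model

    def make_model(self):
        """ Create an untrained model with the instance's parameters """
        if self.model is None:
            self.model = create_model(**self.params)
        return self.model

factory = ModelFactory(num_classes=10)   # hypothetical parameter
model = factory.make_model()             # builds the model once
assert model is factory.make_model()     # later calls return the cached model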
Example #7
if __name__ == '__main__':
    # print("Starting the program...")
    # dp.get_historical_candle_data_binance(pa.PAIR, pa.INITIAL_ANALYSIS_PERIOD, pa.CSV_PATH)
    # print("Data Fetch Done!")

    df = dp.make_dataframe_and_add_y()
    print(df.head())
    print(df.shape)
    print("data frame is ready!")

    scaled_df, scaler = dp.scale_data_for_lstm(df)
    # print(scaled_df.head())
    # print(scaled_df.shape)
    print("data frame is scaled!")

    X_train, y_train, X_test, y_test = dp.create_train_test_sets(
        scaled_df, SEQ_LEN, 0.98)
    print("x-train:", X_train.shape, "y-train:", y_train.shape, "x-test:",
          X_test.shape, "y-test:", X_test.shape)
    print("data split completed!")

    model = tr.create_model(X_train.shape[-1])
    print("model created!")
    trained = tr.train_model(model, X_train, y_train)

    enter = input("press enter to continue...")
    print("training Done!")
    tr.test(model, scaler, X_test, y_test)
    print("test Done!")