def evaluate(args, model_path, configs):
    os.makedirs(args.method, exist_ok=True)  # make sure the output directory exists
    result = open(os.path.join(args.method, 'results.csv'), 'w')
    result.write('id,categories\n')

    # initialize data
    feats = sk_read_eval('origin_data/test.csv', normal=False)

    # initialize model
    if args.method == 'mlp':
        model = CifarClassifer(num_classes=configs['num_classes'])
    elif args.method == 'cnn':
        model = CNNCifarClassifer(num_classes=configs['num_classes'])
    else:
        raise ValueError('unknown method: %s' % args.method)
    model.load_state_dict(torch.load(model_path))
    model = model.eval()
    if configs['gpu']:
        model = model.cuda()
    print('======= Loaded model from %s =======' % model_path)

    for batch_i, feat in enumerate(feats):
        feat = torch.from_numpy(feat).unsqueeze(0)

        if configs['gpu']:
            feat = feat.cuda()

        with torch.no_grad():  # inference only; no autograd graph needed
            probs = model(feat)
        prob = F.softmax(probs, dim=-1)
        if configs['gpu']:
            pred = prob.max(1, keepdim=True)[1].cpu().numpy()
        else:
            pred = prob.max(1, keepdim=True)[1].numpy()

        result.write('%s,%s\n' % (str(batch_i), str(pred[0][0])))

    result.close()
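
# A minimal usage sketch (hypothetical values; in this project `args` comes
# from argparse and `configs` from a config file, neither shown here):
# configs = {'num_classes': 10, 'gpu': torch.cuda.is_available()}
# evaluate(args, 'models/cnn/best_0.6000.pth', configs)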
Example 2
def demo():
    # Build a small fully connected network with this custom nn API:
    # a 2-unit input, a 100-unit hidden layer, and a 2-unit output layer.
    mymodel = nn.model()
    mymodel.name = "test"
    mymodel.add_prop([nn.fc(2, 100)])
    mymodel.add_prop([nn.node(100)])
    mymodel.add_prop([nn.fc(100, 2)])
    mymodel.add_prop([nn.node_out(2)])

    # Train on the training/evaluation pattern sets; the third argument
    # is presumably the epoch count.
    mymodel.train(pat_train(), pat_eval(), 10)
Example 3
def main():
    parser = argparse.ArgumentParser(description='Train a car to drive itself')
    parser.add_argument('--data-base-path',
                        type=str,
                        default='./data1',
                        help='Path to image directory and driving log')

    args = parser.parse_args()

    # Instantiate the pipeline
    pipeline = Pipeline(model=model(), base_path=args.data_base_path, epochs=2)

    # Feed driving log data into the pipeline
    pipeline.import_data()
    # Start training
    pipeline.run()
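

# Standard entry-point guard (assumed; `model` and `Pipeline` are imported
# elsewhere in this module):
if __name__ == '__main__':
    main()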
Example 4
import numpy as np
import nn

# XOR training data, stored column-wise (shape: features x examples)
Xtr = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).T
Ytr = np.array([[0], [1], [1], [0]]).T

# layer sizes passed as nn_architecture (whether the input layer is
# included depends on this nn module's convention)
ld = [2, 1]

# initial learning rate (decayed during training via learning_decay)
lrate = 10

num_iterations = 10000

#print(Ytr.shape[1])

parameters = nn.model(X=Xtr,
                      Y=Ytr,
                      nn_architecture=ld,
                      start_learning_rate=lrate,
                      num_iterations=num_iterations,
                      learning_decay=0.01)

#parameters = nn.initialize_parameters(ld)

#print(nn.forward_propagation(Xtr, parameters)[0])

nn.predict([[0], [0]], parameters)
nn.predict([[1], [0]], parameters)
nn.predict([[0], [1]], parameters)
nn.predict([[1], [1]], parameters)
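
# A minimal sanity-check sketch, assuming nn.predict accepts a 2x1 column
# vector and returns the network's output (this custom nn module's exact
# return type is not shown in the source). XOR should yield 0, 1, 1, 0:
for x, expected in zip([[0, 0], [0, 1], [1, 0], [1, 1]], [0, 1, 1, 0]):
    out = nn.predict(np.array(x).reshape(2, 1), parameters)
    print(x, '-> expected', expected, 'got', out)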
Example 5
        # (tail of prepare_data: labels are derived from the image file names)
        if 'positive' in image_file.lower():
            y[0, i] = 1
        elif 'negative' in image_file.lower():
            y[0, i] = 0
    return X, y
train_set_x, train_set_y = prepare_data(train_images)
test_set_x, test_set_y = prepare_data(test_images)

# flatten each image to a column vector and scale pixel values to [0, 1]
train_set_x_flatten = train_set_x.reshape(train_set_x.shape[0], -1).T
test_set_x_flatten = test_set_x.reshape(test_set_x.shape[0], -1).T
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255



NN = nn.model(train_set_x, train_set_y, test_set_x, test_set_y,
              num_iterations=10000, learning_rate=0.003, print_loss=True)
# load a grayscale test image, resize it, and flatten it to a column vector
im = cv2.imread('test/dog1.jpg', 0)
im = cv2.resize(im, (ROWS, COLS))
test = im.reshape(1, ROWS*COLS).T
pred = nn.predict(NN["w"], NN["b"], test)
print(pred)
# compare models trained with several learning rates
learning_rates = [0.001, 0.01, 0.005]
models = {}
for i in learning_rates:
    print("learning rate is:", i)
    models[i] = nn.model(train_set_x, train_set_y, test_set_x, test_set_y,
                         num_iterations=10000, learning_rate=i, print_loss=True)
    print("---------------------------------------------------------")


for i in learning_rates:
    plt.plot(np.squeeze(models[i]["loss"]), label= str(models[i]["learning_rate"]))
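
# A minimal sketch to finish the comparison plot (assumes matplotlib.pyplot
# is imported as plt, as the plotting loop above implies):
plt.ylabel('loss')
plt.xlabel('iterations')
plt.legend(title='learning rate')
plt.show()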
Example 6
# Loading the dataset
print("Loading Training Data...")
X_train, Y_train = ld.load_dataset("TrainData.csv")
print("Loading CrossValidation Data...")
X_cv, Y_cv = ld.load_dataset("CrossValidationData.csv")
print("Data loading completed.")
print("number of training examples = " + str(X_train.shape[1]))
print("number of cv examples = " + str(X_cv.shape[1]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_cv shape: " + str(X_cv.shape))
print("Y_cv shape: " + str(Y_cv.shape))

# Train
print("Start Training...")
parameters = nn.model(X_train, Y_train, X_cv, Y_cv)
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']

# save the learned parameters (assumes `os` is imported; the parameters/
# directory may not exist yet)
os.makedirs("parameters", exist_ok=True)
np.savetxt("parameters/W1.txt", W1)
np.savetxt("parameters/b1.txt", b1)
np.savetxt("parameters/W2.txt", W2)
np.savetxt("parameters/b2.txt", b2)
np.savetxt("parameters/W3.txt", W3)
np.savetxt("parameters/b3.txt", b3)
print("Parameters have been saved!")
Example 7
    def save_screenshot(selected_counties='Alameda, CA',
                        selected_features=('recent_deaths',)):  # tuple avoids a mutable default
        print('selected_counties', selected_counties, 'selected_features',
              selected_features)
        # print(selected_features)
        # print(isinstance(selected_counties, list))
        cols = [
            '#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4',
            '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff',
            '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1',
            '#000075', '#808080', '#ffffff', '#000000'
        ]
        fig = make_subplots(rows=1,
                            cols=2,
                            subplot_titles=("Cases", "Deaths"))
        keys = ['cases', 'deaths']  # the aligned variants are not plotted here
        date1 = viz_map_utils.date_in_data(df)

        # if multiple counties are selected, aka manual select
        if isinstance(selected_counties, list):
            for i in range(2):
                key = keys[i]
                for index, pos in enumerate(selected_counties):
                    row = df[df['pos'] == pos]
                    if i < 2:
                        dates = date1
                    else:
                        dates = date2
                    fig.add_trace(go.Scatter(x=dates,
                                             y=row[key].to_list()[0],
                                             line=dict(color=cols[index]),
                                             name=row['CountyName'].values[0] +
                                             ', ' + row['StateName'].values[0],
                                             showlegend=i == 0),
                                  row=i // 2 + 1,
                                  col=i - (i // 2) * 2 + 1)
        # if a single county is selected, aka nearest neighbors
        elif selected_counties is not None:
            # plot selected county
            for i in range(2):
                key = keys[i]
                row = df[df['pos'] == selected_counties]
                if i < 2:
                    dates = date1
                else:
                    dates = date2
                fig.add_trace(go.Scatter(x=dates,
                                         y=row[key].to_list()[0],
                                         line=dict(color=cols[0]),
                                         name=row['CountyName'].values[0] +
                                         ', ' + row['StateName'].values[0],
                                         showlegend=i == 0),
                              row=i // 2 + 1,
                              col=i - (i // 2) * 2 + 1)
            # if features are selected, run nearest neighbors and plot results
            if selected_features is not None:
                if len(selected_features) != 0:
                    neighs = nn.model(df, selected_features, selected_counties)
                    # print(neighs)
                    for i in range(2):
                        key = keys[i]
                        for index, pos in enumerate(neighs):
                            row = df[df['pos'] == pos]
                            if i < 2:
                                dates = date1
                            else:
                                dates = date2
                            # print(row['CountyName'].values[0] +', ' + row['StateName'].values[0])
                            fig.add_trace(
                                go.Scatter(x=dates,
                                           y=row[key].to_list()[0],
                                           line=dict(color=cols[index + 1]),
                                           name=row['CountyName'].values[0] +
                                           ', ' + row['StateName'].values[0],
                                           showlegend=i == 0),
                                row=i // 2 + 1,
                                col=i - (i // 2) * 2 + 1)

        fig.update_layout(height=450,
                          width=900,
                          template='plotly_dark',
                          xaxis_title="Time",
                          yaxis_title="Count",
                          yaxis_showgrid=True)

        # edit axis labels
        fig['layout']['xaxis']['title'] = 'Date'
        fig['layout']['xaxis2']['title'] = 'Date'
        fig['layout']['yaxis']['title'] = 'Cases'
        fig['layout']['yaxis2']['title'] = 'Deaths'
        fig.write_image('./matching.svg')
        fig.write_image(oj(assets_dir, 'matching.svg'))
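        # Note: fig.write_image needs a static-image export backend installed
        # (the kaleido package, or the legacy orca tool).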
Example 8
def update_graph(selected_counties, selected_features):
    print(selected_counties)
    print(selected_features)
    print(isinstance(selected_counties, list))
    cols = ['#e6194b', '#3cb44b', '#ffe119', '#4363d8', 
        '#f58231', '#911eb4', '#46f0f0', '#f032e6', 
        '#bcf60c', '#fabebe', '#008080', '#e6beff', 
        '#9a6324', '#fffac8', '#800000', '#aaffc3', 
        '#808000', '#ffd8b1', '#000075', '#808080', 
        '#ffffff', '#000000']
    fig = make_subplots(rows=2, cols=2,
                        subplot_titles=("Cases", "Deaths", "Cases (Aligned to first case)", "Deaths (Aligned to first death)"))
    keys = ['cases', 'deaths', 'aligned_cases', 'aligned_deaths']
    # local import via a machine-specific path hack (kept from the original)
    import sys
    sys.path.append("/home/ubuntu/new_uploader/viz")
    import viz_map_utils
    date1 = viz_map_utils.date_in_data(df)
#     print(date1)
    date2 = [i for i in range(len(date1))]
    
    # if multiple counties are selected, aka manual select
    if isinstance(selected_counties, list):
        for i in range(4):
            key = keys[i]
            for index, pos in enumerate(selected_counties):
                row = df[df['pos'] == pos]
                if i < 2:
                    dates = date1
                else:
                    dates = date2
                fig.add_trace(go.Scatter(x=dates,
                                         y=row[key].to_list()[0],
                                         line=dict(color=cols[index]),
                                         name=row['CountyName'].values[0] + ', ' + row['StateName'].values[0],
                                         showlegend=i == 0),
                              row=i // 2 + 1, col=i - (i // 2) * 2 + 1)
    # if a single county is selected, aka nearest neighbors
    elif selected_counties is not None:
        # plot selected county
        for i in range(4):
            key = keys[i]
            row = df[df['pos'] == selected_counties]
            if i < 2:
                dates = date1
            else:
                dates = date2
            fig.add_trace(go.Scatter(x=dates,
                                     y=row[key].to_list()[0],
                                     line=dict(color=cols[0]),
                                     name=row['CountyName'].values[0] + ', ' + row['StateName'].values[0],
                                     showlegend=i == 0),
                          row=i // 2 + 1, col=i - (i // 2) * 2 + 1)
        # if features are selected, run nearest neighbors and plot results    
        if selected_features is not None:
            if len(selected_features)!= 0:
                neighs = nn.model(df, selected_features, selected_counties)
                print(neighs)
                for i in range(4):
                    key = keys[i]
                    for index, pos in enumerate(neighs):
                        row = df[df['pos'] == pos]
                        if i < 2:
                            dates = date1
                        else:
                            dates = date2
                        print(row['CountyName'].values[0] +', ' + row['StateName'].values[0])
                        fig.add_trace(go.Scatter(x=dates,
                                                 y=row[key].to_list()[0],
                                                 line=dict(color=cols[index + 1]),
                                                 name=row['CountyName'].values[0] + ', ' + row['StateName'].values[0],
                                                 showlegend=i == 0),
                                      row=i // 2 + 1, col=i - (i // 2) * 2 + 1)
                
    fig.update_layout(height=1000,                         
                      template='plotly_dark',
                      xaxis_title="Time",
                      yaxis_title="Count",
                      yaxis_showgrid=True)

    # edit axis labels
    fig['layout']['xaxis']['title'] = 'Date'
    fig['layout']['xaxis2']['title'] = 'Date'
    fig['layout']['xaxis3']['title'] = 'Days'
    fig['layout']['xaxis4']['title'] = 'Days'
    fig['layout']['yaxis']['title'] = 'Cases'
    fig['layout']['yaxis2']['title'] = 'Deaths'
    fig['layout']['yaxis3']['title'] = 'Cases'
    fig['layout']['yaxis4']['title'] = 'Deaths'
    return fig
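
# A sketch of how update_graph might be registered as a Dash callback
# (hypothetical component ids; the app layout is not shown in the source):
# @app.callback(
#     Output('trend-graph', 'figure'),
#     [Input('county-dropdown', 'value'), Input('feature-checklist', 'value')])
# def update_graph(selected_counties, selected_features):
#     ...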
def bagging(args, model_path_list, configs):
    os.makedirs(args.method, exist_ok=True)  # make sure the output directory exists
    result = open(os.path.join(args.method, 'results.csv'), 'w')
    result.write('id,categories\n')

    # initialize data
    feats = sk_read_eval('origin_data/test.csv', normal=False)

    # initialize model
    model_list = []
    for sub_model_path, sub_model_type in model_path_list:
        if sub_model_type == 'cnn':
            model = CNNCifarClassifer(num_classes=configs['num_classes'])
        elif sub_model_type == 'dense':
            model = DLCifarClassifer(num_classes=configs['num_classes'])
        elif sub_model_type == 'pcd':
            model = PointNetfeat(num_classes=configs['num_classes'])
        elif sub_model_type == 'mlp':
            model = CifarClassifer(num_classes=configs['num_classes'])
        else:
            raise ValueError('unknown model type: %s' % sub_model_type)

        model.load_state_dict(torch.load(sub_model_path))
        if configs['gpu']:
            model = model.cuda()
        model = model.eval()
        model_list.append(model)

        print('======= Loaded model from %s =======' % sub_model_path)

    # start to inference
    for batch_i, feat in enumerate(feats):
        feat = torch.from_numpy(feat).unsqueeze(0)

        if configs['gpu']:
            feat = feat.cuda()

        max_probs_list = []
        prob_list = []
        pred_list = []

        for model in model_list:
            with torch.no_grad():  # inference only
                probs = model(feat)
            prob = F.softmax(probs, dim=-1)

            if configs['gpu']:
                pred = prob.max(1, keepdim=True)[1].cpu().numpy()

                prob_list.append(prob.cpu().detach().numpy()[0])
                max_probs_list.append(prob.max(1, keepdim=True)[0].cpu().detach().numpy()[0][0])
            else:
                pred = prob.max(1, keepdim=True)[1].numpy()

                prob_list.append(prob.detach().numpy()[0])
                max_probs_list.append(prob.max(1, keepdim=True)[0].detach().numpy()[0][0])

            pred_list.append(pred)

        # choose the prediction
        max_probs_list = np.array(max_probs_list)
        if configs['bagging_mode'] == 'hard':
            # hard mode: take the single most confident model's prediction
            max_pred = pred_list[np.argmax(max_probs_list)][0][0]
        elif configs['bagging_mode'] == 'soft':
            # soft mode: sum the class probabilities across models
            prob_sum = np.sum(prob_list, axis=0)
            max_pred = np.argmax(prob_sum)
        else:
            raise ValueError('unknown bagging mode: %s' % configs['bagging_mode'])

        result.write('%s,%s\n' % (str(batch_i), str(max_pred)))

    result.close()
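
# A minimal usage sketch (hypothetical checkpoint paths; model_path_list
# pairs each checkpoint with its model type, as the loading loop expects):
# model_path_list = [('models/cnn/best_0.6000.pth', 'cnn'),
#                    ('models/mlp/best_0.5500.pth', 'mlp')]
# bagging(args, model_path_list, configs)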
def train(args, configs):
    torch.manual_seed(args.seed)
    if configs['separate'] or not os.path.exists('splited_data/train.csv'):
        split_train_test('origin_data/train.csv', 'splited_data', args.seed)

    # initialize logger
    if not os.path.isdir(os.path.join(args.log_dir, args.method)):
        os.makedirs(os.path.join(args.log_dir, args.method))
    log_tr = open(os.path.join(args.log_dir, args.method, 'train_log.txt'), 'w')
    log_t = open(os.path.join(args.log_dir, args.method, 'test_log.txt'), 'w')

    # initialize data loader
    dataset_tr = Cifar10(args.data_dir, 'train')
    dataset_t = Cifar10(args.data_dir, 'test')
    data_loader_tr = DataLoader(dataset_tr, batch_size=configs['batch_size'],
                                pin_memory=True, num_workers=configs['workers'], shuffle=True)
    data_loader_t = DataLoader(dataset_t, batch_size=1,
                               pin_memory=True, num_workers=configs['workers'], shuffle=True)

    # initialize model & optimizer & loss
    if args.method == 'mlp':
        model = CifarClassifer(num_classes=configs['num_classes'])
    elif args.method == 'cnn':
        model = CNNCifarClassifer(num_classes=configs['num_classes'])
    else:
        raise ValueError('unknown method: %s' % args.method)
    optimizer = optim.Adam(model.parameters(), lr=configs['lr'], weight_decay=configs['weight_decay'])
    criterion = nn.CrossEntropyLoss()
    if configs['gpu']:
        model = model.cuda()
        criterion = criterion.cuda()

    max_accuracy = 0
    loss_sum = 0

    for i in range(configs['epoch']):
        # training phase
        print('======= Training =======')
        model = model.train()

        for batch_i, (feat, lbl) in enumerate(data_loader_tr):
            if configs['gpu']:
                feat = feat.cuda()
                lbl = lbl.cuda()

            probs = model(feat)
            loss = criterion(probs, lbl)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if configs['gpu']:
                loss = loss.detach().cpu().numpy()
            else:
                loss = loss.detach().numpy()
            loss_sum += loss

            if batch_i % configs['loss_interval'] == 0:
                # loss_sum is accumulated over the logging interval, so label it as such
                log_info = '[Epoch: %d], [accumulated training loss: %0.8f]' % (i, loss_sum)
                print(log_info)
                log_tr.write(log_info + '\n')
                log_tr.flush()

                loss_sum = 0

        # testing phase
        print('======= Testing =======')
        model = model.eval()

        correct = 0
        for batch_i, (feat, lbl) in enumerate(data_loader_t):
            if configs['gpu']:
                feat = feat.cuda()

            with torch.no_grad():  # evaluation only
                probs = model(feat)
            prob = F.softmax(probs, dim=-1)
            if configs['gpu']:
                pred = prob.max(1, keepdim=True)[1].cpu().numpy()
            else:
                pred = prob.max(1, keepdim=True)[1].numpy()
            correct += pred[0][0] == lbl.numpy()[0]

        accuracy = correct / len(data_loader_t)
        log_info = '[Epoch: %d], [test accuracy: %0.8f]' % (i, accuracy)
        print(log_info)
        log_t.write(log_info + '\n')
        log_t.flush()

        # save the model
        if i % configs['save_interval'] == 0:
            print('saving model')
            if not os.path.isdir(os.path.join(args.model_dir, args.method)):
                os.makedirs(os.path.join(args.model_dir, args.method))
            torch.save(model.state_dict(), os.path.join(args.model_dir, args.method, 'epoch_%d.pth' % i))

        if accuracy > max_accuracy:
            torch.save(model.state_dict(), os.path.join(args.model_dir, args.method, 'best_%0.4f.pth' % accuracy))
            max_accuracy = accuracy

    torch.save(model.state_dict(), os.path.join(args.model_dir, args.method, 'final.pth'))
    log_tr.close()
    log_t.close()
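

# A minimal entry-point sketch (hypothetical helpers; the real argument parser
# lives elsewhere in this project and also supplies seed, data_dir, log_dir,
# and model_dir):
# if __name__ == '__main__':
#     args = parse_args()
#     configs = load_configs(args.method)
#     train(args, configs)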