Example #1
def train(model,
          dataloader,
          criterion,
          device,
          optimizer,
          epochs=1,
          validate_every=None,
          skip_after=None):
    """Train for `epochs` epochs on dataloader[0]; validate on dataloader[1]
    every `validate_every` batches and cut each epoch short after
    `skip_after` batches."""

    training_losses = {}
    model.train()
    for e in range(epochs):
        training_loss = 0
        normal = False  # flipped to True only if the epoch runs to completion
        for ii, (images, labels) in enumerate(dataloader[0]):
            if skip_after and ii == skip_after:
                break
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()
            training_loss += loss.item()
            if validate_every and (ii + 1) % validate_every == 0:
                accuracy, valid_loss = validation(model, dataloader[1],
                                                  criterion, device)

                helper.print_results(epoch=(str(e + 1) + '/' + str(ii + 1)),
                                     accuracy=format(accuracy, '.3f'),
                                     training_loss=format(
                                         training_loss / (ii + 1), '.3f'),
                                     valid_loss=format(valid_loss, '.3f'))
        else:
            # The inner loop ran to completion: average over the full epoch.
            normal = True
            training_loss /= len(dataloader[0])
        if not normal:
            # Stopped early via skip_after: average over the batches actually run.
            training_loss /= skip_after
        training_losses['epoch-' + str(e + 1)] = format(training_loss, '.3f')
    return training_losses
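
# Hypothetical usage sketch (an assumption, not from the original source):
# train() indexes `dataloader` as dataloader[0]/dataloader[1], so it expects
# a (train_loader, valid_loader) pair. Tiny synthetic data stands in for a
# real dataset; validate_every is omitted because that path calls the
# project-local validation() and helper.print_results().
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

fake_images = torch.randn(64, 784)         # stand-in flattened 28x28 images
fake_labels = torch.randint(0, 10, (64,))  # stand-in class labels
loader = DataLoader(TensorDataset(fake_images, fake_labels), batch_size=16)

net = nn.Sequential(nn.Linear(784, 10), nn.LogSoftmax(dim=1))
dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.to(dev)

losses = train(net, (loader, loader), nn.NLLLoss(), dev,
               optim.Adam(net.parameters(), lr=0.001), epochs=2)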
Example #2
import argparse
import os
import sys

import torch
# `helper`, `image_classifier`, and `predict` come from the surrounding project.


def main():

    names = None
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="file path of image")
    parser.add_argument("checkpoint", help="directory to load checkpoint", metavar='L')
    parser.add_argument("--top_k", "-k", dest='top_k', metavar='K', type=int,
                        default=1, help="Top K classes (default: 1)")
    parser.add_argument("--gpu", action="store_true", help="Use GPU for inference")
    parser.add_argument("--category_names", "-c", metavar='cat_names', dest="label_mappings",
                        help="file mapping categories to real names (default: cat_to_name.json)")

    args = parser.parse_args()
        
    if not os.path.exists(args.checkpoint):
        print("Checkpoint path doesn't exist. Please try again.")
        sys.exit()
    if not os.path.exists(args.input):
        print("Image path doesn't exist. Please try again.")
        sys.exit()
    if args.label_mappings:
        if not os.path.exists(args.label_mappings):
            print("Mapping file doesn't exist. Please try again.")
            sys.exit()
            
    device = torch.device('cuda' if torch.cuda.is_available() and args.gpu else 'cpu') 
    model = image_classifier.get_model(args.checkpoint)
    model.to(device)
    
    probabilities, class_names = predict(args.input, model, device, k=args.top_k)
    if args.label_mappings:
        category_dict = helper.label_mapping(args.label_mappings)
        names = [category_dict[item] for item in class_names]
    category_names = names if names else class_names
    for name, probability in zip(category_names, probabilities[0]):
        helper.print_results(name=name, probability=format(probability, '<45.3f'))
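
# Hypothetical command-line invocation (script and file names are
# illustrative assumptions; the flags come from the argparse setup above):
#
#   python predict.py flower.jpg checkpoint.pth --gpu --top_k 5 \
#       --category_names cat_to_name.json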
Example #3
                    'solver': 'lbfgs'
                }),
    'Survived - Decision_tree':
    build_model(decision_tree_fn,
                'Survived',
                FEATURES,
                titanic_df,
                options={
                    'criterion': 'gini',
                    'max_depth': 3,
                    'min_samples_split': 2
                })
}

# Running code with default values
plt = print_results(result_dict)
#plt.show()
plt.savefig(fig_path + 'results.png')

title = "Learning Curves for Decision Tree"
plt = plot_learning_curve(DecisionTreeClassifier(criterion='gini',
                                                 max_depth=3,
                                                 min_samples_split=2),
                          'Survived',
                          FEATURES,
                          titanic_df,
                          title,
                          ylim=(0.4, 1.01))
#plt.show()
plt.savefig(fig_path + 'learning_curve_dt.png')
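
# A minimal sketch (an assumption, not the project's actual helper) of what
# plot_learning_curve might wrap, matching the
# (estimator, target, features, df, title, ylim) call above and built on
# sklearn.model_selection.learning_curve:
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve


def plot_learning_curve(estimator, target, features, df, title, ylim=None):
    X, y = df[features], df[target]
    sizes, train_scores, valid_scores = learning_curve(estimator, X, y, cv=5)
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('Score')
    plt.plot(sizes, train_scores.mean(axis=1), 'o-', label='Training score')
    plt.plot(sizes, valid_scores.mean(axis=1), 'o-',
             label='Cross-validation score')
    plt.legend(loc='best')
    return plt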
Example #4
        verbose=0)
    ann_model.save_weights(path + 'models/' + str(data_interval) + 'min/ann_' +
                           stock + '.h5')
    # ann_model.load_weights(path+'models/'+str(data_interval)+'min/ann_'+stock+'.h5')

    # ================
    # MODEL EVALUATION
    # ================

    print('Stock name: ', stock)

    # SUPPORT VECTOR REGRESSOR
    y_pred_svr = inverse_normalize(data=svr_model.predict(X_test),
                                   m=y_mean,
                                   s=y_std)
    print_results('SVR', stock, data_interval, y_test, y_pred_svr, path=path)
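
    # A plausible sketch (an assumption, not the project's actual helper) of
    # the inverse_normalize used in this example: it undoes z-score
    # normalization, mapping predictions back to the original price scale.
    #
    #   def inverse_normalize(data, m, s):
    #       # If y was normalized as (y - m) / s, data * s + m recovers it.
    #       return data * s + m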

    # RANDOM FOREST
    y_pred_rf = inverse_normalize(data=rf_model.predict(X_test),
                                  m=y_mean,
                                  s=y_std)
    print_results('RandomForest',
                  stock,
                  data_interval,
                  y_test,
                  y_pred_rf,
                  path=path)

    # ADABOOST
    y_pred_adb = inverse_normalize(data=adb_model.predict(X_test),
                                   m=y_mean,