Example #1
def main(args):
    dataset = LJSpeechDataset(args.train_dir, args.train_csv,
                              text_transformer=text_to_sequence,
                              audio_transformer=spectrogram)
    print(len(dataset))
    batch_sampler = RandomBucketBatchSampler(dataset,
                                             batch_size=args.batch_size,
                                             drop_last=False)
    collate_fn = TextAudioCollate()
    data_loader = DataLoader(dataset, batch_sampler=batch_sampler,
                             collate_fn=collate_fn, num_workers=1)
    # Peek at one batch and the relevant hyperparameters (debugging)
    print(next(iter(data_loader)))
    print("{} {} {}".format(hparams.num_chars, hparams.padding_idx, hparams.feature_dim))
    # Build model
    model = FeaturePredictNet(hparams.num_chars, hparams.padding_idx,
                              hparams.feature_dim)
    if args.use_cuda:
        # model = torch.nn.DataParallel(model)  # multi-GPU wrapper, disabled here
        model.cuda()
    print(model)
    # Build criterion
    criterion = FeaturePredictNetLoss()
    # Build optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.l2,
                                 betas=(0.9, 0.999), eps=1e-6)

    solver = Solver(data_loader, model, criterion, optimizer, args)
    solver.train()
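A minimal sketch of how this entry point might be wired up. The flag names below are assumptions inferred from the attributes main() reads off args (train_dir, train_csv, batch_size, use_cuda, lr, l2), not the repository's actual CLI.

# Hypothetical argparse wiring for main(); all flag names are assumptions.
import argparse

parser = argparse.ArgumentParser(description="Train the feature prediction net")
parser.add_argument("--train_dir", required=True)         # LJSpeech audio directory
parser.add_argument("--train_csv", required=True)         # metadata CSV with transcripts
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--use_cuda", action="store_true")
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--l2", type=float, default=1e-6)     # Adam weight decay

if __name__ == "__main__":
    main(parser.parse_args())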
Example #2
def main(cfg):
    # dynamic import using --model argument
    net = importlib.import_module("model.{}".format(cfg.model)).Net
    print(json.dumps(vars(cfg), indent=4, sort_keys=True))

    solver = Solver(net, cfg)
    solver.fit()
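The dynamic import on the first line is the pattern worth noting: it resolves model/<name>.py at runtime from a config value. A self-contained sketch of the same idiom (the module and class names are placeholders):

# Stand-alone illustration of the dynamic-import pattern above.
import importlib

def load_net_class(model_name):
    # Equivalent to: from model.<model_name> import Net
    return importlib.import_module("model.{}".format(model_name)).Net

# Net = load_net_class("resnet")  # would pick up model/resnet.py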
Example #3
    def solve(self):

        self.fluid_system.params.initial_P = 1e6 + self.fluid_system.params.P_system_min()
        self.fluid_system.params.initial_T = 60.
        initial_T = self.fluid_system.params.initial_T

        dT_inj = np.nan
        dT_loops = 1
        solv = Solver()
        while np.isnan(dT_inj) or abs(dT_inj) >= 0.5:

            initial_state = FluidState.getStateFromPT(self.fluid_system.params.initial_P, initial_T, self.fluid_system.params.working_fluid)
            system_state = self.fluid_system.solve(initial_state)

            dT_inj = initial_T - system_state.pp.state.T_C

            initial_T = solv.addDataAndEstimate(initial_T, dT_inj)

            if np.isnan(initial_T):
                initial_T = system_state.pp.state.T_C

            # add lower bounds
            if initial_T < 1:
                initial_T = 1

            # add upper bounds
            T_prod_surface_C = system_state.pump.well.state.T_C
            if initial_T > T_prod_surface_C and initial_T > 50:
                initial_T = T_prod_surface_C

            if dT_loops > 10:
                print('GenGeo::Warning:FluidSystemWaterSolver:dT_loops is large: %s' % dT_loops)
            dT_loops += 1

        # check if silica precipitation is allowed
        if self.fluid_system.params.silica_precipitation:
            # prevent silica precipitation by DiPippo 1985
            maxSurface_dT = 89
            if (T_prod_surface_C - initial_T) > maxSurface_dT:
                raise Exception('GenGeo::FluidSystemWaterSolver:ExceedsMaxTemperatureDecrease - '
                                'Exceeds Max Temp Decrease of %.3f C to prevent silica precipitation!' % maxSurface_dT)
        else:
            maxSurface_dT = np.inf

        return system_state
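Solver here is GenGeo's iteration helper: each pass it receives the current guess and its residual (dT_inj) via addDataAndEstimate and returns the next guess, with NaN meaning "not enough data yet" (hence the fallback to the produced temperature). Its implementation is not shown on this page; a minimal secant-style stand-in, purely as an assumption about that contract:

import numpy as np

class SecantEstimator:
    """Hypothetical stand-in for GenGeo's Solver: estimates the root of
    residual(x) from successive (guess, residual) pairs."""
    def __init__(self):
        self.x_prev = np.nan
        self.f_prev = np.nan

    def addDataAndEstimate(self, x, f):
        x_prev, f_prev = self.x_prev, self.f_prev
        self.x_prev, self.f_prev = x, f
        if np.isnan(x_prev) or f == f_prev:
            return np.nan  # not enough history; caller falls back to a default
        # Secant update: step along the line through the last two residuals.
        return x - f * (x - x_prev) / (f - f_prev)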
Example #4
def main(env, args):
    if 'RoboschoolHalfCheetah' in args.env_name or 'RoboschoolWalker2d' in args.env_name:
        solver = SolverGait(args, env, project_path)
    else:
        solver = Solver(args, env, project_path)
    if not args.eval_only:
        solver.train()
    else:
        solver.eval_only()
Example #5
def main(args):
    dataset_json = args.json
    tuning_layers = []
    if args.tune:
        tuning_layers = args.tune.split(",")
        print(tuning_layers)
    vgg_path = args.vgg
    max_frames = args.max_frames
    print("Loading VGGNet model. Model type: %s" % args.model)

    if args.model == 'average':
        model = AverageFrameModel(vgg_path,
                                  output_neurons=4,
                                  tuning_layers=tuning_layers)
    elif args.model == 'late':
        model = LateFusionModel(vgg_path, output_neurons=4)
    else:
        raise ValueError("Model type must be one of 'average' and 'late'")

    print "Reading data"
    start = datetime.datetime.now()

    tvt_split = [args.train, args.val, args.test]
    data = read_dataset_tvt(dataset_json, sample_probability=0.5, mode='temporal', max_frames=max_frames,
                            mean_value=model.mean_bgr, tvt_split=tvt_split, ids_file=args.ids)
    print "Training data shape:", data["train_X"].shape
    print "Test data shape:", data["test_X"].shape
    print "Validation data shape:", data["val_X"].shape

    end = datetime.datetime.now()
    print "Read data in %d seconds" % (end-start).seconds

    print "---- Training parameters summary -----"
    param_summary(data["train_X"].shape[0], args)
    batch_size = min(args.batch_size, data["train_X"].shape[0])
    print "--------------------------------------"
    solver = Solver(model,
                    data["train_X"], data["train_y"],
                    val_X=data["val_X"], val_y=data["val_y"],
                    model_type=args.model,
                    num_epochs=args.num_epochs,
                    batch_size=batch_size,
                    output_lr=args.output_lr,
                    tune_lr=args.tune_lr,
                    tuning_layers=tuning_layers,
                    reg=args.reg)

    solver.train()
    test_predictions, scores = solver.predict(data["test_X"], data["test_y"])
    write_predictions(args.model, args.ids, test_predictions, scores, args.out)
    print "--------------------------------------"
    print "---- Training parameters summary -----"
    param_summary(data["train_X"].shape[0], args)

    print "---- Loss and accuracy history ----"
    print "Training loss history"
    print solver.train_loss_history
    print "Training accuracy history"
    print solver.train_acc_history
    print "Validation accuracy history"
    print solver.val_acc_history
Example #6
def eval_affinitynet(data, res):
    model = MultiviewAttention(in_dim=in_dim, hidden_dims=[in_dim], k=10, graph=None,
                               out_indices=None,
                               feature_subset=None, kernel='gaussian', nonlinearity_1=None,
                               nonlinearity_2=None, use_previous_graph=False,
                               group_index=None, merge=None,
                               merge_type='affine', reset_graph_every_forward=False,
                               no_feature_transformation=True, rescale=True, merge_dim=2)

    if x_var.numel() < 10 ** 6:
        title = 'raw data PCA'
        plot_scatter(x_var, title=title, colors=y, folder=save_folder, save_fig=save_fig,
                     size=figsize)

        y_pred = model(x_var)
        title = 'before training output PCA'
        plot_scatter(y_pred, title=title, colors=y, folder=save_folder, save_fig=save_fig,
                     size=figsize)

    title = 'feature weight distribution: before training'
    plot_feature_weight_affinitynet(model.layers, title)

    model = nn.Sequential(model, DenseLinear(in_dim, hidden_dims + [num_cls]))

    loss_fn = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

    solver = Solver(model, data, optimizer, loss_fn)

    loss_train, acc_train, loss_val, acc_val = solver.train_eval(
        num_iter=num_iter, batch_size=batch_size, X=None, y=None, X_val=None, y_val=None,
        X_test=None, y_test=None, eval_test=False, balanced_sample=True)

    plot_result(loss_train, acc_train, loss_val, acc_val, avg='avg')
    plot_result(loss_train, acc_train, loss_val, acc_val, avg='batch')

    title = 'Feature weights after training'
    plot_feature_weight_affinitynet(model[0].layers, title)

    acc, nmi, confusion_mat, f1_score = visualize_val(
        data['X_train'], data['y_train'], solver, batch_size=batch_size,
        title='affinitynet X_train', topk=1, save_fig=save_fig, save_folder=save_folder)
    res[train_portion]['train']['acc'].append(acc)
    res[train_portion]['train']['nmi'].append(nmi)
    res[train_portion]['train']['f1_score'].append(f1_score)
    res[train_portion]['train']['confusion_mat'].append(confusion_mat)

    acc, nmi, confusion_mat, f1_score = visualize_val(
        data['X_val'], data['y_val'], solver, batch_size=batch_size, title='affinitynet X_val', topk=1,
        save_fig=save_fig, save_folder=save_folder)
    res[train_portion]['test']['acc'].append(acc)
    res[train_portion]['test']['nmi'].append(nmi)
    res[train_portion]['test']['f1_score'].append(f1_score)
    res[train_portion]['test']['confusion_mat'].append(confusion_mat)

    cnt = 0
    for n, p in model.named_parameters():
        print(n, p.numel())
        cnt += p.numel()
    print('total param:{0}'.format(cnt))

    model = DenseLinear(in_dim, hidden_dims + [num_cls])

    cnt = 0
    for n, p in model.named_parameters():
        print(n, p.numel())
        cnt += p.numel()
    print('total param:{0}'.format(cnt))

    title = 'Feature weights before training Linear'
    plot_feature_weight_linear(model, title)

    # set a smaller learning rate for DenseLinear
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=weight_decay)

    solver = Solver(model, data, optimizer, loss_fn)

    loss_train, acc_train, loss_val, acc_val = solver.train_eval(
        num_iter=num_iter, batch_size=batch_size, X=None, y=None, X_val=None, y_val=None,
        X_test=None, y_test=None, eval_test=False, balanced_sample=True)

    plot_result(loss_train, acc_train, loss_val, acc_val, avg='avg', title_prefix='training-linear')
    plot_result(loss_train, acc_train, loss_val, acc_val, avg='batch', title_prefix='training-linear')

    title = 'Feature weights after training Linear'
    plot_feature_weight_linear(model, title)

    acc, nmi, confusion_mat, f1_score = visualize_val(
        data['X_train'], data['y_train'], solver, batch_size=batch_size,
        title='linear X_train', topk=1, save_fig=save_fig, save_folder=save_folder)
    res[train_portion]['train']['acc'].append(acc)
    res[train_portion]['train']['nmi'].append(nmi)
    res[train_portion]['train']['f1_score'].append(f1_score)
    res[train_portion]['train']['confusion_mat'].append(confusion_mat)

    acc, nmi, confusion_mat, f1_score = visualize_val(
        data['X_val'], data['y_val'], solver, batch_size=batch_size, title='linear X_val', topk=1,
        save_fig=save_fig, save_folder=save_folder)
    res[train_portion]['test']['acc'].append(acc)
    res[train_portion]['test']['nmi'].append(nmi)
    res[train_portion]['test']['f1_score'].append(f1_score)
    res[train_portion]['test']['confusion_mat'].append(confusion_mat)
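The two parameter-counting loops above are identical; the same tally is usually written once as a small helper. A sketch (the function name is ours, not the source's):

def count_parameters(model, verbose=True):
    # Sum element counts over all registered parameters of a torch module.
    total = 0
    for name, p in model.named_parameters():
        if verbose:
            print(name, p.numel())
        total += p.numel()
    print('total param:{0}'.format(total))
    return total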
Example #7
def start_solver(network, spec, seed, best_solution):
    '''
    Entry point for starting the solver in a new process.
    '''
    solver = Solver(network, spec, seed, best_solution)
    solver.solve()
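The docstring marks this as a process entry point, but the launching code is not shown. A hedged sketch of how it might be spawned; the multiprocessing wiring below is an assumption:

# Hypothetical launcher for start_solver(); only the worker body appears
# in the source, so this Process wiring is an assumption.
import multiprocessing as mp

def launch_solvers(network, spec, seeds, best_solution):
    procs = [mp.Process(target=start_solver,
                        args=(network, spec, seed, best_solution))
             for seed in seeds]
    for p in procs:
        p.start()
    for p in procs:
        p.join()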
Example #8
}  # closes a `data` dict whose opening is truncated in this snippet

# configuration of overfitting
model = FullyConnectedNet([20, 30],
                          num_classes=10,
                          dropout=0,
                          reg=0.5,
                          weight_scale=1e-2,
                          dtype=np.float32,
                          seed=42)

solver = Solver(model,
                data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 0.0033,
                },
                lr_decay=1,
                num_epochs=20,
                batch_size=10,
                print_every=100)

solver.train()
with open('raw_data_overfit.csv', 'w', newline='') as csvfile:
    fieldname = [
        'overfit_train_acc', 'overfit_val_acc', 'overfit_loss',
        'overfit_loss_val'
    ]
    writer = csv.DictWriter(csvfile, fieldnames=fieldname)
    writer.writeheader()

    for i in range(len(solver.train_acc_history)):
        writer.writerow({
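The snippet is cut off mid-writerow in the source. Given the fieldnames declared above and the cs231n-style history attributes used elsewhere on this page, the loop body was presumably along these lines; the exact column mapping is an assumption:

    # Hedged reconstruction of the truncated loop; which history list feeds
    # each column is an assumption, not confirmed by the source.
    for i in range(len(solver.train_acc_history)):
        writer.writerow({
            'overfit_train_acc': solver.train_acc_history[i],
            'overfit_val_acc': solver.val_acc_history[i],
            # the two loss columns would come from whatever per-epoch loss
            # tracking this Solver variant records (not shown in the snippet)
        })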
Example #9
    def solve(self, initial_state):

        results = FluidSystemWaterOutput()

        injection_state = FluidState.getStateFromPT(initial_state.P_Pa,
                                                    initial_state.T_C,
                                                    initial_state.fluid)
        # Find necessary injection pressure
        dP_downhole = np.nan
        dP_solver = Solver()
        dP_loops = 1
        stop = False

        while np.isnan(dP_downhole) or abs(dP_downhole) > 10e3:
            results.injection_well = self.injection_well.solve(injection_state)
            results.reservoir = self.reservoir.solve(
                results.injection_well.state)

            # if already at P_system_min, stop looping
            if stop:
                break

            # find downhole pressure difference (negative means overpressure)
            dP_downhole = self.params.P_reservoir() - results.reservoir.state.P_Pa
            injection_state.P_Pa = dP_solver.addDataAndEstimate(
                injection_state.P_Pa, dP_downhole)

            if np.isnan(injection_state.P_Pa):
                injection_state.P_Pa = initial_state.P_Pa + dP_downhole

            if dP_loops > 10:
                print('GenGeo::Warning:FluidSystemWater:dP_loops is large: %s' % dP_loops)
            dP_loops += 1

            # Set Limits
            if injection_state.P_Pa < self.params.P_system_min():
                # can't go below this pressure or the fluid will flash
                injection_state.P_Pa = self.params.P_system_min()
                # set stop so the injection well and reservoir run once more
                stop = True

        if results.reservoir.state.P_Pa >= self.params.P_reservoir_max():
            raise Exception(
                'GenGeo::FluidSystemWater:ExceedsMaxReservoirPressure - '
                'Exceeds Max Reservoir Pressure of %.3f MPa!' %
                (self.params.P_reservoir_max() / 1e6))

        # Production Well (Lower, to production pump)
        results.production_well1 = self.production_well1.solve(
            results.reservoir.state)
        # Upper half of production well
        results.pump = self.pump.solve(results.production_well1.state,
                                       injection_state.P_Pa)

        # Subtract surface frictional losses between production wellhead and surface plant
        ff = frictionFactor(self.params.well_radius,
                            results.pump.well.state.P_Pa,
                            results.pump.well.state.h_Jkg,
                            self.params.m_dot_IP, self.params.working_fluid,
                            self.params.epsilon)
        if self.params.has_surface_gathering_system:
            # Darcy-Weisbach pressure drop: dP = 8 f L m_dot^2 / (rho pi^2 D^5),
            # with pipe length L = well_spacing and diameter D = 2 * well_radius
            dP_surfacePipes = ff * self.params.well_spacing / (
                self.params.well_radius * 2
            )**5 * 8 * self.params.m_dot_IP**2 / results.pump.well.state.rho_kgm3 / np.pi**2
        else:
            dP_surfacePipes = 0

        results.surface_plant_inlet = FluidState.getStateFromPh(
            results.pump.well.state.P_Pa - dP_surfacePipes,
            results.pump.well.state.h_Jkg, self.params.working_fluid)

        results.pp = self.pp.solve(results.surface_plant_inlet)
        return results