Example no. 1
    def build_model(self):
        if self.args.mean_teacher:
            self.model = baseline(self.args).to(self.device)
            self.model.weight_init()
            self.ema_model = baseline(self.args).to(self.device)
            self.ema_model.weight_init()

        if self.args.pretrain:
            print('Loading ', self.args.pretrain)
            # Load the checkpoint once and restore the saved weights into a fresh model.
            checkpoint = torch.load(
                self.args.pretrain,
                map_location=lambda storage, loc: storage)
            self.model = baseline(self.args).to(self.device)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            #self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.epoch = checkpoint['epoch']
            self.loss = checkpoint['loss']
        else:
            self.model = baseline(self.args).to(self.device)
            self.model.weight_init()
            #self.model = resnet56().to(self.device)
            if self.args.optim == 'adam':
                self.optimizer = optim.Adam(self.model.parameters(),
                                            lr=self.lr)
            else:
                self.optimizer = optim.SGD(self.model.parameters(),
                                           lr=self.lr,
                                           momentum=self.args.momentum,
                                           weight_decay=self.args.weight_decay)

        milestones = []
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                        milestones=milestones,
                                                        gamma=self.args.gamma)

        #if self.args.mean_teacher:
        #self.model = resnet20_cifar().to(self.device)
        #self.ema_model = resnet20_cifar().to(self.device)
        print(self.model)
        self.data_timer = timer(self.args)
        self.train_timer = timer(self.args)
        self.test_timer = timer(self.args)

        self.L1 = nn.L1Loss()
        self.L2 = nn.MSELoss()
        self.criterion = nn.CrossEntropyLoss(reduction='sum')
        torch.manual_seed(self.seed)

        if self.GPU_IN_USE:
            torch.cuda.manual_seed(self.seed)
            cudnn.benchmark = True
            self.L1.cuda()
            self.L2.cuda()
            self.criterion.cuda()
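Where the mean_teacher flag above creates an ema_model, the teacher weights are usually kept as an exponential moving average of the student weights; a minimal sketch of such an update follows (the decay value and the call site after each optimizer step are assumptions, not taken from this example):

import torch

def update_ema(model, ema_model, decay=0.999):
    # Assumed helper: blend the student weights into the teacher after each training step.
    with torch.no_grad():
        for ema_p, p in zip(ema_model.parameters(), model.parameters()):
            ema_p.mul_(decay).add_(p, alpha=1.0 - decay)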
Example no. 2
def train_baseline():
    #model = load_model("data/models/baseline.h5")

    model = models.baseline()
    model.summary()

    tensorboard = TensorBoard(log_dir='./baseline1_graph', histogram_freq=0, write_graph=True, write_images=True)
    checkpoint = ModelCheckpoint("data/models/baseline_chkpt.h5", monitor='val_loss', save_best_only=True, verbose=1, mode="min")
    stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=1, verbose=1)

    model.fit_generator(batcher.train_gen(),
                        steps_per_epoch=batcher.train_steps,
                        validation_data=batcher.validation_gen(),
                        validation_steps=batcher.validation_steps,
                        epochs=8,
                        callbacks=[tensorboard, checkpoint, stopping])

    model.save("data/models/baseline1.h5")
    batcher.reset()
    model = load_model("data/models/baseline_chkpt.h5")
    eval = model.evaluate_generator(batcher.validation_gen(), steps=batcher.validation_steps)
    print("BASELINE", eval)

    batcher.reset()
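The batcher object used by train_baseline is not defined in this snippet; a minimal sketch of the interface that fit_generator/evaluate_generator expect from it follows (class name, constructor arguments, and batch size are assumptions):

import numpy as np

class Batcher:
    """Assumed interface: endless generators plus step counts, as used above."""
    def __init__(self, X_train, y_train, X_val, y_val, batch_size=32):
        self.X_train, self.y_train = X_train, y_train
        self.X_val, self.y_val = X_val, y_val
        self.batch_size = batch_size
        self.train_steps = int(np.ceil(len(X_train) / batch_size))
        self.validation_steps = int(np.ceil(len(X_val) / batch_size))

    def _gen(self, X, y):
        while True:  # Keras generators must loop forever
            for i in range(0, len(X), self.batch_size):
                yield X[i:i + self.batch_size], y[i:i + self.batch_size]

    def train_gen(self):
        return self._gen(self.X_train, self.y_train)

    def validation_gen(self):
        return self._gen(self.X_val, self.y_val)

    def reset(self):
        pass  # a real implementation might rewind file handles or reshuffle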
Example no. 3
x1_cc = tf.placeholder(tf.float32, shape=[1, 2000, 2600, 1])
x2_cc = tf.placeholder(tf.float32, shape=[1, 2000, 2600, 1])

x1_mlo = tf.placeholder(tf.float32, shape=[1, 2000, 2600, 1])
x2_mlo = tf.placeholder(tf.float32, shape=[1, 2000, 2600, 1])

if network_type == "CC":
    x = (x1_cc, x2_cc)
else:
    x = (x1_mlo, x2_mlo)

#x = tf.placeholder(dtype=tf.int32, shape = None, name=None)

y = tf.placeholder(tf.float32, shape=(1, 3))

prediction = model.baseline(x, network_type)

print(prediction)

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Evaluate model node
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=1)
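The graph built above would typically be driven by a TF1 session loop like the following; the dummy feed data, step count, and checkpoint path are assumptions, not part of the original example:

import numpy as np

with tf.Session() as sess:
    sess.run(init)
    for step in range(10):  # assumed number of training steps
        # Dummy batch only to illustrate the feed; real inputs come from the data pipeline.
        view_a = np.zeros((1, 2000, 2600, 1), dtype=np.float32)
        view_b = np.zeros((1, 2000, 2600, 1), dtype=np.float32)
        label = np.array([[1.0, 0.0, 0.0]], dtype=np.float32)
        _, loss_val, acc_val = sess.run([optimizer, cost, accuracy],
                                        feed_dict={x[0]: view_a, x[1]: view_b, y: label})
        print(step, loss_val, acc_val)
    saver.save(sess, "checkpoints/baseline")  # assumed checkpoint path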
Example no. 4
    validationset = io_module.Dataset(args.datasetpath,
                                      C.LIST_VALID,
                                      args.featuretype,
                                      framerate=args.framerate,
                                      num_classes=3)

    trainset.storeFeatures()
    validationset.storeFeatures()

    # -----------------
    # Training settings
    # -----------------

    # Load the network
    network = model.baseline(trainset.input_shape, args.capsules,
                             trainset.num_classes,
                             args.receptivefield * args.framerate,
                             args.numbertimestamps)

    # Load the margins
    hit = np.array([args.mplus] * trainset.num_classes)
    miss = np.array([args.mminus] * trainset.num_classes)

    # Load the segmentation and detection losses
    model_STL = losses.SegmentationTSELoss(params=C.K_MATRIX,
                                           hit_radius=hit,
                                           miss_radius=miss)
    model_DSL = losses.DetectionSpottingLoss(lambda_coord=args.lambdacoord,
                                             lambda_noobj=args.lambdanoobj)

    # Load the optimizer
    optimizer = tensorflow.keras.optimizers.Adam(lr=args.learningrate,
Example no. 5

#Parameters
training_iters = 10
learning_rate = 0.001
batch_size = 8
no_epochs = 40
n_classes = 3

x1_cc = tf.placeholder(tf.float32, shape=[1, 2000, 2600, 1])
x1_mlo = tf.placeholder(tf.float32, shape=[1, 2000, 2600, 1])
x = (x1_cc,x1_mlo)

y = tf.placeholder(tf.float32, shape=(1, 3))

prediction = model.baseline(x, parameters=None)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.99, epsilon=0.1)
optimizer2 = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
train2 = optimizer2.minimize(cost)

init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=1)

with tf.Session() as sess:
    init.run()
    # sess.run(init)
Example no. 6
X_test = np.concatenate((X1_test,X2_test,X3_test,X4_test,X5_test))
del X1_test,X2_test,X3_test,X4_test,X5_test

y_train = np.concatenate((y1_train,y2_train,y3_train,y4_train,y5_train))
del y1_train,y2_train,y3_train,y4_train,y5_train
y_test = np.concatenate((y1_test,y2_test,y3_test,y4_test,y5_test))
del y1_test,y2_test,y3_test,y4_test,y5_test

#chop out unnecessary data
X_train = X_train[:,4:9,:,50:150]
X_test = X_test[:,4:9,:,50:150]
with tf.device('/cpu:0'):
    if args.model == 'hybrid_LSTM':
        model = hybrid_LSTM(depth=2,conv_size=8,dense_size=512,input_dim=(5,9,100,),dropoutRate=0.2)
    if args.model == 'baseline':
        model = baseline(input_dim=(4500,))
        #baseline is FC layers not CNN, need to flatten
        X_train = np.reshape(X_train,(X_train.shape[0],-1))
        X_test = np.reshape(X_test,(X_test.shape[0],-1))

model.compile(optimizer=SGD(lr=0.002, decay=1E-5),
              loss=[mean_squared_error_ignore_0, 'binary_crossentropy'],
              metrics=['accuracy'],
              loss_weights=[0.4, 0.6])
parallel_model = multi_gpu_model(model, gpus=2)
parallel_model.__setattr__('callback_model', model)
parallel_model.compile(optimizer=SGD(lr=0.002, decay=1E-5),
                       loss=[mean_squared_error_ignore_0, 'binary_crossentropy'],
                       metrics=['accuracy'],
                       loss_weights=[0.4, 0.6])


print(model.summary())

#train the model
csv_logger = CSVLogger(out+'.log')
filepath="out"+{epoch:02d}+".hdf5"
Example no. 7
estimator = util.get_class(model_name)(**params['model'])

estimator.fit(lead_data.X[train], lead_data.y[train])

print('Validating model')
print('    on ' + str(test.sum()) + ' examples')

y_score = pd.Series(model.y_score(estimator, lead_data.X[test]),
                    index=lead_data.X[test].index)

counts = [.01, .02, .05, .1]
precisions = model.precision(lead_data.y[test], y_score,
                             [.01, .02, .05, .1, .2])

print('    baseline: ' + str(model.baseline(lead_data.y[test])))
print('    precision: ' + str(', '.join('%s=%.2f' % t
                                        for t in zip(counts, precisions))))
print('    auc: ' + str(model.auc(lead_data.y[test], y_score)))

if 'output' in params:
    print('Writing results in ' + params['output'])
    if not os.path.exists(params['output']):
        os.makedirs(params['output'])

    with open(os.path.join(params['output'], 'params.yaml'), 'w') as outfile:
        yaml.dump(params_orig, outfile)
    joblib.dump(estimator, os.path.join(params['output'], 'estimator.pkl'))
    y = pd.DataFrame({
        'score': y_score,
        'true': lead_data.y[test]
Example no. 8
def main(filename):
    print('loading data')
    # Establish database connection
    with open(
            '/data/groups/schools1/mlpolicylab_fall20_schools1/pipeline/db_info.yaml',
            'r') as f:
        db_params = yaml.safe_load(f)['db']

    engine = create_engine('postgres://:@{host}:{port}/{dbname}'.format(
        host=db_params['host'],
        port=db_params['port'],
        dbname=db_params['dbname'],
    ))
    # Load data from database to dataframe
    df = load_data(filename, engine)

    # Split dataframe into train and test data.
    splits, years_reference = train_test_split(df)

    for i, (train_df, test_df) in enumerate(splits):
        print(f'processing split {i}')

        # Explore data for each cohort
        explore_data(train_df)

        # Process train and test data separately
        updated_df_train = process_data(train_df)
        updated_df_test = process_data(test_df)

        # Upload the test and train data to the database for future reference and easy retrieval
        updated_df_train.columns = [
            col.replace('(', '').replace(')', '').replace(' ', '_').replace('/', '_')
            for col in updated_df_train.columns
        ]
        updated_df_test.columns = [
            col.replace('(', '').replace(')', '').replace(' ', '_').replace('/', '_')
            for col in updated_df_test.columns
        ]

        table_name = timestamp + '_' + str(years_reference[i][1]) + '_' + str(
            years_reference[i][0])

        df_to_db(table_name, 'processed_data', updated_df_train,
                 updated_df_test, engine)

        # Retrieve test and train data from the database
        processed_train, processed_test = db_to_df(table_name,
                                                   'processed_data', engine)

        updated_df_train_f = processed_train.copy()
        updated_df_train_l = processed_train.copy()
        updated_df_test_f = processed_test.copy()
        updated_df_test_l = processed_test.copy()

        # Create features for test and train data
        features_train, train_student_ids = create_features(updated_df_train_f)
        features_test, test_student_ids = create_features(updated_df_test_f)

        # Create labels
        label_train = create_label(updated_df_train_l)
        label_test = create_label(updated_df_test_l)

        # Concatenating features and labels to save in the database
        train_concat = pd.concat([features_train, label_train],
                                 axis=1,
                                 sort=False)
        test_concat = pd.concat([features_test, label_test],
                                axis=1,
                                sort=False)

        # Calculating baseline rate using grade 9 gpa and base rate
        baseline_precision = baseline(test_concat, years_reference[i])
        base_rate = sum(train_concat.not_graduated) / len(train_concat)

        # Saving and reading from database
        df_to_db(table_name, 'model_data', train_concat, test_concat, engine)
        model_train, model_test = db_to_df(table_name, 'model_data', engine)

        features_train = model_train.iloc[:, :-1]
        label_train = model_train.iloc[:, -1]
        features_test = model_test.iloc[:, :-1]
        label_test = model_test.iloc[:, -1]

        # Build model
        algos = ["Logistic", "SVM", "RandomForest", "DecisionTree"]
        gs_params = {
            "Logistic":
            ParameterGrid({
                'solver': ['lbfgs', 'liblinear', 'saga'],
                'C': [0.001, 0.01, 0.1, 1, 2, 5, 10]
            }),
            "SVM":
            ParameterGrid({
                'C': [0.01, 1, 2, 5, 10],
                'kernel': ['rbf', 'sigmoid']
            }),
            "RandomForest":
            ParameterGrid({
                'n_estimators': [30, 50, 100, 500, 1000, 10000],
                'max_depth': [5, 10, 20, 50],
                'min_samples_split': [5, 10, 15],
                'max_features': ['auto', 'log2', 'sqrt']
            }),
            "DecisionTree":
            ParameterGrid({
                'criterion': ['gini', 'entropy'],
                'max_depth': [5, 10, 20, 50],
                'min_samples_split': [5, 10, 15]
            })
        }

        print('performing model grid search')
        for model_name in algos:
            params = gs_params[model_name]
            for param in params:
                model = build_model(features_train, label_train, model_name,
                                    param)

                # Perform prediction
                pred_proba_train = prediction(features_train, model)
                pred_proba_test = prediction(features_test, model)

                # Convert prediction probabilities to dataframes for further processing
                pred_train_df = pd.DataFrame(pred_proba_train,
                                             columns=['probability'])
                pred_test_df = pd.DataFrame(pred_proba_test,
                                            columns=['probability'])

                # Retrieve hyperparameters for processing
                hyperparameters = ' '.join(
                    ["{}: {}".format(key, param[key]) for key in param.keys()])

                pred_train_df['model'] = model_name
                pred_train_df['params'] = hyperparameters
                pred_test_df['model'] = model_name
                pred_test_df['params'] = hyperparameters

                # Get the prediction scores for test and train data
                predictions_train = pd.concat(
                    [train_student_ids, pred_train_df], axis=1, sort=False)
                predictions_test = pd.concat([test_student_ids, pred_test_df],
                                             axis=1,
                                             sort=False)

                # Calculate the bias metrics
                TPR_gender, FDR_gender = bias_metrics(predictions_test,
                                                      processed_test, 'gender')
                TPR_disadvantagement, FDR_disadvantagement = bias_metrics(
                    predictions_test, processed_test, 'disadvantagement')

                # Load the prediction results to database for creating visualizations
                df_to_db(table_name, 'predictions', predictions_train,
                         predictions_test, engine)

                # Evaluate model
                metric = evaluate_model(features_test,
                                        label_test,
                                        model,
                                        model_name,
                                        baseline_precision,
                                        hyperparameters,
                                        columns=model_train.columns[:-1])

                # saving results
                df_summary = pd.DataFrame({
                    'test_year': years_reference[i][1],
                    'train_since': years_reference[i][0],
                    'algorithm': model_name,
                    'hyperparameters': hyperparameters,
                    'baserate': base_rate,
                    'baseline': [baseline_precision],
                    'precision': metric,
                    'TPR_gender': TPR_gender,
                    'FDR_gender': FDR_gender,
                    'TPR_disadvantagement': TPR_disadvantagement,
                    'FDR_disadvantagement': FDR_disadvantagement
                })
                df_summary.to_sql(name=timestamp,
                                  schema='performance_metrics',
                                  con=engine,
                                  if_exists='append',
                                  index=False)
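The helpers used above (build_model, prediction, evaluate_model, bias_metrics, ...) are defined elsewhere in this pipeline; a minimal sketch of what build_model plausibly does, mapping each algorithm name in algos to a scikit-learn estimator, is shown below (the exact mapping and fit call are assumptions):

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier

def build_model(features, labels, model_name, param):
    # Map the grid-search name to an estimator class and fit it with one
    # hyperparameter combination from the ParameterGrid.
    estimators = {
        "Logistic": LogisticRegression,
        "SVM": SVC,  # may need probability=True if prediction() relies on predict_proba
        "RandomForest": RandomForestClassifier,
        "DecisionTree": DecisionTreeClassifier,
    }
    model = estimators[model_name](**param)
    model.fit(features, labels)
    return model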