Example #1
def train(fname, out_fname):
    """ Train and save CNN model on ShipsNet dataset

    Args:
        fname (str): Path to ShipsNet JSON dataset
        out_fname (str): Path to output TensorFlow model file (.tfl)
    """

    # Load shipsnet data
    f = open(fname)
    shipsnet = json.load(f)
    f.close()

    # Preprocess image data and labels for input
    X = np.array(shipsnet['data']) / 255.
    X = X.reshape([-1, 3, 80, 80]).transpose([0, 2, 3, 1])
    Y = np.array(shipsnet['labels'])
    Y = to_categorical(Y, 2)

    # Train the model
    model.fit(X,
              Y,
              n_epoch=50,
              shuffle=True,
              validation_set=.2,
              show_metric=True,
              batch_size=128,
              run_id='shipsnet')

    # Save trained model
    model.save(out_fname)
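
The snippet above assumes module-level imports and a TFLearn `model` that are not shown. A minimal sketch of that assumed context (the network layout here is hypothetical, not the original author's architecture):

import json
import numpy as np
import tflearn
from tflearn.data_utils import to_categorical

# Hypothetical CNN for 80x80 RGB chips; only the fit()/save() API above matters
net = tflearn.input_data(shape=[None, 80, 80, 3])
net = tflearn.conv_2d(net, 32, 3, activation='relu')
net = tflearn.max_pool_2d(net, 2)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)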
Example #2
def create_model(X_train, X_test, y_train, y_test, model_type):   # function contains three different models for predictions
    from model import model
    model = model(model_type)  # model_type must be one of 'Linear', 'Lasso' or 'Ridge' (case-sensitive)
            
    model.fit(X_train, y_train) 
    predictions = model.predict(X_test)
    
    print('Making predictions and calculating errors...')
    error = y_test.values.reshape(-1, 1) - predictions  # -1 avoids hardcoding the test-set size
    mae = mean_absolute_error(y_test, predictions)
    rmse = np.sqrt(mean_squared_error(y_test, predictions))
    
    coefficients = model.coef_
    intercept = model.intercept_
    
    errors = pd.DataFrame({'Y': y_test, 'Predictions': predictions})
    plt.figure(figsize = (8,8))
    sns.set_style('darkgrid')
    sns.lmplot(x = 'Predictions', y = 'Y', data = errors)
    
    plt.figure(figsize = (8,8))
    sns.set_style('darkgrid')
    sns.distplot(error)
    print('\n')
    print('The mean absolute error of the model is: ' + str(mae))
    print('The root mean squared error of the model is: ' + str(rmse))
    print('\n')
    return predictions, error, coefficients, intercept, mae, rmse, model
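
The `from model import model` factory above is not shown; a minimal sketch of what it might contain (hypothetical model.py):

from sklearn.linear_model import LinearRegression, Lasso, Ridge

def model(model_type):
    # Return an untrained regressor; model_type is case-sensitive
    regressors = {'Linear': LinearRegression(), 'Lasso': Lasso(), 'Ridge': Ridge()}
    return regressors[model_type]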
Example #3
def neural_network(X_train, X_test, y_train, y_test, epoch, batch):    # a separate neural network
    from neural_model import neural_model
    model = neural_model()

    print('Training Neural Network...')
    model.fit(x = X_train, y = y_train.values,
              validation_data = (X_test, y_test.values),
              epochs = epoch, batch_size = batch)
              
    print('\n')          
    print('Neural Network trained!')
    loss = pd.DataFrame(model.history.history)
    sns.set_style('darkgrid')
    plt.figure(figsize = (8,8))
    loss.plot()

    print('Making predictions...')
    predictions = model.predict(X_test)

    error = y_test.values.reshape(-1, 1) - predictions  # -1 avoids hardcoding the test-set size
    mae = mean_absolute_error(y_test, predictions)
    rmse = np.sqrt(mean_squared_error(y_test, predictions))
    
    sns.set_style('darkgrid')
    plt.figure(figsize = (8,8))
    sns.distplot(error, kde = True)

    weights = model.weights
    print('\n')
    print('The mean absolute error of the model is: ' + str(mae))
    print('The root mean squared error of the model is: ' + str(rmse))

    return predictions, error, weights, mae, rmse, model
Example #4
def main():
    parser = ArgumentParser()
    parser.add_argument('sources', help='sources', type=existing_directory)
    parser.add_argument('--model',
                        help='model name',
                        default='model' + datetime.now().isoformat('T'))
    args = parser.parse_args()

    k = 2
    images, labels = load_images_and_labels(args.sources)

    train_data = images[len(images)//k:]
    train_labels = labels[len(labels)//k:]

    test_data = images[:len(images)//k]
    test_labels = labels[:len(labels)//k]

    # 'model', 'batch_size' and 'epochs' are assumed to be defined at module scope
    model.fit(train_data,
              train_labels,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=True,
              validation_data=(test_data, test_labels))

    with open('bin/{}.json'.format(args.model), 'w+') as file:
        file.write(model.to_json())

    model.save_weights('bin/{}.h5'.format(args.model))
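
`existing_directory` is a custom argparse type not shown in the snippet; a plausible sketch (hypothetical helper):

import os
from argparse import ArgumentTypeError

def existing_directory(path):
    # argparse type: accept the argument only if it names an existing directory
    if not os.path.isdir(path):
        raise ArgumentTypeError('not a directory: {}'.format(path))
    return path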
Example #5
    def sUpdate(self):
        if self.debug:
            print "Starting student update"
        #Do a single fitting of the student parameters and update them
        nrs = len(self.st)
        nrkc = len(self.ca)
        labels = self.data.labels
        #Create an array of dimensions number of datapoints by number of students *2 + number of kcs
        if self.fullmatrix:
            studentdata = np.zeros((len(self.data.data), nrs * 2 + nrkc))
        else:
            studentdata = sparsesp.lil_matrix(
                (len(self.data.data), nrs * 2 + nrkc))
        #keep track of questions answered correctly and questions answered wrongly
        kcc = self.kcc = self.basekcc.copy()
        kcf = self.kcf = self.basekcf.copy()
        totalerror = 0.0
        for nr, d in enumerate(self.data.giveData()):

            s = d[0]
            it = d[1]
            x = 0
            k = float(len(self.ikc[it]))
            for c in self.ikc[it]:
                x += self.ca[c] * self.st[s] / k + (
                    kcf[s, c] * self.cr[c] +
                    kcc[s, c] * self.cg[c]) * self.se[s] - self.cb[c]
                studentdata[nr, s] += self.ca[c] / k
                studentdata[
                    nr,
                    s + nrs] += self.cg[c] * kcc[s, c] + self.cr[c] * kcf[s, c]
                studentdata[nr, nrs * 2 + c] = -1
                if labels[nr]:
                    kcc[s, c] += 1
                else:
                    kcf[s, c] += 1
            try:
                big = m.exp(x) + 1
                if labels[nr]:
                    totalerror += 1 / big
                else:
                    totalerror += 1 - (1 / big)
            except OverflowError:  # m.exp(x) overflowed
                if not labels[nr]:
                    print("WARNING: major error added in s")
                    totalerror += 1
        model = linear_model.LogisticRegression(fit_intercept=False,
                                                penalty='l1',
                                                C=10**9)  # '^' is XOR in Python; 10**9 (no regularization) was intended
        model.fit(studentdata, labels)

        self.st[:] = model.coef_[0][:nrs]
        self.se[:] = model.coef_[0][nrs:nrs * 2]
        self.cb[:] = model.coef_[0][nrs * 2:]
        #Save the found kcc and kcf

        return totalerror / len(self.data.data)
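
The sUpdate/kcUpdate variants on this page all share one pattern: build a (sparse) design matrix, fit an intercept-free L1 logistic regression with a huge C so the penalty is effectively disabled, and read the parameters back out of coef_ by column position. A self-contained toy sketch of that pattern (values are made up; newer scikit-learn also requires an explicit solver for penalty='l1'):

import numpy as np
import scipy.sparse as sparsesp
from sklearn import linear_model

# Toy design matrix: 4 observations x 3 parameters
X = sparsesp.lil_matrix((4, 3))
X[0, 0] = 1
X[1, 1] = 2
X[2, 2] = -1
X[3, 0] = 1
y = np.array([1, 0, 1, 0])

clf = linear_model.LogisticRegression(fit_intercept=False, penalty='l1',
                                      solver='liblinear', C=10**9)
clf.fit(X, y)
print(clf.coef_[0])  # one weight per design-matrix column, read back by position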
Example #6
def train(model, X, args):
    """ Train VAE. """

    # clip data per feature
    X = np.clip(X, [-c for c in args.clip], args.clip)

    # apply scaling and save data preprocessing method
    axis = 0
    if args.standardized:
        print('\nStandardizing data')
        mu, sigma = np.mean(X, axis=axis), np.std(X, axis=axis)
        X = (X - mu) / (sigma + 1e-10)

        with open(args.save_path + 'preprocess_' + args.model_name + '.pickle',
                  'wb') as f:
            pickle.dump(['standardized', args.clip, axis, mu, sigma], f)

    if args.minmax:
        print('\nMinmax scaling of data')
        xmin, xmax = X.min(axis=axis), X.max(axis=axis)
        min_val, max_val = 0, 1  # avoid shadowing the built-in min/max
        X = ((X - xmin) / (xmax - xmin)) * (max_val - min_val) + min_val

        with open(args.save_path + 'preprocess_' + args.model_name + '.pickle',
                  'wb') as f:
            pickle.dump(['minmax', args.clip, axis, xmin, xmax, min_val, max_val], f)

    # set training arguments
    if args.print_progress:
        verbose = 1
    else:
        verbose = 0

    kwargs = {}
    kwargs['epochs'] = args.epochs
    kwargs['batch_size'] = args.batch_size
    kwargs['shuffle'] = True
    kwargs['validation_data'] = (X, None)
    kwargs['verbose'] = verbose

    if args.save:  # create callback
        checkpointer = ModelCheckpoint(filepath=args.save_path +
                                       args.model_name + '_weights.h5',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=True)
        kwargs['callbacks'] = [checkpointer]

        # save model architecture
        with open(args.save_path + args.model_name + '.pickle', 'wb') as f:
            pickle.dump([
                X.shape[1], args.hidden_layers, args.latent_dim,
                args.hidden_dim, args.output_activation
            ], f)

    model.fit(X, **kwargs)
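
At inference time the pickled parameters have to be re-applied to new data before calling the model; a minimal sketch of that inverse step (assuming the 'standardized' branch above was taken):

import pickle
import numpy as np

def preprocess_new_data(X_new, save_path, model_name):
    # Reload the parameters saved by train() and transform new data the same way
    with open(save_path + 'preprocess_' + model_name + '.pickle', 'rb') as f:
        method, clip, axis, mu, sigma = pickle.load(f)
    X_new = np.clip(X_new, [-c for c in clip], clip)
    return (X_new - mu) / (sigma + 1e-10)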
Example #7
    def sUpdate(self):

        #Do a single fitting of the student parameters and update them
        nrs = len(self.st)
        nrkc = len(self.cb)
        labels = self.data.labels
        #Create an array of dimensions number of datapoints by number of students *2 + number of kcs
        if self.fullmatrix:
            studentdata = np.zeros((len(self.data.data), nrs + nrkc * 3))
        else:
            studentdata = sparsesp.lil_matrix(
                (len(self.data.data), nrs + nrkc * 3))
        #keep track of questions answered correctly and questions answered wrongly
        self.resetKCCF()
        kcc = self.kcc
        kcf = self.kcf
        totalerror = 0.0
        for nr, d in enumerate(self.data.giveData()):

            s = d[0]
            it = d[1]
            x = self.st[s]
            studentdata[nr, s] = 1
            for c in self.ikc[it]:
                x += kcf[s, c] * self.cr[c] + kcc[s, c] * self.cg[c] - self.cb[c]
                studentdata[nr, c + nrs] = kcc[s, c]
                studentdata[nr, c + nrs + nrkc] = kcf[s, c]
                studentdata[nr, nrs + c + 2 * nrkc] = -1
                if labels[nr]:
                    kcc[s, c] += 1
                else:
                    kcf[s, c] += 1
            try:
                big = m.exp(x) + 1
                if labels[nr]:
                    totalerror += np.log(1 / big)
                else:
                    totalerror += np.log(1 - (1 / big))
            except OverflowError:  # m.exp(x) overflowed
                if not labels[nr]:
                    print("WARNING: major error added in s")
                    totalerror += np.log(.95)
        model = linear_model.LogisticRegression(fit_intercept=False,
                                                penalty='l1',
                                                C=10**9)
        model.fit(studentdata, labels)

        self.st[:] = model.coef_[0][:nrs]
        self.cg[:] = model.coef_[0][nrs:nrs + nrkc]
        self.cr[:] = model.coef_[0][nrs + nrkc:nrs + nrkc * 2]
        self.cb[:] = model.coef_[0][nrs + nrkc * 2:]
        #Save the found kcc and kcf

        return totalerror / len(self.data.data)  # match the row count used for the matrix above
Example #8
def _train_test():
    tag_folder = '../data/2015/training/event_tags/'
    data_folder = '../data/2015/training/stanford_parse/'
    data = get_data(tag_folder, data_folder)

    if not combined:
        train_data_context_x, train_data_context_pos_deprel, train_data_lemma_x, train_data_pos_deprel, train_data_children_pos_deprel, train_data_y = _get_data(
            data)
    else:
        train_x1, train_y1 = _get_joint(data)

    tag_folder = '../data/2015/eval/event_tags/'
    data_folder = '../data/2015/eval/stanford_parse/'
    data = get_data(tag_folder, data_folder)

    if not combined:
        test_data_context_x, test_data_context_pos_deprel, test_data_lemma_x, test_data_pos_deprel, test_data_children_pos_deprel, test_data_y = _get_data(
            data)
    else:
        train_x2, train_y2 = _get_joint(data)

    tag_folder = '../data/2016/event_tags/'
    data_folder = '../data/2016/stanford_parse/'
    data = get_data(tag_folder, data_folder)
    train_x3, train_y3 = _get_joint(data)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if not combined:
        model.fit([
            np.array(train_data_context_x + test_data_context_x),
            np.array(train_data_context_pos_deprel +
                     test_data_context_pos_deprel),
            np.array(train_data_lemma_x + test_data_lemma_x),
            np.array(train_data_pos_deprel + test_data_pos_deprel),
            np.array(train_data_children_pos_deprel +
                     test_data_children_pos_deprel),
        ],
                  np.array(train_data_y + test_data_y),
                  batch_size=1500,
                  nb_epoch=15,
                  verbose=1,
                  shuffle=True)
    else:
        model.fit(np.array(train_x1 + train_x2 + train_x3),
                  np.array(train_y1 + train_y2 + train_y3),
                  batch_size=1000,
                  nb_epoch=15,
                  verbose=1,
                  shuffle=True)

    model.save('realis_models/model_6.h5')
    """
Example #9
    def kcUpdate(self):
        if self.debug:
            print "Starting kc update"
        nrs = len(self.st)
        nrkc = len(self.ca)
        labels = self.data.labels
        #Do a single fitting of the kc parameters and update them
        if self.fullmatrix:
            kcdata = np.zeros((len(self.data.data), 4 * nrkc))
        else:
            kcdata = sparsesp.lil_matrix((len(self.data.data), 4 * nrkc))

        kcc = np.zeros((nrs, nrkc))
        kcf = np.zeros((nrs, nrkc))
        totalerror = 0.0
        for nr, d in enumerate(self.data.giveData()):
            s = d[0]
            it = d[1]
            k = float(len(self.ikc[it]))
            x = 0
            for c in self.ikc[it]:
                x += self.ca[c] * self.st[s] / k + (
                    kcf[s, c] * self.cr[c] +
                    kcc[s, c] * self.cg[c]) * self.se[s] - self.cb[c]
                kcdata[nr, c] += self.st[s] / k
                kcdata[nr, c + nrkc] += self.se[s] * kcc[s, c]
                kcdata[nr, c + 2 * nrkc] += self.se[s] * kcf[s, c]
                kcdata[nr, nrkc * 3 + c] = -1
                if labels[nr]:
                    kcc[s, c] += 1
                else:
                    kcf[s, c] += 1
            try:
                big = m.exp(x) + 1
                if labels[nr]:
                    totalerror += 1 / big
                else:
                    totalerror += 1 - (1 / big)
            except OverflowError:  # m.exp(x) overflowed
                if not labels[nr]:
                    print("WARNING: major error added in kc")
                    totalerror += 1

        model = linear_model.LogisticRegression(fit_intercept=False,
                                                penalty='l1',
                                                C=10**9)  # '^' is XOR in Python; 10**9 was intended
        model.fit(kcdata, labels)

        self.ca[:] = model.coef_[0][:nrkc]
        self.cg[:] = model.coef_[0][nrkc:nrkc * 2]
        self.cr[:] = model.coef_[0][nrkc * 2:nrkc * 3]
        self.cb[:] = model.coef_[0][nrkc * 3:]

        return totalerror / len(self.data.data)
Example #10
    def sUpdate(self):
        if self.debug:
            print "Starting student update"
        #Do a single fitting of the student parameters and update them
        nrs=len(self.st)
        nrkc=len(self.ca)
        labels=self.data.labels
        #Create an array of dimensions number of datapoints by number of students *2 + number of kcs
        if self.fullmatrix:  
            studentdata=np.zeros((len(self.data.data),nrs*2+nrkc))
        else:
            studentdata=sparsesp.lil_matrix((len(self.data.data),nrs*2+nrkc))
        #keep track of questions answered correctly and questions answered wrongly
        
        kcc = self.kcc= self.basekcc.copy()
        kcf = self.kcf= self.basekcf.copy()
        likely=0.0
        for nr,d in enumerate(self.data.giveData()):
            
            s=d[0]
            it=d[1]
            x=0
            k=float(len(self.ikc[it]))
            for c in self.ikc[it]:
                x+=self.ca[c]*self.st[s]/k+self.ca[c]*(kcf[s,c]+kcc[s,c])*self.se[s]-self.cb[c]
                studentdata[nr,s]+=self.ca[c]/k
                studentdata[nr,s+nrs]+=(kcc[s,c]+kcf[s,c])*self.ca[c]
                studentdata[nr,nrs*2+c]=-1
                if labels[nr]:
                    kcc[s,c]+=1
                else:
                    kcf[s,c]+=1
            try:
                big=m.exp(x)+1
                if labels[nr]:
                    likely+=np.log(1-(1/big))
                else:
                    likely+=np.log(1/big)
            except OverflowError:  # m.exp(x) overflowed
                if not labels[nr]:
                    print("WARNING: major error added in s")
                    likely+=np.log(10**-99)
        model=linear_model.LogisticRegression(fit_intercept=False,penalty='l1',C=10**9)  # '^' is XOR in Python; 10**9 was intended
        model.fit(studentdata,labels)
                  
        self.st[:]=model.coef_[0][:nrs]
        self.se[:]=model.coef_[0][nrs:nrs*2]
        self.cb[:]=model.coef_[0][nrs*2:]
        #Save the found kcc and kcf

        
        return likely/len(self.data.data)
Example #11
    def sUpdate(self):
        
        #Do a single fitting of the student parameters and update them
        nrs=len(self.st)
        nrkc=len(self.cb)
        labels=self.data.labels
        #Create an array of dimensions number of datapoints by number of students *2 + number of kcs
        if self.fullmatrix:  
            studentdata=np.zeros((len(self.data.data),nrs+nrkc*3))
        else:
            studentdata=sparsesp.lil_matrix((len(self.data.data),nrs+nrkc*3))
        #keep track of questions answered correctly and questions answered wrongly
        self.resetKCCF()
        kcc = self.kcc
        kcf = self.kcf
        totalerror=0.0
        for nr,d in enumerate(self.data.giveData()):
            
            s=d[0]
            it=d[1]
            x=self.st[s]
            studentdata[nr,s]=1
            for c in self.ikc[it]:
                x+=kcf[s,c]*self.cr[c]+kcc[s,c]*self.cg[c]-self.cb[c]
                studentdata[nr,c+nrs]=kcc[s,c]
                studentdata[nr,c+nrs+nrkc]=kcf[s,c]
                studentdata[nr,nrs+c+2*nrkc]=-1
                if labels[nr]:
                    kcc[s,c]+=1
                else:
                    kcf[s,c]+=1
            try:
                big=m.exp(x)+1
                if labels[nr]:
                    totalerror+=np.log(1/big)
                else:
                    totalerror+=np.log(1-(1/big))
            except OverflowError:  # m.exp(x) overflowed
                if not labels[nr]:
                    print("WARNING: major error added in s")
                    totalerror+=np.log(.95)
        model=linear_model.LogisticRegression(fit_intercept=False,penalty='l1',C=10**9)
        model.fit(studentdata,labels)
                  
        self.st[:]=model.coef_[0][:nrs]
        self.cg[:]=model.coef_[0][nrs:nrs+nrkc]
        self.cr[:]=model.coef_[0][nrs+nrkc:nrs+nrkc*2]
        self.cb[:]=model.coef_[0][nrs+nrkc*2:]
        #Save the found kcc and kcf

        
        return totalerror/len(self.data.data)  # match the row count used for the matrix above
Example #12
 def sUpdate(self):
     #Only a single update function necessary, because the model is linear within the logistic function
     nrs=len(self.st)
     nrkc=len(self.cb)
     labels=self.data.labels
     #Create an array of dimensions number of datapoints by number of students *2 + number of kcs
     if self.fullmatrix:  
         studentdata=np.zeros((len(self.data.data),nrs+nrkc*2))
     else:
         studentdata=sparsesp.lil_matrix((len(self.data.data),nrs+nrkc*2))
     #keep track of questions answered correctly and questions answered wrongly
     self.resetKCCF()        
     kcc = self.kcc
     kcf = self.kcf
     totalerror=0.0
     for nr,d in enumerate(self.data.giveData()):
         s=d[0]
         it=d[1]
         x=self.st[s]
         studentdata[nr,s]=1
         for c in self.ikc[it]:
             x+=(kcf[s,c]+kcc[s,c])*self.cg[c]-self.cb[c]
             studentdata[nr,c+nrs]=kcc[s,c]+kcf[s,c]
             studentdata[nr,nrs+c+nrkc]=-1
             if labels[nr]:
                 kcc[s,c]+=1
             else:
                 kcf[s,c]+=1
         try:
             big=m.exp(x)+1
             if labels[nr]:
                 totalerror+=np.log(1/big)
             else:
                 totalerror+=np.log(1-(1/big))
         except OverflowError:  # m.exp(x) overflowed
             if not labels[nr]:
                 print("WARNING: major error added in s")
                 totalerror+=np.log(.95)
     model=linear_model.LogisticRegression(fit_intercept=False,penalty='l1',C=10**9)
     if sum(labels)==0 or sum(labels)==len(labels):
         print("about to die!")  # logistic regression needs both classes present
         print("corrects / Total data", sum(labels), len(labels))
         
     
     model.fit(studentdata,labels)       
     self.st[:]=model.coef_[0][:nrs]
     self.cg[:]=model.coef_[0][nrs:nrs+nrkc]
     self.cb[:]=model.coef_[0][nrs+nrkc:]  
     return totalerror/len(self.data.data)  # match the row count used for the matrix above
Example #13
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray, input data for training (e.g. shape (7776, 4, 1)).
        y_train: ndarray, target data for training (e.g. shape (7776,)).
        name: String, name of model.
        config: Dict, parameter for train.
    """
    opt = keras.optimizers.Adam(learning_rate=0.001)
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    model.compile(loss="mse", optimizer=opt, metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/model_out/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/model_loss/' + name + '_loss.csv',
              encoding='utf-8',
              index=False)
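
The train_model variants on this page expect config to be a plain dict with 'batch' and 'epochs' keys; a hedged usage sketch (the values and model name are made up):

config = {"batch": 256, "epochs": 600}  # hypothetical hyperparameters
train_model(model, X_train, y_train, "lstm", config)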
Example #14
def train(model, x_train, y_train):
    print('[INFO] Start training model')

    train_start = time.time()

    model.fit(x_train,
              y_train,
              validation_split=config.validation_rate,
              batch_size=config.batch_size,
              epochs=config.num_epochs,
              verbose=1,
              callbacks=config.callbacks)

    train_end = time.time()
    print('[INFO] End training model')
    print('[INFO] Training time: ', train_end - train_start)
Example #15
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
        
    """
    # Define the Keras TensorBoard callback.
    logdir = os.path.join(
        "logs",
        "fit",
        name,
        'lstm_4_4',
        datetime.now().strftime("%Y%m%d-%H%M"),
    )
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

    model.compile(loss="mse", optimizer="adam", metrics=['mape'])
    early = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
    hist = model.fit(
        X_train, y_train,
        batch_size=config["batch"],
        epochs=config["epochs"],
        validation_split=0.05,
        callbacks=[tensorboard_callback, early])
    print(name)
    model.save('model/' + name + '4_layers_4'  + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
Example #16
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """
    mlflow.set_tracking_uri("http://127.0.0.1:5000")
    tracking_uri = mlflow.get_tracking_uri()
    print("Current tracking uri: {}".format(tracking_uri))

    tags = {"usuario": "Anonymous"}

    mlflow.set_experiment("traffic_flow-saes")
    with mlflow.start_run() as run:
        mlflow.set_tags(tags)
        mlflow.keras.autolog()

        model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
        #early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
        hist = model.fit(X_train,
                         y_train,
                         batch_size=config["batch"],
                         epochs=config["epochs"],
                         validation_split=0.05)

        model.save('model/' + name + '.h5')
        df = pd.DataFrame.from_dict(hist.history)
        df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
        mlflow.log_param("Run_id", run.info.run_id)
Example #17
 def kcUpdate(self):
     if self.debug:
         print "Starting kc update"
     nrs=len(self.st)
     nrkc=len(self.ca)
     labels=self.data.labels
     #Do a single fitting of the kc parameters and update them
     if self.fullmatrix:
         kcdata=np.zeros((len(self.data.data),2*nrkc))
     else:
         kcdata=sparsesp.lil_matrix((len(self.data.data),2*nrkc))  # match the dense branch; only 2*nrkc columns are used below
     
     kcc = np.zeros((nrs,nrkc))
     kcf = np.zeros((nrs,nrkc))
     likely=0.0    
     for nr,d in enumerate(self.data.giveData()):
         s=d[0]
         it=d[1]
         k=float(len(self.ikc[it]))
         x=0
         for c in self.ikc[it]:
             x+=self.ca[c]*self.st[s]/k+(kcf[s,c]+kcc[s,c])*self.se[s]*self.ca[c]-self.cb[c]
             kcdata[nr,c]+=self.st[s]/k+(kcf[s,c]+kcc[s,c])*self.se[s]
             kcdata[nr,nrkc+c]=-1
             if labels[nr]:
                 kcc[s,c]+=1
             else:
                 kcf[s,c]+=1
         try:
             big=m.exp(x)+1
             if labels[nr]:
                 likely+=np.log(1-(1/big))
             else:
                 likely+=np.log(1/big)
         except OverflowError:  # m.exp(x) overflowed
             if not labels[nr]:
                 print("WARNING: major error added in kc")
                 likely+=np.log(10**-99)
   
     model=linear_model.LogisticRegression(fit_intercept=False,penalty='l1',C=10**9)  # '^' is XOR in Python; 10**9 was intended
     model.fit(kcdata,labels)
     
     
     self.ca[:]=model.coef_[0][:nrkc]
     self.cb[:]=model.coef_[0][nrkc:nrkc*2]
     
     return likely/len(self.data.data)
Example #18
def train_model(model, X_train, y_train, name, config):
    model.compile(loss='mse', optimizer='rmsprop', metrics=['mape'])
    hist = model.fit(X_train, y_train,
        batch_size=config['batch'],
        epochs=config['epochs'],
        validation_split=0.05)
    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + '_loss.csv', encoding='utf-8', index=False)
Example #19
def create_and_train_model(train_X, train_y, test_X, test_y, epoch, batch):
    from model import model
    print('Training model ..')
    model = model()
    # Training the model on 90% of the dataset (train data)
    model.fit(x=train_X,
              y=train_y.values,
              validation_data=(test_X, test_y.values),
              epochs=epoch,
              batch_size=batch)

    loss = pd.DataFrame(model.history.history)
    loss.plot()

    # Predicting the output (price in USD) for the test set.
    preds = model.predict(test_X)
    print('Training completed.')
    return preds, model
Example #20
def train(fname, out_fname):
    """ 
    All data was stored in a json file.
    """
    # Load dataset
    f = open(fname)
    planesnet = json.load(f)
    f.close()

    # Preprocess image data and labels for input
    X = np.array(planesnet['data']) / 255.
    X = X.reshape([-1,3,20,20]).transpose([0,2,3,1])
    Y = np.array(planesnet['labels'])
    Y = to_categorical(Y, 2)

    # Train the model ('model' is assumed to be a module-level TFLearn DNN, as in Example #1)
    model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=.2,
              show_metric=True, batch_size=128, run_id='planesnet')

    # Save trained model
    model.save(out_fname)
Example #21
def train(model, X, args):
    """ Train VAE. """

    if args.standardized:
        mu = np.mean(X, axis=0)
        sigma = np.std(X, axis=0)
        # save mu and sigma
        with open(args.save_path + 'mu_sigma.pickle', 'wb') as f:
            pickle.dump([mu, sigma], f)
        X = (X - mu) / (sigma + 1e-10)  # standardize input variables

    # set training arguments
    if args.print_progress:
        verbose = 1
    else:
        verbose = 0

    kwargs = {}
    kwargs['epochs'] = args.epochs
    kwargs['batch_size'] = args.batch_size
    kwargs['shuffle'] = True
    kwargs['validation_data'] = (X, None)
    kwargs['verbose'] = verbose

    if args.save:  # create callback
        checkpointer = ModelCheckpoint(filepath=args.save_path +
                                       'vae_weights.h5',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=True)
        kwargs['callbacks'] = [checkpointer]

        # save model architecture
        with open(args.save_path + 'model.pickle', 'wb') as f:
            pickle.dump([
                X.shape[1], args.hidden_layers, args.latent_dim,
                args.hidden_dim
            ], f)

    model.fit(X, **kwargs)
Example #22
def train_model(model, X_train, y_train, name, config):

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
Example #23
def train_model(model, X_train, y_train, name, config):
    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)
    temp = 'scaler'
    model.save('model/' + name + temp + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + temp + ' loss.csv',
              encoding='utf-8',
              index=False)
Example #24
def train_allDense_model(model, X_train, y_train, name, config, lag):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"])

    model.save('model/' + name + '-' + str(lag) + '.h5')
Example #25
def train_model(model, X_train, y_train, name, config, lag):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '-' + str(lag) + '.h5')
Example #26
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['rmse'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
Example #27
from time import time

import keras as k

import util
from dataset import data_training
from model import model


start_time = time()
data = data_training()

print('Training model...')
model.fit(data[0], data[1],
	batch_size=512, epochs=50,
	callbacks=[k.callbacks.TensorBoard(write_images=True)])
model.save('data/model.h5')
print('Done')
print('Time:', util.time_delta(time() - start_time))
Example #28
logging.info(f"TRAIN_ID {proj_id}")
logging.info(f"TRAIN_PATH {train_path}")

#
# Read dataset
#
#fields = """doc_id,hotel_name,hotel_url,street,city,state,country,zip,class,price,
#num_reviews,CLEANLINESS,ROOM,SERVICE,LOCATION,VALUE,COMFORT,overall_ratingsource""".replace("\n",'').split(",")

read_table_opts = dict(sep="\t", names=fields, index_col=False)
df = pd.read_table(train_path, **read_table_opts)

#split train/test
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:, 2:],
                                                    df.iloc[:, 1],
                                                    test_size=0.2)

#
# Train the model
#
model.fit(X_train, y_train)

#model_score = model.score(X_test, y_test)
model_score = log_loss(y_test, model.predict_proba(X_test)[:, 1])

logging.info(f"model score: {model_score:.3f}")

# save the model
dump(model, "{}.joblib".format(proj_id))
Example #29
print('Will train with {} and test with {} samples'.format(
    len(train_inputs[0]), len(test_inputs[0])))

avg_winners = np.mean(train_output, axis=0)


def custom_loss(y_true, y_pred):
    normalized_error = (y_pred - y_true) / avg_winners
    return tf.reduce_mean(tf.math.square(normalized_error), axis=1)


model.compile(optimizer='adam', loss=[None, custom_loss])
model.fit(train_inputs,
          train_output,
          validation_data=(test_inputs, test_output),
          epochs=1000,
          callbacks=[
              tf.keras.callbacks.EarlyStopping('loss', patience=5),
              tf.keras.callbacks.TensorBoard(log_dir='logs/' +
                                             time.strftime('%Y%m%d%H%M%S'),
                                             histogram_freq=1)
          ])

model.save('results/model.h5', include_optimizer=False)
normal_probs, lucky_probs = model.get_layer('gather_probs_layer').get_probs()
normal_probs = pd.Series(normal_probs, index=np.arange(1, 50))
lucky_probs = pd.Series(lucky_probs, index=np.arange(1, 11))
normal_probs.to_csv('results/normal_probs.csv', header=False)
lucky_probs.to_csv('results/lucky_probs.csv', header=False)
Example #30
validation_steps = validation_generator.n//validation_generator.batch_size


checkpoint = tf.keras.callbacks.ModelCheckpoint("./model_weights.h5", monitor='val_accuracy',
                            save_weights_only = True,
                            mode = 'max',
                            verbose = 1)
lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=3, verbose=1)
checkpointer = tf.keras.callbacks.ModelCheckpoint("model.h5", monitor='val_loss', verbose=1, save_best_only=True)

callbacks = [checkpoint, lr_reducer, checkpointer]

history = model.fit(
    x=train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=epochs,
    validation_data = validation_generator,
    validation_steps = validation_steps,
    callbacks=callbacks
)

model = tf.keras.models.load_model('C:/Users/eshna airon/Downloads/FacialExpressionRecognition-master/app/model.h5')  ### Update it to your path

def predict_images(img):
    img = img_to_array(img)
    
    img = img.reshape(1, 100, 100, 3)
Example #31
from keras.optimizers import Adam
from nmt_utils import *
from model import model

m = 10000
Tx = 30
Ty = 10
n_a = 32
n_s = 64
learning_rate = 0.005
batch_size = 100

dataset, human_vocab, machine_vocab, inv_vocab = load_dataset(m)
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
# model.summary()
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, decay=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
outputs = list(Yoh.swapaxes(0, 1))
model.fit([Xoh, s0, c0], outputs, epochs=50, batch_size=batch_size)
model.save_weights('models/model_50.h5')
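
A hedged inference sketch for the trained attention model, assuming the usual greedy decoding for this date-translation setup (inv_vocab maps indices back to characters):

preds = model.predict([Xoh, s0, c0])  # list of Ty arrays, each (m, len(machine_vocab))
sample = [int(np.argmax(p[0])) for p in preds]  # greedy decode of the first example
print(''.join(inv_vocab[i] for i in sample))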
Example #32
reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='loss',
    factor=FLAGS['lr_factor'],
    patience=FLAGS['patience'],
    verbose=0,
    mode='auto',
    min_delta=FLAGS['min_delta'],
    cooldown=0,
    min_lr=FLAGS['min_lr'])

early_stop_callback = tf.keras.callbacks.EarlyStopping(monitor='loss',
                                                       patience=10)

EPOCHS = FLAGS['epochs']

history = model.fit(dataset,
                    epochs=EPOCHS,
                    callbacks=[checkpoint_callback, reduce_lr_callback,
                               early_stop_callback])  # include the early-stopping callback defined above

end = datetime.now()
END_TIME = str(end).replace(' ', '_')[:-7]

training_time = str(end - start)
print('Training took {} hour/min/sec'.format(training_time.split('.')[0]))

# Save final model weights for freezing and exporting later
save_model_path = os.path.join(basedir, 'saved_models',
                               'final_{}'.format(END_TIME))
model.save_weights(save_model_path)
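
checkpoint_callback, start, and basedir are referenced above but defined earlier in the original script; a plausible sketch of the assumed setup (paths are hypothetical):

import os
import tensorflow as tf
from datetime import datetime

start = datetime.now()
basedir = os.getcwd()
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=os.path.join(basedir, 'checkpoints', 'ckpt_{epoch}'),
    monitor='loss',
    save_weights_only=True)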
Example #33
warnings.warn = warn  # silence warnings ('warn' is presumably a no-op defined earlier)

from bert4keras.backend import keras
from dataset import PoetryDataGenerator, tokenizer, poetry
from model import model
import config
import utils


class Evaluator(keras.callbacks.Callback):
    """评估与保存
    """
    def __init__(self):
        self.lowest = 1e10

    def on_epoch_end(self, epoch, logs=None):
        # save the best (lowest-loss) weights
        if logs['loss'] <= self.lowest:
            self.lowest = logs['loss']
            model.save_weights(config.BEST_MODEL_PATH)
        print(utils.generate_random_poetry(tokenizer, model))


# create the data generator
data_generator = PoetryDataGenerator(poetry, batch_size=config.BATCH_SIZE)
# start training
model.fit(data_generator.forfit(),
          steps_per_epoch=data_generator.steps,
          epochs=config.TRAIN_EPOCHS,
          callbacks=[Evaluator()])