Example #1
#    test_data.append(test_data[0])
    print(parameter)
    evaluations = []
    if params.match_type == 'pointwise':
        if params.onehot:
            # One-hot targets are trained with a 10x larger learning rate.
            params.lr = 10 * params.lr
        test_data = [to_array(i, reader.max_sequence_length) for i in test_data]
        loss_type, metric_type = (("categorical_hinge", "acc") if params.onehot
                                  else ("mean_squared_error", "mean_squared_error"))
        model.compile(loss=loss_type,
                      optimizer=units.getOptimizer(name=params.optimizer, lr=params.lr),
                      metrics=[metric_type])
        for i in range(params.epochs):
            if "unbalance" in params.__dict__ and params.unbalance:
                model.fit_generator(reader.getPointWiseSamples4Keras(onehot=params.onehot, unbalance=params.unbalance),
                                    epochs=1, steps_per_epoch=int(len(reader.datas["train"]) / reader.batch_size),
                                    verbose=True, callbacks=[logger.getCSVLogger()])
            else:
                model.fit_generator(reader.getPointWiseSamples4Keras(onehot=params.onehot),
                                    epochs=1, steps_per_epoch=int(len(reader.datas["train"]["question"].unique()) / reader.batch_size),
                                    verbose=True, callbacks=[logger.getCSVLogger()])
            y_pred = model.predict(x=test_data, batch_size=params.batch_size)
            # With one-hot outputs, score by the positive-class probability after a softmax.
            score = batch_softmax_with_first_item(y_pred)[:, 1] if params.onehot else y_pred

            metric = reader.evaluate(score, mode="test")
            evaluations.append(metric)
            logger.info(metric)

    elif params.match_type == 'pairwise':
        test_data.append(test_data[0])
        test_data = [to_array(i, reader.max_sequence_length) for i in test_data]
        model.compile(loss=identity_loss,
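
The pairwise branch above compiles the model with identity_loss, which this excerpt never defines. In Keras pairwise-ranking code the network typically computes the hinge/triplet loss itself from the (question, positive answer, negative answer) inputs, so the compiled loss only passes that output through. A minimal sketch of such a pass-through loss, assuming that convention (not the listing's actual definition):

from keras import backend as K

def identity_loss(y_true, y_pred):
    # The model's output is already the pairwise loss value,
    # so the loss function just averages it and ignores y_true.
    return K.mean(y_pred)
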
Example #2
    for parameter in parameters:
#        old_dataset = params.dataset_name
        # Apply the current grid-search combination to the shared params object.
        params.setup(zip(grid_parameters.keys(), parameter))

        import models.representation as models
        import dataset
        reader = dataset.setup(params)
        params = dataset.process_embedding(reader, params)
        qdnn = models.setup(params)
        model = qdnn.getModel()

        model.compile(loss=params.loss,
                      optimizer=units.getOptimizer(name=params.optimizer, lr=params.lr),
                      metrics=['accuracy'])
#        model.summary()
        (train_x, train_y), (test_x, test_y), (val_x, val_y) = reader.get_processed_data()
        history = model.fit(x=train_x, y=train_y, batch_size=params.batch_size, epochs=params.epochs,
                            validation_data=(test_x, test_y), verbose=False,
                            callbacks=[logger.getCSVLogger()])
        logger.info(parameter)
        logger.info(max(history.history["val_acc"]))
        # Final evaluation on the held-out validation split.
        evaluation = model.evaluate(x=val_x, y=val_y)
        # Free the TensorFlow graph before the next configuration.
        K.clear_session()
#    test_match()


# x_input = np.asarray([b])
# y = model.predict(x_input)
# print(y)
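
Example #2 loops over parameters and applies each combination with params.setup(zip(grid_parameters.keys(), parameter)). How parameters is built falls outside the excerpt; a minimal sketch of the usual construction with itertools.product (the keys and values below are hypothetical placeholders, not the script's real search space):

import itertools

# Hypothetical search space; the real grid_parameters comes from the surrounding script.
grid_parameters = {
    "lr": [0.1, 0.01],
    "batch_size": [16, 32],
    "dropout_rate": [0.2, 0.5],
}

# One tuple per combination, in the same key order that params.setup(...) consumes.
parameters = list(itertools.product(*grid_parameters.values()))

for parameter in parameters:
    print(dict(zip(grid_parameters.keys(), parameter)))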