Example #1
 def loss(self, data, model, tt, name):
     losses = self.get_predictions_loss(data, model, tt)[1]
     loss = np.sum(losses)
     info('{} total loss: {:0.3f}'.format(name, loss))
     avg_loss = np.mean(losses)
     info('{} average loss per protein: {:0.3f}'.format(name, avg_loss))
     return ['loss_' + tt, 'ave_loss_' + tt], [loss, avg_loss]
Example #2
    def test_constructor_arg_logfile_cfgfile_valid(self, cfgfile_valid, logfile):
        # Tests the constructor with a valid argument and a valid config file.
        # Logging is sent to a logfile in the config directory.
        p = logfile
        args = [f'--config={cfgfile_valid}', f'--log-file={p}']

        config.Configuration(args)
        config.info('Test')
        assert os.path.getsize(p) != 0
Example #3
 def fit(self, exp_spec, data, model):
     """ trains model by iterating mini-batches for specified number of epochs """
     # train for specified number of epochs
     for epoch in range(1, exp_spec['num_epochs'] + 1):
         info('epoch_' + str(epoch))
         self.train_epoch(data['train'], model, exp_spec['mini_batch_size'])
     # calculate train and test metrics
     headers, result = self.processor.process_results(
         exp_spec, data, model, 'epoch_' + str(exp_spec['num_epochs']))
     # clean up
     self.processor.reset()
     model.close()
     return headers, result
Example #4
    def determinerGagnant(self):
        """Détermine le gagnant de la partie à la fin du jeu."""
        self.cote_gagnant = self.plateau.obtenirCoteGagnant()
        if self.cote_gagnant is not None:
            self.gagnant = self.joueurs[self.cote_gagnant].nom
            cfg.info("Le joueur " + self.gagnant + " a gagne.",
                     nom_fichier="othello.py")
        else:
            cfg.info("Match nul.", nom_fichier="othello.py")
            self.gagnant = None

        # Note that the board only knows about sides; at no point does it hold
        # the actual players as attributes. Indeed, it is the players that use
        # the board, not the other way around.
        return self.gagnant
Example #5
 def process_results(self, exp_spec, data, model, name):
     """ Processes each result in the results object based on its type and returns stuff if specified in exp_spec """
     info('Results for {}'.format(name))
     self.test_batch_size = exp_spec['test_batch_size']
     metrics = [
         'loss_train', 'loss_test', 'roc_train', 'roc_test', 'auprc_train',
         'auprc_test'
     ]
     _headers = []
     _results = []
     for metric in metrics:
         process_function = getattr(self, metric)
         headers, results = process_function(data, model, name)
         _headers += headers
         _results += results
     return _headers, _results
Example #6
 def auprc(self, data, model, tt, name):
     scores = self.get_predictions_loss(data, model, tt)[0]
     labels = [protein['label'][:, 2] for protein in data[tt]]
     close_count = 0
     auprcs = []
     for preds, lbls in zip(scores, labels):
         if np.allclose(preds[:, 0],
                        np.zeros_like(preds[:, 0]) + np.mean(preds[:, 0])):
             close_count += 1
         auprcs.append(average_precision_score(lbls, preds))
     if close_count > 0:
         info(
             'For {} proteins, all predicted scores are nearly identical, so the auprc may depend on arbitrary tie ordering'
             .format(close_count))
     med_auprc = np.median(auprcs)
     info('{} median auprc: {:0.3f}'.format(name, med_auprc))
     return ['auprc_med_' + tt], [med_auprc]
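
A note on the warning above: when every predicted score for a protein is (nearly) the same, the ranking carries no information and the average precision collapses to roughly the positive-class rate, while informative scores can reach 1.0. A minimal sketch with made-up labels and scores (exact tie handling may differ slightly across sklearn versions):

    import numpy as np
    from sklearn.metrics import average_precision_score

    labels = np.array([0, 0, 0, 1, 0, 1, 0, 0, 0, 0])    # hypothetical interface labels
    constant_scores = np.full(10, 0.5)                    # every residue gets the same score
    separating_scores = np.where(labels == 1, 0.9, 0.1)   # scores that separate the classes

    print(average_precision_score(labels, constant_scores))    # ~0.2, the positive rate
    print(average_precision_score(labels, separating_scores))  # 1.0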
Example #7
 def roc(self, data, model, tt, name):
     scores = self.get_predictions_loss(data, model, tt)[0]
     labels = [prot['label'][:, 2] for prot in data[tt]]
     fprs = []
     tprs = []
     roc_aucs = []
     for s, l in zip(scores, labels):
         fpr, tpr, _ = roc_curve(l, s)
         roc_auc = auc(fpr, tpr)
         fprs.append(fpr)
         tprs.append(tpr)
         roc_aucs.append(roc_auc)
     auc_prot_med = np.median(roc_aucs)
     auc_prot_ave = np.mean(roc_aucs)
     info('{} average protein auc: {:0.3f}'.format(name, auc_prot_ave))
     info('{} median protein auc: {:0.3f}'.format(name, auc_prot_med))
     return ['auc_prot_ave_' + tt,
             'auc_prot_med_' + tt], [auc_prot_ave, auc_prot_med]
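
In this snippet the fpr/tpr arrays are collected but not returned; if only the per-protein AUC is needed, sklearn's roc_auc_score gives the same number in a single call. A small equivalence check with hypothetical labels and scores:

    import numpy as np
    from sklearn.metrics import auc, roc_auc_score, roc_curve

    labels = np.array([0, 1, 1, 0, 1])            # hypothetical per-residue labels
    scores = np.array([0.2, 0.8, 0.6, 0.4, 0.9])  # hypothetical predicted scores

    fpr, tpr, _ = roc_curve(labels, scores)
    assert np.isclose(auc(fpr, tpr), roc_auc_score(labels, scores))  # same AUC either way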
Example #8
 def actualiser(self):
     """Actualise le jeu."""
     self.bordure.actualiser(self.rang, self.plateau.obtenirScores(),
                             self.fini, self.gagnant)
     if self.panneau:
         self.panneau.check()
         self.ouvert = self.panneau.open
     if not self.plateau.estFini():
         if self.panneau:
             self.afficher()
         self.faireTour()
     else:
         if not self.fini:
             self.fini = True
             self.determinerGagnant()
             cfg.info(
                 "Fin de partie : Le gagnant est {}".format(
                     repr(self.gagnant)),
                 nom_fichier="othello.py",
             )
         if self.panneau:
             self.afficher()
         else:
             self.ouvert = False
Example #9
 def __init__(self, layers, layer_args, train_data, learning_rate, pn_ratio,
              res_dir):
     """ Assumes same dimensions and neighborhoods for l_ and r_
     :param layers: exp_spec['spec']->spec['layers']
     :param layer_args: exp_spec['spec']->spec['layer_args']
     :param train_data: exp_spec['train_data_file']->data['train']
     :param learning_rate: Learning Rate
     :param pn_ratio: Positive-Negative Ratio
     :param res_dir: output directory of results
     data: 'l_'->ligand protein; 'r_'->receptor protein
     """
     # parameters & args
     self.layer_args = layer_args
     self.params = {}
     # TensorFlow graph and session state
     self.graph = tf.Graph()
     self.session = None
     self.predictions = None
     # get details of train data
     self.dcnn = ('l_edge'
                  not in train_data[0])  # convolution type is dcnn or not
     self.vertex_dimension = train_data[0]['l_vertex'].shape[-1]
     if self.dcnn:
         self.hop = train_data[0]['l_power_series'].shape[1]
     else:
         self.edge_dimension = train_data[0]['l_edge'].shape[-1]
         self.hood_size = train_data[0]['l_hood_indices'].shape[1]
     # set self.graph as default Graph Object
     with self.graph.as_default():
         # input shapes and TensorFlow placeholders
         self.vertex1 = tf.placeholder(tf.float32,
                                       [None, self.vertex_dimension],
                                       'vertex1')
         self.vertex2 = tf.placeholder(tf.float32,
                                       [None, self.vertex_dimension],
                                       'vertex2')
         if self.dcnn:
             self.hop1 = tf.placeholder(tf.float32, [None, self.hop, None],
                                        'hop1')
             self.hop2 = tf.placeholder(tf.float32, [None, self.hop, None],
                                        'hop2')
             input1 = self.vertex1, self.hop1
             input2 = self.vertex2, self.hop2
         else:
             self.edge1 = tf.placeholder(
                 tf.float32, [None, self.hood_size, self.edge_dimension],
                 'edge1')
             self.edge2 = tf.placeholder(
                 tf.float32, [None, self.hood_size, self.edge_dimension],
                 'edge2')
             self.hood_indices1 = tf.placeholder(tf.int32,
                                                 [None, self.hood_size, 1],
                                                 'hood_indices1')
             self.hood_indices2 = tf.placeholder(tf.int32,
                                                 [None, self.hood_size, 1],
                                                 'hood_indices2')
             input1 = self.vertex1, self.edge1, self.hood_indices1
             input2 = self.vertex2, self.edge2, self.hood_indices2
         self.examples = tf.placeholder(tf.int32, [None, 2], 'examples')
         self.labels = tf.placeholder(tf.float32, [None], 'labels')
         self.keep_prob = tf.placeholder(tf.float32, [], 'keep_prob')
         # make layers
         legs = True
         inputs = None
         i = 0
         while i < len(layers):
             layer = layers[i]
             args = copy.deepcopy(layer_args)
             args['keep_prob'] = self.keep_prob
             layer_type = layer[0]
             next_arg = layer[1] if len(layer) > 1 else {}
             flag = layer[2] if len(layer) > 2 else None
             args.update(next_arg)
             # look up the layer-construction function in components by name
             layer_function = getattr(components, layer_type)
             # flag == ['merge'] if flag is not None
             if flag is not None and 'merge' in flag:
                 legs = False
                 # take vertex features only
                 inputs = input1[0], input2[0], self.examples
             if legs:
                 # make leg layers(everything up to the merge layer)
                 name = 'leg1_{}_{}'.format(layer_type, i)
                 with tf.name_scope(name):
                     output, params = layer_function(input1, None, **args)
                     # update params
                     if params is not None:
                         info("not merge")
                         print(len(params.items()))
                         self.params.update({
                             '{}_{}'.format(name, key): value
                             for key, value in params.items()
                         })
                     if self.dcnn:
                         input1 = output, self.hop1
                     else:
                         input1 = output, self.edge1, self.hood_indices1
                 name = 'leg2_{}_{}'.format(layer_type, i)
                 with tf.name_scope(name):
                     output = layer_function(input2, params, **args)[0]
                     if self.dcnn:
                         input2 = output, self.hop2
                     else:
                         input2 = output, self.edge2, self.hood_indices2
             else:
                 # merged layers
                 name = '{}_{}'.format(layer_type, i)
                 with tf.name_scope(name):
                     inputs, params = layer_function(inputs, None, **args)
                     # update params
                     if params is not None and len(params.items()) > 0:
                         self.params.update({
                             '{}_{}'.format(name, key): value
                             for key, value in params.items()
                         })
             i += 1
         self.predictions = inputs
         # loss
         with tf.name_scope('loss'):
             scale_vector = (pn_ratio * (self.labels - 1) / -2) + (
                 (self.labels + 1) / 2)
             logits = tf.concat([-self.predictions, self.predictions],
                                axis=1)
             labels = tf.stack([(self.labels - 1) / -2,
                                (self.labels + 1) / 2],
                               axis=1)
             self.loss = tf.losses.softmax_cross_entropy(
                 labels, logits, weights=scale_vector)
         # optimizer
         with tf.name_scope('optimizer'):
             # generate an op which trains the model
             self.train_op = tf.train.GradientDescentOptimizer(
                 learning_rate).minimize(self.loss)
         # set up TensorFlow session
         self.sess = tf.Session()
         self.sess.run(tf.global_variables_initializer())
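
The label handling in the loss scope above is compact, so here is a small numpy trace of it, assuming labels take values in {-1, +1} and using an arbitrary illustrative pn_ratio: the transform weights negative examples by pn_ratio, weights positive examples by 1, and builds one-hot targets with column 0 for the negative class and column 1 for the positive class.

    import numpy as np

    labels = np.array([-1.0, 1.0, -1.0])  # hypothetical labels: -1 = negative pair, +1 = positive pair
    pn_ratio = 0.1                        # illustrative value; the real one comes from the experiment spec

    # per-example weights: pn_ratio for negatives, 1.0 for positives
    scale_vector = pn_ratio * (labels - 1) / -2 + (labels + 1) / 2
    print(scale_vector)                   # [0.1 1.  0.1]

    # one-hot targets: column 0 = negative class, column 1 = positive class
    one_hot = np.stack([(labels - 1) / -2, (labels + 1) / 2], axis=1)
    print(one_hot)                        # [[1. 0.] [0. 1.] [1. 0.]]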
Example #10
    Building of models  --classifier.py
    Training and testing of models  --fitter.py
    Processing and output of results  --processor.py
"""

# load experiment specified in system args
# exp_file = sys.argv[1]
# exp_file = "no_conv.yml"
exp_file = "single_weight_matrix.yml"
# exp_file = "node_avg.yml"
# exp_file = "node_edge_avg.yml"
# exp_file = "order_dependent.yml"
# exp_file = "deep_tensor.yml"
# exp_file = "dcnn_hop2.yml"
# exp_file = "dcnn_hop5.yml"
info("Running Experiment File: {}".format(exp_file))
f_name = exp_file.split('.')[0] if '.' in exp_file else exp_file
with open(os.path.join(exp_dir, exp_file), 'r') as f:
    exp_spec = yaml.safe_load(f)
# ensure the results output directory exists
res_dir = os.path.join(out_dir, f_name)
if not os.path.exists(res_dir):
    os.mkdir(res_dir)
processor = Processor()
# create results log
res_log = os.path.join(res_dir, "results.csv")
with open(res_log, 'w') as f:
    f.write("")
# write experiment specifications to file
with open(os.path.join(res_dir, "exp.yml"), 'w') as f:
    f.write("{}\n".format(yaml.dump(exp_spec)))
# perform each experiment
Example #11
 def __init__(self):
     self.__info = config.info()
     self.__host = self.__info[0]
     self.__user = self.__info[1]
     self.__passwd = self.__info[2]
     self.__db = self.__info[3]
Example #12
from termcolor import colored as c
from libs.console import Autocomplete, start_shell
import libs.server as server
import libs.ngrok as ngrok
import config
from getpass import getpass
from uuid import uuid4
from subprocess import Popen, PIPE
import time, _thread, os
import code, platform

arm = "arm aarch64_be aarch64 armv8b armv8l".split(" ")
x86 = "i386 i686".split(" ")
x64 = ["x86_64"]

author = config.info()
mch = platform.machine()


class var(object):
    current_agent = None


var = var()

man = {
    'help': 'Displays the help menu',
    'banner': 'Prints the program\'s banner',
    'exit': 'Exits the program',
    'man': 'Find more information about a command. Usage: \'man help\'',
    'listAgents': 'List active agents',