Example #1
 def addReferer(self, fieldDescr):
     '''p_fieldDescr is a Ref type definition.'''
     k = fieldDescr.appyType.klass
     refClassName = getClassName(k, self.applicationName)
     if not self.referers.has_key(refClassName):
         self.referers[refClassName] = []
     self.referers[refClassName].append(fieldDescr)
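The has_key test above is Python 2 style. A minimal sketch of the same registry update written with dict.setdefault, assuming the same surrounding class and getClassName helper as in the example:

def addReferer(self, fieldDescr):
    '''p_fieldDescr is a Ref type definition.'''
    k = fieldDescr.appyType.klass
    refClassName = getClassName(k, self.applicationName)
    # setdefault creates the empty list on first use, so the separate
    # membership test is no longer needed.
    self.referers.setdefault(refClassName, []).append(fieldDescr)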
Example #2
 def addReferer(self, fieldDescr):
     '''p_fieldDescr is a Ref type definition.'''
     k = fieldDescr.appyType.klass
     refClassName = getClassName(k, self.applicationName)
     if not self.referers.has_key(refClassName):
         self.referers[refClassName] = []
     self.referers[refClassName].append(fieldDescr)
Example #3
 def __init__(self, klass, orderedAttributes, generator):
     Descriptor.__init__(self, klass, orderedAttributes, generator)
     self.methods = ''  # Needed method definitions will be generated here
     self.name = getClassName(self.klass, generator.applicationName)
     self.predefined = False
     self.customized = False
     # Phase and page names will be calculated later, when first required.
     self.phases = None
     self.pages = None
Example #4
 def __init__(self, klass, orderedAttributes, generator):
     Descriptor.__init__(self, klass, orderedAttributes, generator)
     self.methods = '' # Needed method definitions will be generated here
     self.name = getClassName(self.klass, generator.applicationName)
     self.predefined = False
     self.customized = False
     # Phase and page names will be calculated later, when first required.
     self.phases = None
     self.pages = None
Example #5
 def walkRef(self):
     '''How to generate a Ref?'''
     # Update the list of referers
     self.generator.addReferer(self)
     # Add the widget label for the back reference
     back = self.appyType.back
     refClassName = getClassName(self.appyType.klass, self.applicationName)
     if back.hasLabel:
         backName = self.appyType.back.attribute
         self.i18n('%s_%s' % (refClassName, backName), backName)
     # Add the label for the confirm message if relevant
     if self.appyType.addConfirm:
         label = '%s_%s_addConfirm' % (self.classDescr.name, self.fieldName)
         self.i18n(label, po.CONFIRM, nice=False)
Example #6
 def walkRef(self):
     """How to generate a Ref?"""
     # Update the list of referers
     self.generator.addReferer(self)
     # Add the widget label for the back reference
     back = self.appyType.back
     refClassName = getClassName(self.appyType.klass, self.applicationName)
     if back.hasLabel:
         backLabel = "%s_%s" % (refClassName, self.appyType.back.attribute)
         poMsg = PoMessage(backLabel, "", self.appyType.back.attribute)
         poMsg.produceNiceDefault()
         self.generator.labels.append(poMsg)
     # Add the label for the confirm message if relevant
     if self.appyType.addConfirm:
         label = "%s_%s_addConfirm" % (self.classDescr.name, self.fieldName)
         msg = PoMessage(label, "", PoMessage.CONFIRM)
         self.generator.labels.append(msg)
Example #7
 def walkRef(self):
     '''How to generate a Ref?'''
     # Update the list of referers
     self.generator.addReferer(self)
     # Add the widget label for the back reference
     back = self.appyType.back
     refClassName = getClassName(self.appyType.klass, self.applicationName)
     if back.hasLabel:
         backLabel = "%s_%s" % (refClassName, self.appyType.back.attribute)
         poMsg = PoMessage(backLabel, '', self.appyType.back.attribute)
         poMsg.produceNiceDefault()
         self.generator.labels.append(poMsg)
     # Add the label for the confirm message if relevant
     if self.appyType.addConfirm:
         label = '%s_%s_addConfirm' % (self.classDescr.name, self.fieldName)
         msg = PoMessage(label, '', PoMessage.CONFIRM)
         self.generator.labels.append(msg)
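Comparing Example #5 with Examples #6 and #7 suggests that the i18n call in Example #5 is a thin convenience wrapper around PoMessage. A possible sketch of that wrapper, assuming it is a method of the field descriptor class (the real helper may differ):

def i18n(self, msgId, default, nice=True):
    # Assumed wrapper, inferred by comparing Example #5 with Examples #6
    # and #7; not taken from the framework source.
    msg = PoMessage(msgId, '', default)
    if nice:
        msg.produceNiceDefault()
    self.generator.labels.append(msg)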
Example #8
 def __init__(self, klass, orderedAttributes, generator):
     Descriptor.__init__(self, klass, orderedAttributes, generator)
     self.methods = ""  # Needed method definitions will be generated here
     # We remember here encountered pages and groups defined in the Appy
     # type. Indeed, after having parsed all application classes, we will
     # need to generate i18n labels for every child class of the class
     # that declared pages and groups.
     self.labelsToPropagate = []  # ~[PoMessage]~ Some labels (like page,
     # group or action names) need to be propagated in children classes
     # (because they contain the class name). But at this time we don't know
     # yet every sub-class. So we store those labels here; the Generator
     # will propagate them later.
     self.toolFieldsToPropagate = []  # For this class, some fields have
     # been defined on the Tool class. Those fields need to be defined
     # for child classes of this class as well, but at this time we don't
     # know yet every sub-class. So we store field definitions here; the
     # Generator will propagate them later.
     self.name = getClassName(self.klass, generator.applicationName)
     self.predefined = False
     self.customized = False
     # Phase and page names will be calculated later, when first required.
     self.phases = None
     self.pages = None
Example #9
 def __init__(self, klass, orderedAttributes, generator):
     Descriptor.__init__(self, klass, orderedAttributes, generator)
     self.methods = ''  # Needed method definitions will be generated here
     # We remember here encountered pages and groups defined in the Appy
     # type. Indeed, after having parsed all application classes, we will
     # need to generate i18n labels for every child class of the class
     # that declared pages and groups.
     self.labelsToPropagate = []  #~[PoMessage]~ Some labels (like page,
     # group or action names) need to be propagated in children classes
     # (because they contain the class name). But at this time we don't know
     # yet every sub-class. So we store those labels here; the Generator
     # will propagate them later.
     self.toolFieldsToPropagate = []  # For this class, some fields have
     # been defined on the Tool class. Those fields need to be defined
     # for child classes of this class as well, but at this time we don't
     # know yet every sub-class. So we store field definitions here; the
     # Generator will propagate them later.
     self.name = getClassName(self.klass, generator.applicationName)
     self.predefined = False
     self.customized = False
     # Phase and page names will be calculated later, when first required.
     self.phases = None
     self.pages = None
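The comments in Examples #8 and #9 describe a deferred step: once every sub-class is known, the Generator re-issues the stored labels for each child, because the message ids embed the class name. A rough, purely illustrative sketch of that step; attribute names such as msg.id and msg.default are assumptions, not the real PoMessage API:

def propagateLabels(generator, classDescr, childDescriptors):
    # Illustrative only: re-create every stored label for each child class,
    # swapping the parent class name for the child's in the message id.
    for child in childDescriptors:
        for msg in classDescr.labelsToPropagate:
            newId = msg.id.replace(classDescr.name, child.name)
            generator.labels.append(PoMessage(newId, '', msg.default))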
Example #10
def evaluate(model, criterion, dataloader, exp_dir):
    model.eval()

    total_loss = 0
    batch = 0

    all_predicted = []
    all_targets = []
    all_devices = []

    with torch.no_grad():
        for x, targets, devices in dataloader:
            x = torch.FloatTensor(x).to(device)
            targets = torch.LongTensor(targets).to(device)
            outputs = model(x)
            loss = criterion(outputs, targets)
            total_loss += loss

            _, predicted = torch.max(outputs, 1)

            all_predicted.append(predicted)
            all_targets.append(targets)
            all_devices.append(devices)

            batch += 1
    # Forward passes are done and all predictions for the evaluation data
    # are collected; now compute the statistics.

    eval_loss = total_loss / batch
    all_predicted = torch.cat(all_predicted).cpu().data.numpy()
    all_targets = torch.cat(all_targets).cpu().data.numpy()
    all_devices = torch.cat(all_devices).numpy()

    assert (all_predicted.shape == all_targets.shape == all_devices.shape)

    # np.save("prediction.npy", all_predicted)
    # np.save("targets.npy", all_targets)

    # all_predicted = np.load("prediction.npy")
    # all_targets = np.load("targets.npy")

    # compute the confusion matrix
    confu_mat = confusion_matrix(all_targets, all_predicted)
    confu_mat_norm = confu_mat.astype('float') / confu_mat.sum(
        axis=1)[:, np.newaxis]

    class_total = np.sum(confu_mat, axis=1)
    acc = np.trace(confu_mat) / np.sum(class_total)
    # Accuracy is the trace of the confusion matrix divided by the total
    # number of samples in the eval set.

    for i in range(len(class_total)):
        print('* Accuracy of {:18s} : {:2.2f}% {:4d}/{:<4d}'.format(
            getClassName(i), 100 * confu_mat[i][i] / class_total[i],
            confu_mat[i][i], class_total[i]))

    dev_res = deviceStat(all_predicted, all_targets, all_devices)
    for i in range(len(dev_res)):
        print(": Accuracy of Device {:>2s}: {:>4.2f}%".format(
            hparams.devices[i], dev_res[i] * 100))

    print(": Accuracy All: %f%%" % (acc * 100))

    return acc, eval_loss, confu_mat
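A hedged usage sketch for the evaluation routine above; model, criterion, eval_loader, exp_dir and device stand in for objects the surrounding training script would provide:

# Illustrative call site only; all names below are placeholders.
model = model.to(device)
criterion = torch.nn.CrossEntropyLoss()
acc, eval_loss, confu_mat = evaluate(model, criterion, eval_loader, exp_dir)
print('Overall accuracy {:.2f}%, eval loss {:.4f}'.format(acc * 100,
                                                          float(eval_loss)))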
Example #11
                         ngram_range=(1, 2)), preprocessing.LabelEncoder()),
        (TfidfVectorizer(analyzer='word',
                         token_pattern=r'\w{1,}',
                         ngram_range=(1, 2)), preprocessing.LabelEncoder())
    ]

    modelsList = [
        naive_bayes.MultinomialNB(),
        linear_model.LogisticRegression(solver='saga', multi_class='auto'),
        ensemble.RandomForestClassifier(n_estimators=25),
        xgboost.XGBClassifier()
    ]

    for xEncoder, yEncoder in dataEncodersList:
        print('Using {} and {} for encoding xData and yData'.format(
            utils.getClassName(xEncoder), utils.getClassName(yEncoder)))

        # fit the encoders on the dataset
        xEncoder.fit(xData)
        yEncoder.fit(yData)

        print('Encoding and splitting xData, yData')
        xDataEncoded, yDataEncoded = xEncoder.transform(
            xData), yEncoder.transform(yData)
        xTrain, xValid, yTrain, yValid = model_selection.train_test_split(
            xDataEncoded, yDataEncoded)

        for model in modelsList:
            print('Training model:', utils.getClassName(model))
            trainedModel, accuracy = trainModel(model, xTrain, yTrain, xValid,
                                                yValid)
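The trainModel helper called above is not shown in this fragment. Assuming it simply fits the model on the training split and reports validation accuracy, a minimal sketch could look like this:

from sklearn import metrics

def trainModel(model, xTrain, yTrain, xValid, yValid):
    # Assumed behaviour, inferred from the call site: fit on the training
    # split, score on the validation split, return the model and accuracy.
    model.fit(xTrain, yTrain)
    accuracy = metrics.accuracy_score(yValid, model.predict(xValid))
    return model, accuracy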
Example #12

def printTopics(model):
    predicted_topics = model.print_topics(num_topics=5, num_words=5)
    for i, topics in predicted_topics:
        print('Words in Topic {}:\n {}'.format(i + 1, topics))


if __name__ == '__main__':
    arguments = parseArgs()
    dataset = utils.loadDataset(arguments.reprocessDataset,
                                classification=False,
                                splitWords=True)

    # Creating dictionary from dataset, where each unique term is assigned an index
    dictionary = corpora.Dictionary(dataset)

    # Converting list of documents into Bag of Words using dictionary
    doc_term_matrix = [dictionary.doc2bow(doc) for doc in dataset]

    # Training models on the document term matrix
    modelList = [
        LdaModel(doc_term_matrix, num_topics=10, id2word=dictionary, passes=2),
        LsiModel(doc_term_matrix, num_topics=10, id2word=dictionary)
    ]

    for model in modelList:
        print('Topic Modelling using %s' % utils.getClassName(model))
        printTopics(model)
        utils.saveModel(model)
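Examples #11 and #12 call utils.getClassName on model instances purely for logging. One plausible implementation of such a helper (an assumption, not necessarily the project's actual utils module):

def getClassName(obj):
    # Assumed logging helper: return the class name of an instance,
    # e.g. 'MultinomialNB' or 'LdaModel'.
    return type(obj).__name__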