Example #1
def doneCapturingPictures(self):
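    """Verify the just-captured images against the stored user database,
    show a welcome or failure message, and remove the temporary capture folder."""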
    self.pushButton_5.setEnabled(False)
    self.pushButton_6.setEnabled(False)
    username = str(self.lineEdit.text())
    test.testPCA("test/testUser", "databases/user_database", username)
    resultText = results.getResults(self.threshold)
    if resultText == "pass":
        resultText = "Welcome " + username + '!'
    else:
        resultText = "Verification of identity was not met.\n An administrator will be notified."
    self.label_12.setText(_translate("Form", resultText, None))
    if os.path.exists("test/testUser"):
        shutil.rmtree("test/testUser")
Example #2
def main():
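    """Load students from data.json, fetch each student's result by roll
    number, and e-mail it to the address looked up via getEmail."""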

    with open('data.json') as json_file:
        data = json.load(json_file)
    #roll_nos={'178x1a0585':"*****@*****.**",'178x1a0580':"*****@*****.**"}
    students = data['students']
    marks = {}

    for student in students:
        roll_no = student['rollno']
        finalResult = results.getResults(roll_no)
        marks[roll_no] = finalResult

    for roll_num in marks:
        print(roll_num)
        receiverId = getEmail(data['students'], roll_num)
        mail.sendGmail("*****@*****.**", "********", receiverId,
                       marks.get(roll_num))
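For reference, a minimal data.json shape that the loop above can consume (a hedged sketch inferred from the keys the snippet reads; 'students' and 'rollno' come from the code, while the e-mail field name is an assumption since getEmail hides it):

{
    "students": [
        {"rollno": "178x1a0585", "email": "student1@example.com"},
        {"rollno": "178x1a0580", "email": "student2@example.com"}
    ]
}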
Example #3
def search(request):
    # In case of a POST request
    if request.method == 'POST':
        # Retrieve the submitted data
        form = SearchForm(request.POST)

        # Check that the form's data is valid
        if form.is_valid():
            # Here we can work on the form's data
            keywords = form.cleaned_data['keywords']
            jaccard_index = form.cleaned_data['jaccard_index']
            envoi = True

            print("Getting classic results ...")
            groups = results.getResults(keywords, jaccard_index)

            print("Getting keyword-related movies ...")
            movies = results.getMovies(keywords)
            # An empty list is falsy, so this records whether any movies were found
            movies_exist = bool(movies)

            print("Getting country info ...")
            countries, img = results.getCountry(keywords)
            is_country = bool(countries)

    # If not POST, it must be GET
    else:
        # Create an empty form
        form = SearchForm()

    return render(request, 'cactus_search/search.html', locals())
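The view reads only two cleaned fields, so a form along these lines would satisfy it (a hedged sketch; the actual field types are not visible in the snippet):

from django import forms

class SearchForm(forms.Form):
    keywords = forms.CharField()
    jaccard_index = forms.FloatField()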
Example #4
    'gas [m3]_peak', 'gasPeak1DBefore', 'gas1DBefore', 'gas1WBefore']      
    
if sys.argv[2] == 'p':
    df = p.extractBuildingData(join(datasets, data), building)
    df = p.mergeWithHolidays(join(datasets, holidayDates), df)
    df = p.mergeWithForecasts(join(datasets, weatherData), df)
    h.save_file(df, join(datasets, unclean))
elif sys.argv[2] == 'c':
    df = cleanData(join(datasets, unclean), building)
    h.save_file(df, join(datasets, clean))
elif sys.argv[2] == 'pl':
    plotData(join(datasets, clean))
elif sys.argv[2] == 'f':
    df = addAllFeatures(join(datasets, clean))
    h.save_file(df, join(saveFolder, allFeatures))
    #h.save_features(df, featuresFile, features)
elif sys.argv[2] == 'sy':
    df = createSyntheticData(join(root, 'testAllF.csv'))
    h.save_file(df, join(saveFolder, synthesized))
elif sys.argv[2] == 's':
    splitData(join(saveFolder, allFeatures), features16)
elif sys.argv[2] == 'r':
    getResults(join(saveFolder, allFeatures),
               join(saveFolder, synthesized),  # or join(root, 'testAllF.csv')
               join(root, 'predict.csv'))
elif sys.argv[2] == 't':
    test(join(saveFolder, allFeatures))
else:
    print(helpstring)


# TODO: write a method to detect which outliers it can find
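# Hedged usage sketch (script name hypothetical; the surrounding script
# presumably binds sys.argv[1] to `building` before this block):
#   python pipeline.py <building> p   -> parse and merge the raw building data
#   python pipeline.py <building> c   -> clean the merged data
#   python pipeline.py <building> pl  -> plot the cleaned data
#   python pipeline.py <building> f   -> add the feature columns
#   python pipeline.py <building> sy  -> create synthetic data
#   python pipeline.py <building> s   -> split the feature set
#   python pipeline.py <building> r   -> produce prediction results
#   python pipeline.py <building> t   -> run the test routine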
Example #5
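The snippet relies on imports that the page stripped; below is a hedged reconstruction (extractData, generateAverageWordVectors, lossFunction, evalFunction, and the master config module are project-specific names taken from the code itself and assumed to be defined elsewhere). Note the code targets Python 2 and TensorFlow 1.x (xrange, print statements, tf.initialize_all_variables).

import pickle
from datetime import datetime

import numpy as np
import scipy.io as sio
import tensorflow as tf
import tflearn
from scipy import spatial

import master
import results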
def main(argv=None):

    with open('objs.pickle') as f:
        __C = pickle.load(f)

    # Get the data.
    train_classes_filename = __C.get('TRAIN_CLASS_PATH')
    test_classes_filename = __C.get('TEST_CLASS_PATH')
    attribute_vectors_filename = __C.get('ATTRIBUTE_VECTOR_PATH')
    predicate_matrix_filename = __C.get('PREDICATE_MATRIX_PATH')
    attr_classifiers_filename = __C.get('ATTR_CLASSIFIER_RESULTS_PATH')
    groundtruth_labels_filename = __C.get('GROUND_TRUTH_LABELS')
    train_image_labels_filename = __C.get('TRAIN_IMAGE_LABELS')
    train_scores_filename = __C.get('TRAIN_SCORES')
    logFileName = __C.get('LOG_FILE')
    tmpFileName = __C.get('TMP_FILENAME')
    plotAccuracyPerNIter = __C.get('PLOT_ACC_PER_N_ITER')

    networkModel = __C.get('CURR_MODEL')

    # Get the number of epochs for training.
    num_epochs = __C.get('NUM_EPOCH')

    # Get the verbose status
    verbose = __C.get('VERBOSE')

    # Get the size of layer one.
    num_hidden = __C.get('CURR_HIDDEN')

    # Get the status of hand-crafted examples
    perturbed_examples = __C.get('PERTURBED_EXAMPLES')

    # Get the corruption level of hand-crafted examples
    corruption_level = __C.get('PERTURBED_EXAMPLE_CORRLEVEL')

    # Get the batch size
    batch_size = __C.get('MAX_BATCH_SIZE') - 1

    trainClasses = extractData(train_classes_filename, 'trainClasses')
    testClasses = extractData(test_classes_filename, 'testClasses')
    attributeVectors = extractData(attribute_vectors_filename, 'attributeVectors')
    predicateMatrix = extractData(predicate_matrix_filename, 'predicateMatrix')
    attributeClassifierResults = extractData(attr_classifiers_filename, 'attClassifierResults')
    groundTruthLabels = extractData(groundtruth_labels_filename, 'groundTruthLabels')
    trainImageLabels = extractData(train_image_labels_filename, 'trainImageLabels')
    trainScores = extractData(train_scores_filename, 'trainScores')

    # XXX TEMPORARY
    #trainClasses = trainClasses / np.linalg.norm(trainClasses, axis = 1, keepdims=True)
    #testClasses = testClasses / np.linalg.norm(testClasses, axis = 1, keepdims=True)
    #attributeVectors = attributeVectors / np.linalg.norm(attributeVectors, axis = 1, keepdims=True)

    # XXX TEMPORARY
    #const_scale=0.4
    #attributeVectors = attributeVectors*const_scale
    #trainClasses = trainClasses*const_scale
    #testClasses = testClasses*const_scale


    # Get the shape of the training data.
    train_size, num_features = trainClasses.shape

    # Get the shape of the training images.
    image_size, _ = predicateMatrix.shape

    # Get average word vectors
    averageTrainAttributeVectors = generateAverageWordVectors(attributeVectors, trainScores)
    averageTrainPredicateMatrixBasedAttributeVectors = generateAverageWordVectors(attributeVectors, predicateMatrix)
    averageTestAttributeVectors = generateAverageWordVectors(attributeVectors, attributeClassifierResults)

    # This is where training samples and labels are fed to the graph.
    # These placeholder nodes will be fed a batch of training data at each
    # training step using the {feed_dict} argument to the Run() call below.
    classVecInput = tf.placeholder("float", shape=[None, num_features], name='CC')
    correctAttributeVecInput = tf.placeholder("float", shape=[None, num_features], name='CA')
    wrongPredicateBasedAttributeVecInput = tf.placeholder("float", shape=[None, num_features], name='WPA')
    correctPredicateBasedAttributeVecInput = tf.placeholder("float", shape=[None, num_features], name='CPA')
    hammingDistanceInput = tf.placeholder("float", shape=[None, None], name='HD')
    wrongClassVecInput = tf.placeholder("float", shape=[None, num_features], name='WC')
    groundTruthLabelsInput = tf.constant(groundTruthLabels.T, 'float')

    # Hamming distance between class vectors.
    hammingDistClasses = np.zeros((len(predicateMatrix), len(predicateMatrix)), dtype=float)
    for i in xrange(len(predicateMatrix)):
        for j in xrange(len(predicateMatrix)):
            hammingDistClasses[i, j] = spatial.distance.hamming(predicateMatrix[i, :], predicateMatrix[j, :])

    # Initialize the hidden weights and pass inputs
    with tf.variable_scope("wScope", reuse=False):
        wHidden = tf.get_variable('W1',
            shape=[num_features, num_hidden],
            initializer=tflearn.initializations.uniform_scaling(shape=None, factor=1.0, dtype=tf.float32, seed=0))

        wHidden2 = tf.get_variable('W2',
            shape=[num_hidden, num_hidden],
            initializer=tflearn.initializations.uniform_scaling(shape=None, factor=1.0, dtype=tf.float32, seed=0))

        firstLayer = tf.nn.tanh(tf.matmul(classVecInput, wHidden))
        correctClassOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))

    with tf.variable_scope("wScope", reuse=True):
        wHidden = tf.get_variable('W1')
        wHidden2 = tf.get_variable('W2')

        firstLayer = tf.nn.tanh(tf.matmul(correctAttributeVecInput, wHidden))
        correctAttributeOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))

    with tf.variable_scope("wScope", reuse=True):
        wHidden = tf.get_variable('W1')
        wHidden2 = tf.get_variable('W2')

        firstLayer = tf.nn.tanh(tf.matmul(wrongClassVecInput, wHidden))
        wrongClassOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))

    with tf.variable_scope("wScope", reuse=True):
        wHidden = tf.get_variable('W1')
        wHidden2 = tf.get_variable('W2')

        firstLayer = tf.nn.tanh(tf.matmul(correctPredicateBasedAttributeVecInput, wHidden))
        correctPredicateBasedAttributeOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))

    with tf.variable_scope("wScope", reuse=True):
        wHidden = tf.get_variable('W1')
        wHidden2 = tf.get_variable('W2')

        firstLayer = tf.nn.tanh(tf.matmul(wrongPredicateBasedAttributeVecInput, wHidden))
        wrongPredicateBasedAttributeOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))

    loss = tf.reduce_sum(
                lossFunction(correctClassOutput, correctAttributeOutput, wrongClassOutput,
                correctPredicateBasedAttributeOutput, wrongPredicateBasedAttributeOutput, hammingDistanceInput))

    # Optimization.
    train = tf.train.AdamOptimizer(1e-4).minimize(loss)

    accuracy = evalFunction(correctClassOutput, correctAttributeOutput, groundTruthLabelsInput)

    classVectorsTensor = correctClassOutput
    attributeVectorsTensor = correctAttributeOutput

    # Write results to the tmp file.
    file_ = open(tmpFileName, 'a')

    logFile = open(logFileName, 'a')

    saver = tf.train.Saver()

    randomnessFlag = False

    timeStamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    loggedTrainData = []
    loggedTestData = []
    initializationFlag = False

    # Create a local session to run this computation.
    with tf.Session() as s:
        # Run all the initializers to prepare the trainable parameters.
        try:
            if __C.get('SAVE_MODEL') == True:
                saver.restore(s, __C.get('LEARNED_MODEL_PATH') + str(num_hidden) + ".ckpt")
            else:
                tf.initialize_all_variables().run()
        except Exception:
            # Fall back to fresh initialization if no checkpoint can be restored.
            tf.initialize_all_variables().run()


        totalLoss = 0

        numberOfVectorPerIter = len(trainImageLabels)

        # Iterate and train.
        for step in xrange(num_epochs * image_size):

            offset = step % train_size

            currClassIndices = [i for i, x in enumerate(trainImageLabels) if x == offset + 1]  # is this class present in the training set?
            if currClassIndices:

                currTrainClass = trainClasses[offset:(offset + 1), :]  # word vector of the current training class

                # average attribute word vector valid for the current training class
                currTrainAttributes = averageTrainAttributeVectors[currClassIndices, :]

                validIndices = range(0, numberOfVectorPerIter)
                validIndices = list(set(validIndices) - set(currClassIndices))  # training indices belonging to the other classes
                invalidClasses = np.unique(trainImageLabels[validIndices])  # the other classes
                wrongTrainClasses = trainClasses[invalidClasses - 1, :]  # word vectors of the other classes

                currPredicateBasedTrainAttributes = averageTrainPredicateMatrixBasedAttributeVectors[np.unique(trainImageLabels[currClassIndices]) - 1, :]
                wrongPredicateBasedTrainAttributes = averageTrainPredicateMatrixBasedAttributeVectors[np.unique(invalidClasses - 1), :]


                if master.applyLossType == master.lossType[2]:
                    currPredicateBasedTrainAttributes = \
                        np.repeat(currPredicateBasedTrainAttributes, len(currTrainAttributes), axis=0)

                    repeatTimes = len(currTrainAttributes) / len(wrongPredicateBasedTrainAttributes)

                    wrongPredicateBasedTrainAttributes = \
                        np.repeat(wrongPredicateBasedTrainAttributes, repeatTimes+1, axis=0)

                    wrongPredicateBasedTrainAttributes = wrongPredicateBasedTrainAttributes[0:len(currTrainAttributes),:]

                currentHammingDistance = hammingDistClasses[offset:(offset + 1), invalidClasses-1]

                # one training step: forward pass plus parameter update
                _, curr_loss = s.run([train, loss], feed_dict={classVecInput: currTrainClass,
                                    correctAttributeVecInput: currTrainAttributes,
                                    wrongClassVecInput: wrongTrainClasses,
                                    correctPredicateBasedAttributeVecInput: currPredicateBasedTrainAttributes,
                                    wrongPredicateBasedAttributeVecInput: wrongPredicateBasedTrainAttributes,
                                    hammingDistanceInput: currentHammingDistance.T})

                totalLoss = curr_loss + totalLoss

                if offset == 0:
                    if verbose:
                        print 'Loss: ', totalLoss


                    trainAccuracy = 0
                    testAccuracy = 0

                    accuracyFlag = False

                    if (step % plotAccuracyPerNIter) == 0:
                        # evaluate network results
                        trainScores = \
                            accuracy.eval(feed_dict={classVecInput: trainClasses[np.unique(trainImageLabels)-1,:],
                                                     correctAttributeVecInput: averageTrainAttributeVectors})

                        trainAccuracy = results.getResults(trainImageLabels, trainScores)
                        print 'train Accuracy: ' + str(trainAccuracy)
                        accuracyFlag = True

                        testScores = \
                            accuracy.eval(feed_dict={classVecInput: testClasses,
                                                     correctAttributeVecInput: averageTestAttributeVectors})

                        testAccuracy = results.getResults(groundTruthLabels, testScores, False)
                        print 'Test Accuracy: ' + str(testAccuracy)


                    if initializationFlag == False:
                        if master.saveWordVectors == True:
                            initialTestClasses = \
                                classVectorsTensor.eval(feed_dict={classVecInput: testClasses,
                                                                   correctAttributeVecInput: averageTestAttributeVectors})
                            initialAttributes = \
                                attributeVectorsTensor.eval(feed_dict={classVecInput: testClasses,
                                                                       correctAttributeVecInput: averageTestAttributeVectors})
                            initialTrainClasses = \
                                classVectorsTensor.eval(feed_dict={classVecInput: trainClasses[np.unique(trainImageLabels) - 1, :],
                                               correctAttributeVecInput: averageTrainAttributeVectors})

                            initialTestScores = testScores

                        initializationFlag = True

                    if accuracyFlag == True:
                        loggedTrainData.append(trainAccuracy*100)
                        loggedTestData.append(testAccuracy*100)
                        logFile.write('#HiddenUnit:'+ str(__C.get('CURR_HIDDEN'))
                                      +',Step:'+str(step)+',Accuracy:'+str(testAccuracy*100) + '\n')

                        if master.applyCrossValidation == False:
                            results.drawAccuracyCurves(loggedTrainData, loggedTestData, timeStamp)

                    if (totalLoss <= __C.get('OVERFITTING_THRESHOLD') or __C.get('STOP_ITER') <= step) and step !=0:
                        testAccuracy = results.getResults(groundTruthLabels, testScores, False)
                        file_.write(str(testAccuracy) + '\n')
                        file_.close()
                        logFile.close()
                        results.getResults(groundTruthLabels, testScores, False, True)

                        if __C.get('SAVE_MODEL') == True:
                            saver.save(s, __C.get('LEARNED_MODEL_PATH')+str(num_hidden)+".ckpt")

                        if master.saveWordVectors == True:

                            wordVectorsSavePath = __C.get('WORD_VECTORS')

                            finalTrainClasses = \
                                classVectorsTensor.eval(feed_dict={classVecInput: trainClasses[np.unique(trainImageLabels) - 1, :],
                                               correctAttributeVecInput: averageTrainAttributeVectors})

                            finalTestClasses = \
                                classVectorsTensor.eval(feed_dict={classVecInput: testClasses,
                                                         correctAttributeVecInput: averageTestAttributeVectors})

                            finalAttributes = \
                                attributeVectorsTensor.eval(feed_dict={classVecInput: testClasses,
                                                         correctAttributeVecInput: averageTestAttributeVectors})

                            finalTestScores = testScores

                            sio.savemat(wordVectorsSavePath+'initialTestClasses.mat', {'initialTestClasses': initialTestClasses})
                            sio.savemat(wordVectorsSavePath+'finalTestClasses.mat', {'finalTestClasses': finalTestClasses})
                            sio.savemat(wordVectorsSavePath+'initialAttributes.mat', {'initialAttributes': initialAttributes})
                            sio.savemat(wordVectorsSavePath+'finalAttributes.mat', {'finalAttributes': finalAttributes})
                            sio.savemat(wordVectorsSavePath + 'initialTrainClasses.mat',{'initialTrainClasses': initialTrainClasses})
                            sio.savemat(wordVectorsSavePath + 'finalTrainClasses.mat',{'finalTrainClasses': finalTrainClasses})
                            sio.savemat(wordVectorsSavePath + 'initialTestScores.mat',{'initialTestScores': initialTestScores})
                            sio.savemat(wordVectorsSavePath + 'finalTestScores.mat',{'finalTestScores': finalTestScores})
                        return
                    totalLoss = 0
Example #6
import results
import mail

roll_nos = {
    '178x1a0585': "*****@*****.**",
    '178x1a0580': "*****@*****.**"
}
marks = {}

for roll_no in roll_nos:
    finalResult = results.getResults(roll_no)
    marks[roll_no] = finalResult

print(marks)

for roll_num in marks:
    print(roll_num)
    mail.sendGmail("*****@*****.**", "********", roll_nos[roll_num],
                   marks.get(roll_num))
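Judging by the call above, mail.sendGmail appears to take (sender address, password, recipient address, message body); the masked values are placeholders for real credentials.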