# Example #1 (0)
def check_commands(cmds):
    """Parse a raw console line and dispatch it to the matching handler."""
    parts = cmds.split(" ")
    name = parts[0]

    # Reject anything not registered as a command, then bail out early.
    if name not in commands_list:
        print("\n[-] Unknown command '%s'" % (name))
        print("[!] Type \"help\" for commands list\n")
        return

    if name == "help":
        helper("").print_help()
    elif name == "bash":
        if len(parts) < 2:
            print("\n[-] Not enough arguments")
            print("[!] Usage: bash [command]\n")
        else:
            bash_execute(parts)
    elif name == "load":
        # "load" needs a module name plus at least one argument.
        if len(parts) - 1 < 2:
            print("\n[-] Not enough arguments")
            print("[!] Usage: load [module name] [arguments]")
            print("[!] Type \"modules\" for modules list\n")
        else:
            load_module(parts[1], parts)
    # Any other known command: bare form lists modules, otherwise show
    # per-module help (or complain about an unknown module).
    elif len(parts) == 1:
        helper("").print_modules()
    elif parts[1] not in modules_list:
        print("\n[-] Unknown module '%s'" % (parts[1]))
        print("[!] Type \"modules\" for modules list\n")
    else:
        helper(parts[1]).print_help()
# Example #2 (0)
    def helper(board, parent, depth):
        """Recursively expand the game tree under *parent* to *depth* plies."""
        if depth == 0:
            return

        for move in MOVES:
            # Simulate the move on a private copy so siblings stay unaffected.
            next_board, outcome, remaining = move_sim(copy.deepcopy(board), size,
                                                      move, target)
            if remaining <= 0:
                continue
            # Non-terminal positions get a heuristic score instead of a bool.
            if outcome not in [True, False]:
                outcome = estimator(next_board, size)
            node = Node(str(outcome), parent=parent, id=parent.id + move + ' ')
            helper(next_board, node, depth - 1)
    def backPropogate(self, x, y):
        """Truncated backpropagation-through-time for an encoder-decoder RNN.

        Runs the forward pass (encode/decode), then walks the decoder and
        encoder timesteps backwards, accumulating loss gradients for the six
        weight matrices.

        :param x: input token-index sequence (encoder side)
        :param y: target token-index sequence (decoder side)
        :return: [dLdUe, dLdWe, dLdUd, dLdWd, dLdV, dLdP]

        NOTE(review): `helper(...)` looks like the activation-function
        derivative, and `weight` is not defined in this scope (module global
        or outer name?) — confirm both against the rest of the module.
        """
        lengthX = len(x)
        length = len(y)

        # Forward pass: encoder hidden states h, final context c, and the
        # decoder states s plus output distributions o.
        h = self.encode(x)
        c = h[-1]
        s, o = self.decode(x, c, y)

        # Gradient accumulators, one per weight matrix.
        dLdUe = np.zeros(self.Ue.shape)
        dLdWe = np.zeros(self.We.shape)
        dLdP = np.zeros(self.P.shape)
        dLdV = np.zeros(self.V.shape)
        dLdUd = np.zeros(self.Ud.shape)
        dLdWd = np.zeros(self.Wd.shape)
        delta_c_t = helper(c)
        delta_o = o
        # Subtract 1 at each target index — the standard (softmax +
        # cross-entropy) output gradient, assuming o holds probabilities
        # per timestep — confirm decode()'s output.
        delta_o[np.arange(len(y)), y] -= 1.
        #check the gradients and weight

        # Scale every timestep's output delta by `weight` (origin unknown
        # from this view — TODO confirm).
        delta_o = np.asarray([weight * delta_o[t] for t in np.arange(len(y))])

        # Walk the decoder timesteps backwards.
        for t in np.arange(length)[::-1]:
            # Output-projection gradient for this timestep.
            dLdP += np.outer(delta_o[t], s[t])
            # Backprop through the output projection into the decoder state.
            delta_s_t = np.dot(self.P.T, delta_o[t]) * helper(s[t])
            dLdc = np.zeros(self.hiddenDim)

            # Truncated BPTT over the decoder steps feeding timestep t.
            for decode_step in np.arange(
                    max(1, t - self.backPropogate_truncate), t + 1)[::-1]:

                # summation of gradients to the previous steps
                dLdUd += np.outer(delta_s_t, s[decode_step - 1])
                dLdV += np.outer(delta_s_t, c)
                dLdWd[:, y[decode_step - 1]] += delta_s_t
                dLdc += np.dot(self.V.T, delta_s_t)

                delta_s_t = np.dot(self.Ud.T, delta_s_t) * helper(
                    s[decode_step - 1])
            # Include the first decoder step's contribution when the
            # truncation window reaches back past step 1.
            if t - self.backPropogate_truncate < 1:
                dLdV += np.outer(delta_s_t, c)
                dLdc += np.dot(self.V.T, delta_s_t)
            delta_c_t = dLdc * helper(c)
            # Truncated BPTT over the encoder steps, driven by the context
            # gradient accumulated above.
            for encode_step in np.arange(
                    max(0, lengthX - self.backPropogate_truncate),
                    lengthX)[::-1]:
                dLdWe[:, x[encode_step]] += delta_c_t
                dLdUe += np.outer(delta_c_t, h[encode_step - 1])

                delta_c_t = np.dot(self.Ue.T, delta_c_t) * helper(
                    h[encode_step - 1])

        return [dLdUe, dLdWe, dLdUd, dLdWd, dLdV, dLdP]
class DelegateButton(QStyledItemDelegate):
    """Item delegate that edits cells with a plain push button.

    NOTE(review): written for Python 2 (print statements) and an old
    QVariant-based Qt binding (.toList()/.toString()); most of the original
    star-rating logic is disabled below — confirm whether it should return.
    """

    # Shared translation helper, constructed once at class-definition time.
    helperModel = helper(None)

    def paint(self, painter, option, index):
        # Cell payload — presumably a QVariant list; confirm model schema.
        starRating = index.data().toList()
        #s = QVariant(["ss","nn"])
        # Debug output: translated first element of the cell's list.
        print self.helperModel.translate(starRating[0].toString())

        print starRating

        # Disabled star-rating painting path, kept for reference:
#         if isinstance(starRating, StarRating):
#             if option.state & QtGui.QStyle.State_Selected:
#                 painter.fillRect(option.rect, option.palette.highlight())
#
#             starRating.paint(painter, option.rect, option.palette,
#                     StarRating.ReadOnly)
#         else:
#             super(StarDelegate, self).paint(painter, option, index)

    def sizeHint(self, option, index):
        # NOTE(review): falls through and returns None — Qt expects a QSize;
        # confirm whether the disabled branch below should be restored.
        starRating = index.data()
#         if isinstance(starRating, StarRating):
#             return starRating.sizeHint()
#         else:
#             return super(StarDelegate, self).sizeHint(option, index)

    def createEditor(self, parent, option, index):
        # Always hands back a plain button regardless of the cell contents.
        starRating = index.data()
        btn = QPushButton("hhi", parent=parent)
        return btn
#         if isinstance(starRating, StarRating):
#             editor = StarEditor(parent)
#             editor.editingFinished.connect(self.commitAndCloseEditor)
#             return editor
#         else:
#             return super(StarDelegate, self).createEditor(parent, option, index)

    def setEditorData(self, editor, index):
        # No-op: editor is never populated from the model (disabled below).
        starRating = index.data()
#         if isinstance(starRating, StarRating):
#             editor.setStarRating(starRating)
#         else:
#             super(StarDelegate, self).setEditorData(editor, index)

    def setModelData(self, editor, model, index):
        # No-op: edits are never written back to the model (disabled below).
        starRating = index.data()
#         if isinstance(starRating, StarRating):
#             model.setData(index, editor.starRating())
#         else:
#             super(StarDelegate, self).setModelData(editor, model, index)

    def commitAndCloseEditor(self):
        # Standard Qt pattern: push the editor's value and dismiss it.
        editor = self.sender()
        self.commitData.emit(editor)
        self.closeEditor.emit(editor)
# Example #5 (0)
 def __init__(self,
              origin_lat,
              origin_lon,
              origin_alt):
     """Record the observer (QTH) location and precompute its ECEF position.

     :param origin_lat: observer latitude (degrees, presumably — confirm)
     :param origin_lon: observer longitude (degrees, presumably — confirm)
     :param origin_alt: observer altitude (units unknown from here — confirm)
     """
     # NOTE(review): despite the name, this starts life as a dict — confirm
     # how callers populate it.
     self.my_flight_tuple         = {}
     self.max_range_seen          = 0
     # Observer position as a lat/lon/alt vector.
     self.qth_lla                 = np.array([origin_lat, origin_lon, origin_alt])
     # helper presumably provides coordinate conversions — confirm.
     self.h                       = helper(origin_lat, origin_lon, origin_alt)
     # Same position converted to Earth-centered Earth-fixed coordinates.
     self.qth_ecef                = self.h.GetECEFPositionVectors(self.qth_lla)
# Example #6 (0)
#!/usr/bin/python

import sys 
import tweepy
from auth import getAPIHandle 
from helper import *

# DEBUG flag — not consulted anywhere in this visible snippet; confirm usage.
DEBUG = 0

# Obtain the authenticated Twitter API handle and a helper instance.
apiHandle = getAPIHandle()
twitter = apiHandle.getAPI()
h = helper()

# Load the list of top users from file.
topUserList= h.getTopUserList()

# Fetch mention timelines per top user and dump the raw tweet objects.
# NOTE(review): the unconditional 'break' stops after the FIRST user only —
# presumably a debugging leftover; also note this is Python 2 (print stmt).
for topUser in topUserList:
    public_tweets = twitter.mentions(id=topUser)
    for t in public_tweets:
        print t.__dict__
    break
# Example #7 (0)
import direct.directbase.DirectStart #starts Panda
from pandac.PandaModules import * #basic Panda modules
from direct.showbase.DirectObject import DirectObject #for event handling
from direct.actor.Actor import Actor #for animated models
from direct.interval.IntervalGlobal import * #for compound intervals
from direct.task import Task #for update functions
from projectiles import *
from helper import *
import sys, math, random

# Module-wide helper instance; presumably tracks per-player state (later
# code reads players.players) — confirm against the helper module.
players = helper()



#default weapon (revolver)
class Weapon(DirectObject):
	def __init__(self, x, y, z, angle, bullets, id, projZ):
		self.keyMap = {"firing":0}
		self.prevtime = 0
		#id will be 0 for player, 1 - whatever for ai's
		self.playerid = id
		
		#print(players.players[1])
		
		if str(self.playerid) == "0":
			self.accept("space", self.setKey, ["firing", 1] )
			self.accept("space-up", self.setKey, ["firing", 0] )
		#note - projectiles should be an empty list the first time you create the weapon
		self.bullets = bullets
		
		#set weapon cooldown and how long it slows a player down for
# Example #8 (0)
    def return_stump(self, depth, root, attributes, results, total_results,
                     weights):
        """
        Build a one-level decision stump rooted at ``root``.

        Picks the attribute with the highest weight-adjusted information
        gain, stores its index in ``root.value`` and attaches two leaf nodes
        labelled with the majority class of each branch.

        :param depth: Depth of the tree we are at
        :param root: Node to turn into the stump's decision node
        :param attributes: Per-attribute lists of boolean feature values
        :param results: Class label ('en'/'nl') for every example
        :param total_results: Indices of the examples in play
        :param weights: Per-example Adaboost weights
        :return: ``root`` with value/left/right filled in
        """
        gain = []
        results_en = 0
        results_nl = 0
        # Weighted class totals over the active examples.
        for index in total_results:
            if results[index] == 'en':
                results_en = results_en + 1 * weights[index]
            else:
                results_nl = results_nl + 1 * weights[index]

        for index_attribute in range(len(attributes)):
            # Weighted contingency counts for this attribute.
            count_true_en = 0
            count_true_nl = 0
            count_false_en = 0
            count_false_nl = 0
            for index in total_results:
                if attributes[index_attribute][index] is True and results[
                        index] == 'en':
                    count_true_en = count_true_en + 1 * weights[index]
                elif attributes[index_attribute][index] is True and results[
                        index] == 'nl':
                    count_true_nl = count_true_nl + 1 * weights[index]
                elif attributes[index_attribute][index] is False and results[
                        index] == 'en':
                    count_false_en = count_false_en + 1 * weights[index]
                elif attributes[index_attribute][index] is False and results[
                        index] == 'nl':
                    count_false_nl = count_false_nl + 1 * weights[index]

            # Handling certain outlier conditions
            if count_true_en == 0:
                rem_true_value = 0
                # BUG FIX: the denominator was 'results_nl + results_nl',
                # double-counting the negatives; the total weight is
                # results_nl + results_en, as in every other branch.
                # NOTE(review): an entirely empty False branch would still
                # divide by zero inside the entropy argument — confirm inputs
                # exclude that case (train_decision_tree guards it).
                rem_false_value = ((count_false_en + count_false_nl) /
                                   (results_nl + results_en)) * self.entropy(
                                       count_false_en /
                                       (count_false_nl + count_false_en))
            elif count_false_en == 0:
                rem_false_value = 0
                rem_true_value = ((count_true_en + count_true_nl) /
                                  (results_nl + results_en)) * self.entropy(
                                      count_true_en /
                                      (count_true_nl + count_true_en))
            else:
                rem_true_value = ((count_true_en + count_true_nl) /
                                  (results_nl + results_en)) * self.entropy(
                                      count_true_en /
                                      (count_true_nl + count_true_en))

                rem_false_value = ((count_false_en + count_false_nl) /
                                   (results_nl + results_en)) * self.entropy(
                                       count_false_en /
                                       (count_false_nl + count_false_en))

            # Information gain for splitting on this attribute.
            gain_for_attribute = self.entropy(
                results_en /
                (results_en + results_nl)) - (rem_true_value + rem_false_value)
            gain.append(gain_for_attribute)

        # Split on the attribute with maximum gain.
        max_gain_attribute = gain.index(max(gain))
        root.value = max_gain_attribute
        count_max_true_en = 0
        count_max_true_nl = 0
        count_max_false_en = 0
        count_max_false_nl = 0

        # Weighted majority class per branch.
        # NOTE(review): this loop runs over ALL examples rather than just
        # total_results — confirm that is intended.
        for index in range(len(attributes[max_gain_attribute])):
            if attributes[max_gain_attribute][index] is True:
                if results[index] == 'en':
                    count_max_true_en = count_max_true_en + 1 * weights[index]
                else:
                    count_max_true_nl = count_max_true_nl + 1 * weights[index]
            else:
                if results[index] == 'en':
                    count_max_false_en = count_max_false_en + 1 * weights[index]
                else:
                    count_max_false_nl = count_max_false_nl + 1 * weights[index]

        # Leaf nodes for the True (left) and False (right) branches.
        left_obj = helper(attributes, None, results, None, depth + 1, None,
                          None)
        right_obj = helper(attributes, None, results, None, depth + 1, None,
                           None)
        if count_max_true_en > count_max_true_nl:
            left_obj.value = 'en'
        else:
            left_obj.value = 'nl'
        if count_max_false_en > count_max_false_nl:
            right_obj.value = 'en'
        else:
            right_obj.value = 'nl'

        root.left = left_obj
        root.right = right_obj

        return root
# Example #9 (0)
    def collect_data_ada(self, example_file, hypothesis_file):
        """
        Train Adaboost: build a fixed number of weighted decision stumps and
        pickle them (with their hypothesis weights) to ``hypothesis_file``.

        :param example_file: Training file for training
        :param hypothesis_file: Hypothesis file to write the set of hypothesis
        :return: None
        """
        # Collection of examples from the training file; weights start
        # uniform over all examples.
        statements, results = self.gather_data(example_file)
        weights = [1 / len(statements)] * len(statements)

        # Number of hypotheses (stumps) in the ensemble.
        number_of_decision_stumps = 50

        # One boolean feature extractor per attribute; the list order defines
        # the attribute indices used throughout training.
        feature_fns = [
            self.containsQ,
            self.containsX,
            self.check_avg_word_length_greater_than_5,
            self.presence_of_van,
            self.presence_of_de_het,
            self.check_for_een,
            self.check_for_en,
            self.check_for_common_dutch_words,
            self.check_for_common_english_words,
            self.presence_of_a_an_the,
            self.check_presence_of_and,
        ]

        # attributes[i][j] == value of feature i on training line j.
        attributes = [[fn(line) for line in statements] for fn in feature_fns]

        stump_values = []
        hypot_weights = [1] * number_of_decision_stumps

        # Indices of all the examples.
        number_lst = list(range(len(results)))

        # Adaboost algorithm for training.
        for hypothesis in range(0, number_of_decision_stumps):

            root = helper(attributes, None, results, number_lst, 0, None, None)
            # Generate one stump hypothesis for this round.
            stump = self.return_stump(0, root, attributes, results, number_lst,
                                      weights)
            error = 0
            correct = 0
            incorrect = 0
            for index in range(len(statements)):
                # Accumulate the weight of examples the stump misclassifies.
                if self.prediction(stump, statements[index], attributes,
                                   index) != results[index]:
                    error = error + weights[index]
                    incorrect = incorrect + 1

            for index in range(len(statements)):
                # Down-weight the examples the stump got right.
                if self.prediction(stump, statements[index], attributes,
                                   index) == results[index]:
                    weights[index] = weights[index] * error / (1 - error)
                    correct = correct + 1

            # Renormalize so the weights form a distribution again.
            total = 0
            for weight in weights:
                total += weight
            for index in range(len(weights)):
                weights[index] = weights[index] / total

            # Hypothesis weight (log-odds of the round's error).
            # NOTE(review): error == 0 or error == 1 raises here (log of 0 /
            # division by zero) — confirm inputs never produce a perfect or
            # fully wrong stump.
            hypot_weights[hypothesis] = math.log(((1 - error) / (error)), 2)
            stump_values.append(stump)

        # Dump the generated hypotheses; 'with' guarantees the file handle is
        # closed (the original leaked it).
        with open(hypothesis_file, 'wb') as filehandler:
            pickle.dump((stump_values, hypot_weights), filehandler)
# Example #10 (0)
    def train_decision_tree(self, root, attributes, seen, results,
                            total_results, depth, prevprediction):
        """
        Decides on the best splitting attribute for the given depth, makes a
        node for it and connects it with two child nodes, then recurses into
        each branch. Mutates ``root`` in place.

        :param root: The node that is being considered right now
        :param attributes: Total set of attributes and their values
        :param seen: Attribute indices already split on along this path
        :param results: Final results in a list
        :param total_results: Indices of examples present at this level
        :param depth: Level in consideration
        :param prevprediction: Prediction made before this depth
        :return: None
        """

        # If max depth is reached, return the plurality of the remaining set.
        if depth == len(attributes) - 1:
            counten = 0
            countnl = 0
            for index in total_results:
                # BUG FIX: string comparison must use '==', not 'is' —
                # identity only matched by CPython interning luck.
                if results[index] == 'en':
                    counten = counten + 1
                elif results[index] == 'nl':
                    countnl = countnl + 1
            if counten > countnl:
                root.value = 'en'
                print('en')
            else:
                root.value = 'nl'
                print('nl')

        # No examples left: fall back to the previous level's prediction.
        elif len(total_results) == 0:
            root.value = prevprediction
            print(prevprediction)

        # Only one class left: predict it directly.
        elif self.number_of_diff_values(results, total_results) == 0:
            root.value = results[total_results[0]]
            print(results[total_results[0]])

        # All attributes used along this path: predict the plurality.
        elif len(attributes) == len(seen):
            counten = 0
            countnl = 0
            for index in total_results:
                # BUG FIX: '==' instead of 'is' (see above).
                if results[index] == 'en':
                    counten = counten + 1
                elif results[index] == 'nl':
                    countnl = countnl + 1
            if counten > countnl:
                root.value = 'en'
            else:
                root.value = 'nl'

        # Otherwise find the attribute to split on.
        else:
            gain = []
            results_en = 0
            results_nl = 0

            # Total positive and negative examples at this level.
            for index in total_results:
                if results[index] == 'en':
                    results_en = results_en + 1
                else:
                    results_nl = results_nl + 1
            # For each attribute...
            for index_attribute in range(len(attributes)):

                # Already used for splitting: no gain in reusing it.
                if index_attribute in seen:
                    gain.append(0)
                    continue

                # Else evaluate it as a splitting candidate.
                else:
                    count_true_en = 0
                    count_true_nl = 0
                    count_false_en = 0
                    count_false_nl = 0

                    for index in total_results:

                        if attributes[index_attribute][
                                index] is True and results[index] == 'en':
                            count_true_en = count_true_en + 1
                        elif attributes[index_attribute][
                                index] is True and results[index] == 'nl':
                            count_true_nl = count_true_nl + 1
                        elif attributes[index_attribute][
                                index] is False and results[index] == 'en':
                            count_false_en = count_false_en + 1
                        elif attributes[index_attribute][
                                index] is False and results[index] == 'nl':
                            count_false_nl = count_false_nl + 1

                    # An entirely one-sided split is useless (and would
                    # divide by zero below): gain 0.
                    if (count_true_nl + count_true_en
                            == 0) or (count_false_en + count_false_nl == 0):
                        gain_for_attribute = 0
                        gain.append(gain_for_attribute)
                        continue
                    # Handling certain outlier conditions.
                    if count_true_en == 0:
                        rem_true_value = 0
                        # BUG FIX: denominator was 'results_nl + results_nl'
                        # (double-counted negatives); total examples are
                        # results_nl + results_en as in the other branches.
                        rem_false_value = (
                            (count_false_en + count_false_nl) /
                            (results_nl + results_en)) * self.entropy(
                                count_false_en /
                                (count_false_nl + count_false_en))
                    elif count_false_en == 0:
                        rem_false_value = 0
                        rem_true_value = (
                            (count_true_en + count_true_nl) /
                            (results_nl + results_en)) * self.entropy(
                                count_true_en /
                                (count_true_nl + count_true_en))
                    else:
                        rem_true_value = (
                            (count_true_en + count_true_nl) /
                            (results_nl + results_en)) * self.entropy(
                                count_true_en /
                                (count_true_nl + count_true_en))

                        rem_false_value = (
                            (count_false_en + count_false_nl) /
                            (results_nl + results_en)) * self.entropy(
                                count_false_en /
                                (count_false_nl + count_false_en))

                    # Information gain for this attribute.
                    gain_for_attribute = self.entropy(
                        results_en /
                        (results_en + results_nl)) - (rem_true_value +
                                                      rem_false_value)
                    gain.append(gain_for_attribute)
            # If the max gain is 0, no further split helps along this path.
            continue_var = self.check_for_0_gain(gain)
            if continue_var is False:
                root.value = prevprediction
                print(root.value)
                return

            # Select the max gain attribute.
            max_gain_attribute = gain.index(max(gain))

            seen.append(max_gain_attribute)

            index_True = []
            index_False = []

            # Partition the examples on the chosen attribute's value.
            for index in total_results:
                if attributes[max_gain_attribute][index] is True:
                    index_True.append(index)
                else:
                    index_False.append(index)

            # Plurality prediction at this stage, used as fallback below.
            prediction_at_this_stage = ''

            if results_en > results_nl:
                prediction_at_this_stage = 'en'
            else:
                prediction_at_this_stage = 'nl'

            bool_false = False
            bool_true = True
            root.value = max_gain_attribute

            # Left child covers the True branch of the chosen attribute.
            left_obj = helper(attributes, None, results, index_True, depth + 1,
                              prediction_at_this_stage, bool_true)
            # Right child covers the False branch.
            right_obj = helper(attributes, None, results, index_False,
                               depth + 1, prediction_at_this_stage, bool_false)
            root.left = left_obj
            root.right = right_obj
            # Recurse into both branches.
            self.train_decision_tree(left_obj, attributes, seen, results,
                                     index_True, depth + 1,
                                     prediction_at_this_stage)
            self.train_decision_tree(right_obj, attributes, seen, results,
                                     index_False, depth + 1,
                                     prediction_at_this_stage)

            # Backtrack: free this attribute for sibling paths.
            del seen[-1]
# Example #11 (0)
    def collect_data_dt(self, example_file, hypothesis_file):
        """
        Collect training data, train a decision tree and pickle its root.

        :param example_file: Training file
        :param hypothesis_file: File to which the hypothesis is to be written
        :return: None
        """
        statements, results = self.gather_data(example_file)
        print(len(results))

        # One boolean feature extractor per attribute; the list order defines
        # the attribute indices used throughout training.
        feature_fns = [
            self.containsQ,
            self.containsX,
            self.check_avg_word_length_greater_than_5,
            self.presence_of_van,
            self.presence_of_de_het,
            self.check_for_een,
            self.check_for_en,
            self.check_for_common_dutch_words,
            self.check_for_common_english_words,
            self.presence_of_a_an_the,
            self.check_presence_of_and,
        ]

        # attributes[i][j] == value of feature i on training line j.
        attributes = [[fn(line) for line in statements] for fn in feature_fns]

        # Indices of all the examples.
        number_lst = list(range(len(results)))

        # Tracks the attributes already split on along the current path.
        seen = []
        root = helper(attributes, None, results, number_lst, 0, None, None)

        # Train the tree in place (train_decision_tree returns None, so the
        # original's unused 'value' binding was dropped).
        self.train_decision_tree(root, attributes, seen, results,
                                 number_lst, 0, None)

        # Dump the hypothesis; 'with' ensures the file handle is closed
        # (the original leaked it).
        with open(hypothesis_file, 'wb') as filehandler:
            pickle.dump(root, filehandler)
# Example #12 (0)

#The definitions for methods and classes.

#This defines the classes.
CLASSES = ["sitting", "walking", "standing", "standingup", "sittingdown"]

#This defines the available classifiers and the console parameter it can be started with.
CLASSIFIERS = {'MLE': mleonevsall, 'SOFTZEROONE': softzeroone, 'HINGE': hinge, 'MAV': majorityvote, 'WAVG': weightedclassifiers, 'NN': neuralnetwork, 'NND': neuralnetworkDropout, 'NNDB': neuralnetworkDropoutBatch, 'NNB': neuralnetworkBatch}

#This defines the parameters they will be given.
PARAMETERS = {'MLE': [6e-5, 0.991], 'SOFTZEROONE': [0.0001, 0.99993, 2.5, 1e-7], 'HINGE': [8e-5, 0.9995], 'MAV': None, 'WAVG': None, 'NN': [1e-2, 1, 3, [16, 50, 5]], 'NND': [1e-2, 1, 4, [16, 50, 50, 5], 0.7], 'NNDB': [1e-2, 0.97, 7, [16, 50, 50, 50, 50, 50, 5], 100, 0.8], 'NNB': [1e-2, 0.97, 7, [16, 50, 50, 50, 50, 50, 5], 100, 0.7]}

MAXSTEPS = 3500
MAXNONCHANGINGSTEPS = 1000
helper = helper()

classifierGeneralInstance = None
noLearn = False
terminate = False
startWeights = None
trainingDataFilename = None
testDataFilename = None

#read cmd line arguments
#try:
print("About to checking arguments...")

for i in range(len(sys.argv)):
    if sys.argv[i] == "-h" or sys.argv[i] == "--help":
        print("ClassifierGeneral help:")