Example #1
File: Peak_bkup.py  Project: SHMoody/SamL
    def area(self, bg=0):

        if b_any(self.shape in x for x in self.shapes[:4]):
            self.area_anal = self.popt[-2]  # analytic area: second-to-last fit parameter
        elif self.shape == 'Multiple':
            self.area_anal = 0.
            for i in self.__argsareeasy(self.popt)[:-1]:
                self.area_anal += i[-1]

        if bg == 0:
            bg = (self.xdata.max() - self.xdata.min()) * self.popt[-1]
        elif bg == 1:
            bg = (self.ydata[0] + self.ydata[-1]) * \
                (self.xdata.max() - self.xdata.min())
        f = self.funcfinder()
        if (b_any(self.shape in x for x in self.shapes[:4])
                or self.shape == 'Multiple'):
            total = quad(f,
                         self.xdata.min(),
                         self.xdata.max(),
                         args=tuple(self.popt))
            self.area_quad = total[0] - bg
            self.area_quade = total[1]
        xvals = np.linspace(self.xdata.min(), self.xdata.max(), 10000)
        self.area_trapz = trapz(f(xvals, *self.popt), xvals) - bg

        self.area_raw = trapz(self.ydata, x=self.xdata) - bg
Example #2
def parse_solution(problem, prefix):
    stdout_filename = prefix + '.stdout'
    with open(stdout_filename) as result:
        stdout_txt = result.readlines()

    import ast
    import re
    import sys
    from __builtin__ import any as b_any
    a_solution = b_any('----------' in x for x in stdout_txt)
    the_solution = b_any('==========' in x for x in stdout_txt)

    if not (a_solution or the_solution):
        return False, None, None

    stdout_txt.reverse()
    found = False
    for index, line in enumerate(stdout_txt):
        if 'assignments' in line:
            assignments = re.findall(r'\[.*\]', line)
            solution = ast.literal_eval(assignments[0])
            objectives = re.findall(r'\d+', stdout_txt[index+1])
            objective = int(objectives[0])
            found = True
            break
    if not found:
        print "ERROR - Minizinc could not parse output."
        sys.exit(-1)
    return the_solution, solution, objective
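All of these examples lean on the same idiom: alias the builtin any as b_any (often done when a wildcard import would otherwise shadow any) and feed it a generator that tests substring membership against every element of a sequence. A minimal, self-contained sketch of the pattern with made-up sample data; the Python 3 fallback is an assumption about where you run it:

# the b_any idiom: "does any line contain this marker?"
try:
    from __builtin__ import any as b_any   # Python 2
except ImportError:
    from builtins import any as b_any      # Python 3

lines = ["==========", "x = 3;", "----------"]            # made-up solver output
a_solution = b_any("----------" in line for line in lines)
the_solution = b_any("==========" in line for line in lines)
print(a_solution, the_solution)                            # both True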
Example #3
def gather_nifti_globs(pipeline_output_folder, resource_list):

    # the number of directory levels under each participant's output folder
    # can vary depending on what preprocessing strategies were chosen, and
    # there may be several output filepaths with varying numbers of directory
    # levels

    # this parses them quickly while also catching each preprocessing strategy

    import os
    import glob
    from __builtin__ import any as b_any

    ext = ".nii"
    nifti_globs = []

    if len(resource_list) == 0:
        err = "\n\n[!] No derivatives selected!\n\n"
        raise Exception(err)

    # remove any extra /'s
    pipeline_output_folder = pipeline_output_folder.rstrip("/")

    # grab MeanFD_Jenkinson just in case
    resource_list.append("power_params")

    print "\n\nGathering the output file paths from %s..." \
          % pipeline_output_folder

    for resource_name in resource_list:

        glob_string = os.path.join(pipeline_output_folder, "*",
                                   resource_name, "*", "*")

        # get all glob strings that result in a list of paths where every path
        # ends with a NIFTI file

        prog_string = ".."

        while len(glob.glob(glob_string)) != 0:

            if b_any(ext in x for x in glob.glob(glob_string)):
                nifti_globs.append(glob_string)

            glob_string = os.path.join(glob_string, "*")
            prog_string = prog_string + "."
            print prog_string

    if len(nifti_globs) == 0:
        err = "\n\n[!] No output filepaths found in the pipeline output " \
              "directory provided for the derivatives selected!\n\nPipeline "\
              "output directory provided: %s\nDerivatives selected:%s\n\n" \
              % (pipeline_output_folder, resource_list)
        raise Exception(err)

    return nifti_globs
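The widening-glob loop above is the heart of gather_nifti_globs: it keeps appending another "*" level to the pattern until glob stops matching anything, and records every depth at which the matches include NIFTI files. A stripped-down sketch of just that loop; the pipeline directory and resource name are hypothetical:

import glob
import os

pipeline_dir = "/data/cpac_output"          # hypothetical path
pattern = os.path.join(pipeline_dir, "*", "alff", "*", "*")
nifti_globs = []
while glob.glob(pattern):                   # stop once nothing matches any more
    if any(".nii" in path for path in glob.glob(pattern)):
        nifti_globs.append(pattern)         # this depth contains NIFTI files
    pattern = os.path.join(pattern, "*")    # descend one directory level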
Example #5
def get_names_from_link(link):
    r = requests.get(link)
    if "There are no listings for" in r.text:
        return []
    soup = BeautifulSoup(r.text, "html.parser")
    body = soup.find("body")
    content = body.find("div", id="Content")
    listings = content.find_all("p")[2]
    split = str(listings).split("<br")
    # keep the "Name:" entries only when the page mentions "sitar" (either case)
    has_sitar = b_any("sitar" in x for x in split) or b_any("Sitar" in x for x in split)
    return [a for a in split if "Name:" in a and has_sitar]
Example #6
def findId(movie_name, dataset):
    # normalise the stored titles (Python 2 byte/str handling)
    decoded = [str(x).decode('UTF8') for x in dataset['titles']]
    encoded = [x.encode('ascii', 'replace') for x in decoded]

    lower_encoded = [x.lower() for x in encoded]
    query = movie_name.lower()
    if b_any(query in x for x in lower_encoded):
        # index of the first title that contains the query as a substring
        return next(i for i, x in enumerate(lower_encoded) if query in x)
    else:
        return 1
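A quick, made-up check of findId's substring lookup (Python 2, matching the byte-string handling above); the dataset literal is an assumption:

dataset = {'titles': ['The Matrix (1999)', 'Toy Story (1995)']}
print(findId('toy story', dataset))    # 1: first title containing the query
print(findId('nonexistent', dataset))  # also 1: the not-found default is ambiguous here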
Example #7
def __workerProcess__(queue, database):
    """
    Worker process. Downloads documents from URLs.

    Then parses them and puts any URLs in the queue.
    """
    while True:
        try:
            url = queue.get(block=False)
            print "Starting processing of " + url
            response = urlopen(url)

            # Check headers to find out if URL leads to a html document
            if b_any('text/html' in x for x in response.info().headers):
                htmlBytes = response.read().decode('utf-8')

                conn = sqlite3.connect(database)
                c = conn.cursor()
                # Insert data(document) in content storage
                c.execute("INSERT INTO storage VALUES (?, ?, ?)",
                          (strftime("%Y-%m-%d %H:%M:%S"),
                           htmlBytes, unicode(url)))
                # Save changes and close connection
                conn.commit()
                conn.close()

                # parse document, replace relative URLs with canonized URL
                htmlparser = LinksExtractor.LinksExtractor()
                links = htmlparser.get_links(htmlBytes, url)

                # add URLs to queue
                for link in links:
                    conn = sqlite3.connect(database)
                    c = conn.cursor()
                    # Check if URL is already in queue
                    c.execute(
                        """SELECT count(*) FROM frontier WHERE url = (?)
                        """, (unicode(link),))
                    result = c.fetchone()[0]
                    # Save changes and close connection
                    conn.commit()
                    conn.close()
                    if not result:
                        queue.put(link)
            else:
                print url + ' is not a document'
        except Exception, e:
            # sleep if queue is (temporarily) empty or there is no response
            sleep(1)

            # if no update for a while: kill thread
            if queue.isTimedOut():
                break
Example #8
def get_final_list(wordsList):

    '''Return the semantically linked words: traverse the list and keep every
       word that is a synonym, hyponym or hypernym of another word.'''

    words1 = wordsList
    words2 = copy.copy(wordsList)
    final_list = []
    for w in words1:
        synlst = wordNet.getsynList(w)
        hyplst = wordNet.gethypSet(w)
        hyperlst = wordNet.gethyperSet(w)
        # iterate over a copy so that removing matches from words2 is safe
        for w2 in words2[:]:
            if (w2 in synlst
                    or w2 in hyplst
                    or w2 in hyperlst
                    or b_any(w2 in x for x in synlst)
                    or b_any(w2 in x for x in hyplst)
                    or b_any(w2 in x for x in hyperlst)):
                final_list.append(w2)
                words2.remove(w2)
    return final_list
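The inner loop above has to remove matches from words2 while walking that same list, so it iterates over a shallow copy (words2[:]). The pattern in isolation, with a throwaway list:

words = ["cat", "feline", "felidae", "dog"]
for w in words[:]:           # iterate over a shallow copy ...
    if w.startswith("f"):
        words.remove(w)      # ... so removing from the original cannot skip items
print(words)                 # ['cat', 'dog']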
Example #9
File: Peak_bkup.py  Project: SHMoody/SamL
    def area(self, bg=0):

        if b_any(self.shape in x for x in self.shapes[:6]):
            self.area_anal = self.popt[-2]  # analytic area: second-to-last fit parameter
        elif self.shape == 'Multiple':
            self.area_anal = 0.
            for i in self.__argsareeasy(self.popt)[:-1]:
                self.area_anal += i[-1]
        else:
            self.area_anal = 'No Analytical Area'

        if bg == 0:
            bg = (self.xdata.max() - self.xdata.min()) * self.popt[-1]
        elif bg == 1:
            bg = (((self.xdata.max() - self.xdata.min()) *
                   (self.ydata.max() - self.ydata.min())) *
                  (self.zdata[0][0] + self.zdata[0][-1] + self.zdata[-1][0] +
                   self.zdata[-1][-1]) / 4)
Example #10
    def parse(self, response):
        # disabled sanity check:
        # if self.start_urls[0].endswith(".com") != 0:
        #     print "wrong data source."
        for sel in response.xpath('//ul/li'):

            # 1. get all the links as they are.
            title = sel.xpath('a/text()').extract()
            url = sel.xpath('a/@href').extract()

            # 2. now, for each link, keep only those whose titles match the
            #    product name
            products = ["Endpoint Protection"]

            if b_any(products[0] in x for x in sel.xpath('a/text()').extract()):
                # if the link is relative, prepend the domain name
                if url[0].find("http") == -1:
                    url[0] = self.start_urls[0] + url[0]

                self.item['url'] = url
                self.item['title'] = title
                self.specific.append(url)
                self.links.append(self.item['url'][0] + ',' + self.item['title'][0])

        self.links2.append(len(self.specific))
        self.links2.append(products[0])
        for link in self.links:
            self.links2.append(link)

        for data in self.links2:
            print data
Example #11
  def checkForOutputFailure(self,output,workingDir):
    """
      This method is called by the RAVEN code at the end of each run  if the return code is == 0.
      This method needs to be implemented by codes that return a return code of 0 even when the run fails.
      This can happen in codes that record the failure of the job (e.g. not converged, etc.) as normal termination (returncode == 0).
      This method can be used, for example, to parse the output file looking for a special keyword that indicates that a particular job failed
      (e.g. in RELAP5 it would be the keyword "********")
      @ In, output, string, the Output name root
      @ In, workingDir, string, current working dir
      @ Out, failure, bool, True if the job is failed, False otherwise
    """
    failure = True
    goodWord = ["Transient terminated by end of time step cards","Transient terminated by trip"]
    try:
      outputToRead = open(os.path.join(workingDir,output+'.o'),"r")
    except:
      return failure
    readLines = outputToRead.readlines()

    for goodMsg in goodWord:
      if b_any(goodMsg in x for x in readLines[-20:]):
        failure = False
    return failure
Example #12
def _addTopoInfo(theChainDef,chainDict, topoAlgs, doAtL2AndEF=True):

    maxL2SignatureIndex = -1
    for signatureIndex,signature in enumerate(theChainDef.signatureList):
        if signature['listOfTriggerElements'][0][0:2] == "L2":
            maxL2SignatureIndex = max(maxL2SignatureIndex,signatureIndex)
    
    inputTEsL2 = theChainDef.signatureList[maxL2SignatureIndex]['listOfTriggerElements'] 
    inputTEsEF = theChainDef.signatureList[-1]['listOfTriggerElements']
 
    if ('muvtx' in topoAlgs):
       # import pdb;pdb.set_trace()
        theChainDef = generateMuonClusterLLPchain(theChainDef, chainDict, inputTEsL2, inputTEsEF, topoAlgs)
    elif ('revllp' in topoAlgs):
        theChainDef = generateReversedCaloRatioLLPchain(theChainDef, chainDict, inputTEsL2, inputTEsEF, topoAlgs)
    elif ('llp' in topoAlgs):
        theChainDef = generateCaloRatioLLPchain(theChainDef, chainDict, inputTEsL2, inputTEsEF, topoAlgs)
    elif b_any(('invm' in x or 'deta' in x) for x in topoAlgs):
        theChainDef = addDetaInvmTopo(theChainDef,chainDict,inputTEsL2, inputTEsEF, topoAlgs)
    else:
        logJet.error('Your favourite topo configuration is missing.')

    return theChainDef
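The elif above spells out both membership tests per element; the tempting shorthand 'invm' or 'deta' in x parses as 'invm' or ('deta' in x), which is the non-empty string 'invm' and therefore always truthy, so such a branch would fire for every chain. A two-line illustration:

x = "something_else"
print('invm' or 'deta' in x)         # 'invm' -> truthy for any x
print('invm' in x or 'deta' in x)    # False  -> the intended membership test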
Example #13
def detect_objects(frame):
    cv2.imwrite('first_frame.jpg', frame)

    # Making sure that the file is now available on disk
    while True:
        i = 0
        for fname in os.listdir('.'):
            if fname == "first_frame.jpg":
                i = 1
                break
        if i == 1:
            break

    # Run darknet from python
    p = subprocess.Popen(["./darknet", "detector",  "test",  "cfg/coco.data", "cfg/yolo.cfg", "yolo.weights", "first_frame.jpg"], cwd=os.getcwd(), stdout=subprocess.PIPE).wait()
    
    # If couldn't run darknet, exit
    if p != 0:
        print ('Couldn\'t run darknet')
        sys.exit()

    # Our customized Darknet generates a file containing bounding box of recognized objects
    with open("program.txt") as f:
        content = f.readlines()
    
    content = [x.strip() for x in content]

    # Selecting desirable objects
    a = []
    for i in content:
        indices = [s.start() for s in re.finditer(',', i)]
        if (b_any(i[:i.find(':')] in x for x in objects_to_be_found)):
            a.append([i[:i.find(':')], int(i[i.find(':')+2:indices[0]]), int(i[indices[0]+1:indices[1]]), int(i[indices[1]+1:indices[2]]), int(i[indices[2]+1:-1])])
            print ("FOUND: ", i[:i.find(':')])
        else:
            print ("Also found: ", i[:i.find(':')])
    return a
Example #14
File: my_siem.py  Project: w0lverine/SENAMI
        elif sys.argv[2] == 'High':
            filter = '[High Alert]'
        elif sys.argv[2] == 'Tampering':
            filter = 'Value tampering'
        elif sys.argv[2] == 'Other':
            filter = 'non-S7'
        else:
            filter = sys.argv[2]
        whole_alert = []

        for line in f:
            if 'Alert]' in line and len(whole_alert) == 0:
                whole_alert.append(line.rstrip())
            elif 'Alert]' not in line and line != '\n':
                whole_alert.append(line.rstrip())
            elif 'Alert]' in line and b_any('Alert]' in x
                                            for x in whole_alert):
                #print whole_alert
                if sys.argv[1] == "-display":
                    if b_any(filter in x for x in whole_alert):
                        log_ctr += 1
                        print "\n".join(whole_alert)
                        print
                elif sys.argv[1] == "-exclude":
                    if not b_any(filter in x for x in whole_alert):
                        log_ctr += 1
                        print "\n".join(whole_alert)
                        print
                whole_alert = []
                whole_alert.append(line.rstrip())

            #if len(whole_alert) > 0 and line not in whole_alert:
Example #15
def main(_):
    #convert jpg image(s) into image representations using AlexNet:
    filenames = [
        os.path.join(image_dir, f) for f in [
            'overly-attached-girlfriend.jpg',
            'high-expectations-asian-father.jpg', 'foul-bachelor-frog.jpg',
            'stoner-stanley.jpg', 'y-u-no.jpg', 'willy-wonka.jpg',
            'futurama-fry.jpg', 'success-kid.jpg', 'one-does-not-simply.jpg',
            'bad-luck-brian.jpg', 'first-world-problems.jpg',
            'philosoraptor.jpg', 'what-if-i-told-you.jpg', 'TutorPP.jpg'
        ]
    ]
    print(filenames)
    tf.logging.info("Running caption generation on %d files matching %s",
                    len(filenames), FLAGS.input_files)
    #mean of imagenet dataset in BGR
    imagenet_mean = np.array([104., 117., 124.], dtype=np.float32)

    #placeholder for input and dropout rate
    x_Alex = tf.placeholder(tf.float32, [1, 227, 227, 3])
    keep_prob_Alex = tf.placeholder(tf.float32)

    #create model with default config ( == no skip_layer and 1000 units in the last layer)
    modelAlex = AlexNet(x_Alex, keep_prob_Alex, 1000, [], ['fc7', 'fc8'],
                        512)  #maybe need to put fc8 in skip_layers

    #define activation of last layer as score
    score = modelAlex.fc6

    meme_embeddings = []
    with tf.Session() as sess:

        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        # Load the pretrained weights into the model
        modelAlex.load_initial_weights(sess)

        for i, meme in enumerate(filenames):
            img = Image.open(meme)
            try:
                img.thumbnail((227, 227), Image.ANTIALIAS)
                #img = img.resize((227,227))
                #use img.thumbnail for square images, img.resize for non square
                assert np.shape(img) == (227, 227, 3)
            except AssertionError:
                img = img.resize((227, 227))
                print('sizing error')

            # Subtract the ImageNet mean
            img = img - imagenet_mean  #should probably change this

            # Reshape as needed to feed into model
            img = img.reshape((1, 227, 227, 3))

            meme_vector = sess.run(score,
                                   feed_dict={
                                       x_Alex: img,
                                       keep_prob_Alex: 1
                                   })  #[1,4096]
            meme_vector = np.reshape(meme_vector, [4096])
            assert np.shape(meme_vector) == (4096, )

            #now have np embeddings to feed for inference
            meme_embeddings.append(meme_vector)

    with open('Captions.txt', 'r') as f:
        data_captions = f.readlines()
    data_captions = [s.lower() for s in data_captions]

    # Build the inference graph.
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                                   FLAGS.checkpoint_path)
    g.finalize()

    # Create the vocabulary.
    vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

    #filenames = []
    #for file_pattern in FLAGS.input_files.split(","):
    #filenames.extend(tf.gfile.Glob(file_pattern))
    #tf.logging.info("Running caption generation on %d files matching %s",
    #len(filenames), FLAGS.input_files)
    with tf.Session(graph=g) as sess:
        # Load the model from checkpoint.
        restore_fn(sess)

        # Prepare the caption generator. Here we are implicitly using the default
        # beam search parameters. See caption_generator.py for a description of the
        # available beam search parameters.
        generator = caption_generator.CaptionGenerator(model, vocab)
        num_in_data_total = 0
        num_captions = 0
        for i, meme in enumerate(meme_embeddings):
            #with tf.gfile.GFile(filename, "rb") as f:
            #image = f.read()
            captions = generator.beam_search(sess, meme)
            print("Captions for image %s:" % os.path.basename(filenames[i]))
            num_in_data = 0
            for i, caption in enumerate(captions):
                # Ignore begin and end words.
                sentence = [
                    vocab.id_to_word(w) for w in caption.sentence[1:-1]
                ]
                sentence = " ".join(sentence)
                in_data = 0
                if b_any(sentence in capt for capt in data_captions):
                    in_data = 1
                    num_in_data += 1
                    num_in_data_total += 1
                    num_captions += 1
                else:
                    num_captions += 1
                print("  %d) %s (p=%f) [in data = %d]" %
                      (i, sentence, math.exp(caption.logprob), in_data))
            print("number of captions in data = %d" % (num_in_data))
        print("(total number of captions in data = %d) percent in data = %f" %
              (num_in_data_total, (num_in_data_total / num_captions)))
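The closing statistic divides two integer counters; under Python 2 (without from __future__ import division) that truncates toward zero, so one operand is promoted to float. A one-line illustration of the difference:

num_in_data_total, num_captions = 3, 7
print(num_in_data_total // num_captions)          # 0 (what Python 2's "/" gives for two ints)
print(float(num_in_data_total) / num_captions)    # 0.428571...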
Example #16
File: base.py  Project: yassinebha/brainbox
def find_files(in_path, ext, targets, template='(?<=\d{2})\d{5}', sub=False):
    """
    Finds matching files with extension ext and returns them in
    the order of the targets list given as argument
    Returns a dictionary identical to what I was using before
    Also drops duplicates
    """
    # Go through each directory and see if I can find the subjects I am looking
    # for
    ext = '*{}'.format(ext)
    out_dict = {key: [] for key in ['sub_name', 'dir', 'path']}
   
    if not sub:
        sub_dirs = [d for d in os.walk(in_path).next()[1]]

        for sub_dir in sub_dirs:
            tmp_dir = os.path.join(in_path, sub_dir)
            in_files = glob.glob(os.path.join(tmp_dir, ext))
            tmp_dict = dict()

            # Get the files that we have
            matches = [x for x in targets if b_any(str(x) in t for t in in_files)]

            for in_file in in_files:
                sub_name = os.path.basename(in_file.split('.')[0])
                sub_id = int(re.search(r'{}'.format(template), sub_name).group())
                if sub_id in tmp_dict.keys():
                    # This is a duplicate
                    continue
                tmp_dict[sub_id] = (sub_name, in_file)

            # Re-sort the path info
            sort_list = list()
            for target in matches:
                sub_name, in_file = tmp_dict[target]
                out_dict['sub_name'].append(sub_name)
                out_dict['dir'].append(sub_dir)
                out_dict['path'].append(in_file)
    else:
        sub_dir = sub
        tmp_dir = os.path.join(in_path, sub_dir)
        in_files = glob.glob(os.path.join(tmp_dir, ext))
        tmp_dict = dict()

        # Get the files that we have
        matches = [x for x in targets if b_any(str(x) in t for t in in_files)]

        for in_file in in_files:
            sub_name = os.path.basename(in_file.split('.')[0])
            sub_id = int(re.search(r'{}'.format(template), sub_name).group())
            if sub_id in tmp_dict.keys():
                # This is a duplicate
                continue
            tmp_dict[sub_id] = (sub_name, in_file)

        for target in matches:
            sub_name, in_file = tmp_dict[target]
            out_dict['sub_name'].append(sub_name)
            out_dict['dir'].append(sub_dir)
            out_dict['path'].append(in_file)
    return out_dict
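find_files keys each subject by an id pulled out of the file name with the template regex (?<=\d{2})\d{5}, i.e. five digits preceded by (but not including) two digits. A quick check of that extraction on a hypothetical file name:

import re

sub_name = "sub2012345_rest_run1"                     # hypothetical file stem
sub_id = int(re.search(r'(?<=\d{2})\d{5}', sub_name).group())
print(sub_id)                                         # 12345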
Example #17
for state in states:
    with open("states/" + state) as csvfile:
        reader = csv.DictReader(csvfile)
        districts = []
        for row in reader:
            for key in row:
                if "Estimate" in key:
                    districts.append(key)
            break
        csvfile.seek(0)
        districts.sort()
        for i in range(len(districts)):
            prevTopic = ''
            print(state)
            print(districts[i])
            for row in reader:
                if row['Topic'] not in topics:
                    continue
                if prevTopic != row["Topic"]:
                    f.write('\n')
                    f.close()
                    f = open("relations/" + row["Topic"] + ".txt", "a")
                    f.write(state.replace(".csv", "") + str(i+1) + ",")
                    prevTopic = row["Topic"]
                if row["Subject"] in subjectTitle:
                    if b_any(title in row["Title"] for title in subjectTitle[row["Subject"]]):
                        if prevTitle != 'Civilian noninstitutionalized population under 19 years':
                            f.write(row[districts[i]].strip() + ",")
                prevTitle = row['Title']
            csvfile.seek(0)
Example #18
#    theList.append(line)

nDone = 0
nFail = 0
nWrong = 0
nMem = 0
nChappy = 0
nType = 0
failedList = []
completedList = []
for line in sorted(theList):
    line = line.strip()
    taskid = line.split('_')[-1].split('.')[0]
    logfilename = line
    #if themagicpattern not in open(logfilename, 'r').read():
    if not b_any(themagicpattern in x
                 for x in open(logfilename, 'r').readlines()[-30:]):
        ## look for the subjob id printed on purpose in the jO:
        ## FLS: subjob number <MYID>
        fullline, err = subprocess.Popen('grep FLS %s' % logfilename,
                                         stdout=subprocess.PIPE,
                                         shell=True).communicate()
        fullline = fullline.strip()
        if fullline == '':
            print 'have to skip job %s because of some strange error' % taskid
            nWrong += 1
            failedList.append(taskid)
            continue
        print 'failed task id %s' % (taskid)
        failedList.append(taskid)
        nFail += 1
    # some jobs might look ok, but will have this line at the end resulting in an
Example #19
    def run(self):
        PrimaryPathElementDictionary = {}
        PrimaryPathList = []
        ActiveSIDs = []
        OldActiveSIDs = []
        SecondaryPathList = []
        PrefixList = []
        OldPrimaryPathList = []
        OldSecondaryPathList = []
        OldPrefixList = []
        controller_ip = CONTROLLER_IP
        deadcounter = DEADTIMECOUNTER
        SecondaryFECServicePrefixList = []
        TopoChangeAddPathList = []
        TopoChangeRemovePathList = []
        PrimaryFECServicePrefixList = []
        count_list = []  #####  Remove this later
        while True:
            try:
                print "Running Main Routine.... All engines are go........"
                self.isis_switch = self.switches.values()[0]
                self.get_isis = Get_ISIS_SIDS()
                self.Adj_SIDs = self.get_isis.parse_isis_adj(self.isis_switch)
                self.Node_SIDs = self.get_isis.parse_isis_node(
                    self.isis_switch)
                self.data = []
                TopoChangeAddPathList = []
                TopoChangeRemovePathList = []
                self.rel_path = "new_path_info.json"
                self.script_dir = os.path.dirname(__file__)
                self.abs_file_path = os.path.join(self.script_dir,
                                                  self.rel_path)
                self.logfile = open(self.abs_file_path, 'r+')
                with self.logfile as f:
                    try:
                        self.data = json.load(f)
                        f.seek(0)
                        f.truncate()
                        f.close()
                    except ValueError:
                        #print "Path File Is Empty"
                        self.data = {
                            u'fec': u'',
                            u'dstPrefix': u'',
                            u'Primary': False,
                            u'dstNH': u'',
                            u'dstFecNH': u'',
                            u'RemoveFEC': u'',
                            u'path': [],
                            u'ManualFECPath': [],
                            u'RemoveRoute': u'',
                            u'Secondary': False
                        }
                        pass

        ### Here if there is a manual path entered and it comes over the JSON POST take it (a string)
        ### Listify it and save it as path so it's used just like a force path would be.
        ### Basic checking to make sure it's a 6 digit number or multiples space separated

                if len(self.data['ManualFECPath']) >= 1:
                    try:
                        if re.findall(r"\b\d{6}\b",
                                      self.data['ManualFECPath']):
                            path_sids = re.findall(r"\b\d{6}\b",
                                                   self.data['ManualFECPath'])
                            self.data['path'] = path_sids
                        else:
                            print " You need to input space separated 6 Digit Labels!!!"
                            return
                    except (KeyError, ValueError):
                        print " You need to input space separated 6 Digit Labels!!!"
                        return

        ### Right - now kick off the parsing and storing of said POST variables.
                try:
                    if str(self.data['dstPrefix']):
                        currentpath = str(
                            self.data['dstPrefix']) + ' next-hop ' + str(
                                self.data['dstNH'])
                        if PrefixList == []:
                            PrefixList.append(
                                str(self.data['dstPrefix']) + ' next-hop ' +
                                str(self.data['dstNH']))
                        for entry in PrefixList:
                            if currentpath in PrefixList:
                                pass
                            else:
                                PrefixList.append(
                                    str(self.data['dstPrefix']) +
                                    ' next-hop ' + str(self.data['dstNH']))
                    if self.data['Primary'] == True:
                        try:
                            for p in self.data['path']:
                                index = int(self.data['path'].index(p))
                                for node in self.Node_SIDs:
                                    if node.get(p) != None:
                                        self.data['path'][index] = str(
                                            node.get(p))
                            Path_String = ' '.join(self.data['path'])
                            currentpath = str(
                                self.data['fec']) + ' next-hop ' + str(
                                    self.data['dstFecNH']) + ' label [' + str(
                                        Path_String) + ']'
                            current_fec_NH = str(
                                self.data['fec']) + ' next-hop ' + str(
                                    self.data['dstFecNH'])
                            current_fec = str(self.data['fec'])

                            ### Do a couple of funky operations.  1) If the currentpath is in the Primary path list - skip. 2) If the FECs are the same,
                            ### then put the "latest" in Primary and relegate the current primary to secondary.  This keeps the router and the controller in sync,
                            ## as the router will always take the latest as advertised from exabgp. 3) Else - just add currentpath to the PrimaryPath list

                            if PrimaryPathList == []:
                                PrimaryPathList.append(
                                    str(self.data['fec']) + ' next-hop ' +
                                    str(self.data['dstFecNH']) + ' label [' +
                                    str(Path_String) + ']')
                            for entry in PrimaryPathList:
                                if currentpath in PrimaryPathList:
                                    pass
                                elif current_fec in entry and currentpath in SecondaryPathList:
                                    SecondaryPathList.remove(currentpath)
                                    PrimaryPathList.append(currentpath)
                                    PrimaryPathList.remove(entry)
                                    SecondaryPathList.append(entry)
                                elif current_fec in entry and currentpath not in SecondaryPathList:
                                    PrimaryPathList.remove(entry)
                                    SecondaryPathList.append(entry)
                                    PrimaryPathList.append(currentpath)
                                elif currentpath not in PrimaryPathList:
                                    if b_any(current_fec in x
                                             for x in PrimaryPathList):
                                        pass
                                    else:
                                        PrimaryPathList.append(
                                            str(self.data['fec']) +
                                            ' next-hop ' +
                                            str(self.data['dstFecNH']) +
                                            ' label [' + str(Path_String) +
                                            ']')
                        except KeyError:
                            pass

                    if self.data['Secondary'] == True:
                        try:
                            for p in self.data['path']:
                                index = int(self.data['path'].index(p))
                                for node in self.Node_SIDs:
                                    if node.get(p) != None:
                                        self.data['path'][index] = str(
                                            node.get(p))
                            Path_String = ' '.join(self.data['path'])
                            currentpath = str(
                                self.data['fec']) + ' next-hop ' + str(
                                    self.data['dstFecNH']) + ' label [' + str(
                                        Path_String) + ']'
                            if SecondaryPathList == []:
                                SecondaryPathList.append(
                                    str(self.data['fec']) + ' next-hop ' +
                                    str(self.data['dstFecNH']) + ' label [' +
                                    str(Path_String) + ']')
                            for entry in SecondaryPathList:
                                if currentpath in SecondaryPathList:
                                    pass
                                else:
                                    SecondaryPathList.append(
                                        str(self.data['fec']) + ' next-hop ' +
                                        str(self.data['dstFecNH']) +
                                        ' label [' + str(Path_String) + ']')
                        except KeyError:
                            pass

            ###  Now remove any FEC routes (Primary or secondary) when the FECtoRemove is received

                    if str(self.data['RemoveFEC']):
                        FECtoRemove = self.data['RemoveFEC']
                        # iterate over copies so entries can be removed safely
                        for line in PrimaryPathList[:]:
                            if line.find(FECtoRemove) != -1:
                                PrimaryPathList.remove(line)
                        for line in SecondaryPathList[:]:
                            if line.find(FECtoRemove) != -1:
                                SecondaryPathList.remove(line)

            ###  Now remove any Route when the RouteRemove is received

                    if str(self.data['RemoveRoute']):
                        RoutetoRemove = self.data['RemoveRoute']
                        # iterate over a copy so entries can be removed safely
                        for line in PrefixList[:]:
                            if line.find(RoutetoRemove) != -1:
                                PrefixList.remove(line)

                except KeyError:
                    pass

            #####  Now Find the SIDs ->
            ##### Just like in Get_SIDs_and_Dicts

                self.AllActiveSIDs = []
                self.ActiveAdjSIDs = []
                self.ActiveNodeSIDs = []

                ##  This just gets the SID from the dictionaries returned from the ISIS database parsing function
                ### add them to AllActiveSIDs

                for line in self.Adj_SIDs:
                    for node in line:
                        adjsid = line[node]
                        self.ActiveAdjSIDs.append(adjsid)

                for line in self.Node_SIDs:
                    for node in line:
                        nsid = line[node]
                        self.ActiveNodeSIDs.append(nsid)

                self.AllActiveSIDs = list(
                    set(self.ActiveNodeSIDs + self.ActiveAdjSIDs))

                # Search the primary FEC Path list- where one or more labels are missing from the
                # path.  Add these paths to the list TopoChangeRemovePathList (to be used later)

                path_ip_address_list = []
                for path in PrimaryPathList:
                    path_sids = re.findall(r"\b\d{6}\b", path)
                    if set(path_sids) < set(
                            self.AllActiveSIDs
                    ) and deadcounter == DEADTIMECOUNTER:
                        pass

        ### If things are Still busted, build a list of the next hops we need to look for in the Secondary table to add.

                    elif deadcounter == DEADTIMECOUNTER + 1:
                        for path in PrimaryPathList:
                            path_sids = re.findall(r"\b\d{6}\b", path)
                            if set(path_sids) < set(self.AllActiveSIDs):
                                deadcounter = DEADTIMECOUNTER
                                pass
                            else:
                                TopoChangeRemovePathList.append(path)
                                first_ip = re.search(
                                    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}',
                                    path).group()
                                path_ip_address_list.append(first_ip)
                        deadcounter = DEADTIMECOUNTER

        ### Give it a hot half second - might be a glitch getting the ISIS DB, or a refresh of DB
        ### Sleep for 0.5 seconds and request the SID's again.

                    else:
                        sleep(DEADTIMETIMER)
                        deadcounter += 1
                #print path_ip_address_list

        # First Step on secondary - Search the primary secondary Path list- if one or more labels are missing from the
        # path remove it.

                # iterate over a copy so paths can be removed safely
                for path in SecondaryPathList[:]:
                    path_sids = re.findall(r"\b\d{6}\b", path)
                    if set(path_sids) < set(self.AllActiveSIDs):
                        pass
                    else:
                        SecondaryPathList.remove(path)

        ## Second step on the secondary table. We search for routes to add back after we've removed the ones hit by the routing disruptions.
        ##  We now use the path_ip_address_list (from above) to see if there are any routes to add in.  So now we only care about the FEC NH.
        ##  We do need to remove them from the secondary too - because they're now going to be active in Primary!

                for ip in path_ip_address_list:
                    # iterate over a copy; move the secondary paths whose first
                    # hop matches the affected next-hop into the add list
                    for path in SecondaryPathList[:]:
                        secondpath_first_ip = re.search(
                            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}',
                            path).group()
                        if secondpath_first_ip == ip:
                            SecondaryPathList.remove(path)
                            TopoChangeAddPathList.append(path)

        #### Now update Primary Path list for Visibility.

                for path in TopoChangeAddPathList:
                    PrimaryPathList.append(path)
                for path in TopoChangeRemovePathList:
                    PrimaryPathList.remove(path)

                TopoChangeAddPathList = []
                TopoChangeRemovePathList = []

                ## We now have the primary table with removed routes, so len(OldPrimaryPathList) >= len(PrimaryPathList) should pick up the withdrawn routes
                ## for the path routes.  We also have TopoChangeAddPathList and TopoChangeRemovePathList that we need to action below.  We don't add these to the
                ## PrimaryPath list until after the paths are added/removed

                ## Now Program the changed Primary FEC Routes: Program Them!!!  Skip if nothing changed completely.

                if len(OldPrimaryPathList) == len(PrimaryPathList) and cmp(
                        PrimaryPathList, OldPrimaryPathList) == 0:
                    pass

                elif len(OldPrimaryPathList) == len(PrimaryPathList) and cmp(
                        PrimaryPathList, OldPrimaryPathList) != 0:
                    print(
                        "Advertising the following newly learned FEC routes for the same FEC"
                    )
                    for ppath in PrimaryPathList:
                        if ppath not in OldPrimaryPathList:
                            r = requests.post(
                                'http://' + str(controller_ip) + ':5000',
                                files={
                                    'command':
                                    (None, 'announce route ' + str(ppath))
                                })
                            sleep(.2)
                            print 'announce route ' + str(ppath)

                elif len(
                        OldPrimaryPathList) == 0 and len(PrimaryPathList) >= 0:
                    print(
                        "Advertising the following newly learned FEC routes First Match"
                    )
                    for ppath in PrimaryPathList:
                        if ppath not in OldPrimaryPathList:
                            r = requests.post(
                                'http://' + str(controller_ip) + ':5000',
                                files={
                                    'command':
                                    (None, 'announce route ' + str(ppath))
                                })
                            sleep(.2)
                            print 'announce route ' + str(ppath)

                elif len(OldPrimaryPathList) > len(PrimaryPathList):
                    print("Removing the following FEC routes ")
                    for route in OldPrimaryPathList:
                        if route not in PrimaryPathList:
                            path_copy = copy.deepcopy(route)
                            withdraw_ip = path_copy.split("next-hop", 1)[0]
                            ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}',
                                                 path_copy)
                            next_hop_ip = ip_list[1]
                            r = requests.post(
                                'http://' + str(controller_ip) + ':5000',
                                files={
                                    'command':
                                    (None, 'withdraw route ' +
                                     str(withdraw_ip) + ' next-hop ' +
                                     str(next_hop_ip) + ' label [800000]'
                                     '\n')
                                })
                            sleep(.2)
                            print 'withdraw route ' + str(
                                withdraw_ip) + ' next-hop ' + str(
                                    next_hop_ip) + ' label [800000]' '\n'

                elif len(OldPrimaryPathList) < len(PrimaryPathList):
                    print(
                        "Advertising the following newly learned FEC routes last match"
                    )
                    for route in PrimaryPathList:
                        if route not in OldPrimaryPathList:
                            r = requests.post(
                                'http://' + str(controller_ip) + ':5000',
                                files={
                                    'command':
                                    (None, 'announce route ' + str(route))
                                })
                            sleep(.2)
                            print 'announce route ' + str(route)

        ### Final Step (from above):  Search the Active Service prefixes - and if there is no route in the primary or
        ### secondary table with its NH - then use the default, or remove it.  I haven't decided yet.
        ### Build a list of paths to remove/change

        ### Now that we added and removed the topology changes -> updated  the Primary Path list and determined
        ### even if any service prefixes need to be removed.  Lets just use the "normal function"

        ### Programmed the Active programmed routes.  Skip if nothing changed.

                if len(OldPrefixList) == len(PrefixList) and cmp(
                        PrefixList, OldPrefixList) == 0:
                    #print("No Change in the Route Table\n")
                    pass

                elif len(OldPrefixList) == 0 and len(PrefixList) >= 0:
                    print(
                        "Advertising the following newly learned routes First Match"
                    )
                    for ppath in PrefixList:
                        if ppath not in OldPrefixList:
                            r = requests.post(
                                'http://' + str(controller_ip) + ':5000',
                                files={
                                    'command':
                                    (None, 'announce route ' + str(ppath))
                                })
                            sleep(.2)
                            print 'announce route ' + str(ppath)

                elif len(OldPrefixList) >= len(PrefixList):
                    print("Removing the following routes ")
                    for route in OldPrefixList:
                        if route not in PrefixList:
                            r = requests.post(
                                'http://' + str(controller_ip) + ':5000',
                                files={
                                    'command':
                                    (None,
                                     'withdraw route ' + str(route) + '\n')
                                })
                            sleep(.2)
                            print 'withdraw route ' + str(route) + '\n'

                elif len(OldPrefixList) <= len(PrefixList):
                    print(
                        "Advertising the following newly learned routes last match"
                    )
                    for route in PrefixList:
                        if route not in OldPrefixList:
                            r = requests.post(
                                'http://' + str(controller_ip) + ':5000',
                                files={
                                    'command':
                                    (None, 'announce route ' + str(route))
                                })
                            sleep(.2)
                            print 'announce route ' + str(route)

        ###  Just print out the paths for visibility

                PrefixDict = []
                # print"here is the active Service prefix list"
                # pp(PrefixList)
                PrefixDict = dict(enumerate(PrefixList))
                # print PrefixDict
                # print"here is the current primary path list"
                # pp(PrimaryPathList)
                PrimaryPathDict = dict(enumerate(PrimaryPathList))
                # print"here is the old primary path list"
                # pp(OldPrimaryPathList)
                # print"here is the current secondary path list"
                # pp(SecondaryPathList)
                SecondaryPathDict = dict(enumerate(SecondaryPathList))

                json_prep = {
                    "prefixes": PrefixDict,
                    "primary": PrimaryPathDict,
                    "secondary": SecondaryPathDict
                }
                self.rel_path = "controller_output.json"
                self.script_dir = os.path.dirname(__file__)
                self.abs_file_path = os.path.join(self.script_dir,
                                                  self.rel_path)
                self.logfile = open(self.abs_file_path, 'w')
                with self.logfile as json_out:
                    json.dump(json_prep, json_out, indent=2)
                    json_out.close()

                sleep(1)
                _ = os.system("clear")  ### (need to move display to JSON))

                OldPrimaryPathList = copy.deepcopy(PrimaryPathList)
                OldPrefixList = copy.deepcopy(PrefixList)

            except KeyboardInterrupt:
                exit(0)
Example #20
 else:
     ip = instaparse.InstaParse(FLAGS.infile, FLAGS)
     #return dictionary
     data = ip.get_data()
     f = Formatter(ip, FLAGS)
     graph = graphbuilder.GraphBuilder()
     top_tags = [t for t, c in ip.tags.items() if c >= 10]
     graph.edges_from_text(ip.d_insta, top_tags)
     #ccs = nx.connected_component_subgraphs(graph.G)
     #for c in ccs:
     cliq = list(nx.find_cliques(graph.G))
     skips = ['firestone']
     next_scrapings = set()
     for c in cliq:
         for word in c:
             if b_any(x in word for x in skips):
                 print word
             else:
                 next_scrapings.add(word)
     f.write_clique(next_scrapings)
     nx.make_max_clique_graph(graph.G)
     pos = nx.spring_layout(graph.G)
     elarge = [(u, v) for (u, v, d) in graph.G.edges(data=True)
               if d['weight'] > 15]
     esmall = [(u, v) for (u, v, d) in graph.G.edges(data=True)
               if d['weight'] <= 15]
     nx.draw_networkx_nodes(graph.G, pos, node_size=10)
     nx.draw_networkx_edges(graph.G,
                            pos,
                            edgelist=elarge,
                            width=3,