Code Example #1
    def build_features(self, image_shape):
        height, width = image_shape
        features = []
        # TODO: play with minimum feature size
        for w in range(1, width + 1):
            for h in range(1, height + 1):
                x = 0
                while x + w < width:
                    y = 0
                    while y + h < height:
                        root = Region(x, y, w, h)
                        right = Region(x + w, y, w, h)
                        # 2 horizontally aligned blocks
                        # (check that the full VJ feature fits inside the image)
                        if x + 2 * w < width:
                            features.append(Feature([right], [root]))
                        # 2 vertically aligned blocks
                        bottom = Region(x, y + h, w, h)
                        if y + 2 * h < height:
                            features.append(Feature([root], [bottom]))
                        # 3 horizontally aligned blocks
                        right2 = Region(x + 2 * w, y, w, h)
                        if x + 3 * w < width:
                            features.append(Feature([right], [right2, root]))
                        # 4 blocks in a checkerboard pattern
                        cross_bottom = Region(x + w, y + h, w, h)
                        if x + 2 * w < width and y + 2 * h < height:
                            features.append(Feature([right, bottom], [root, cross_bottom]))
                        y += 1
                    x += 1
        return features
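The Region and Feature classes used above are not shown on this page. As a rough guide, Viola-Jones implementations usually make them thin wrappers around integral-image (summed-area table) lookups; the sketch below is an assumption about their shape, not this project's actual code.

class Region:
    """One rectangle at (x, y) with the given width and height."""
    def __init__(self, x, y, width, height):
        self.x, self.y, self.width, self.height = x, y, width, height

    def compute_region(self, ii):
        # Pixel sum inside the rectangle, assuming a zero-padded summed-area
        # table ii of shape (H + 1, W + 1), where ii[r][c] is the sum of all
        # pixels in rows < r and columns < c.
        return (ii[self.y + self.height][self.x + self.width]
                - ii[self.y][self.x + self.width]
                - ii[self.y + self.height][self.x]
                + ii[self.y][self.x])


class Feature:
    """A Haar-like feature: sum over one region list minus sum over the other."""
    def __init__(self, positive_regions, negative_regions):
        self.positive_regions = positive_regions
        self.negative_regions = negative_regions

    def compute_feature(self, ii):
        return (sum(r.compute_region(ii) for r in self.positive_regions)
                - sum(r.compute_region(ii) for r in self.negative_regions))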
Code Example #2
def feature_detection(S_ana_log):

    indices = np.argwhere(~np.isnan(S_ana_log))  # all non-NaN indices (pixels) in S_ana_log
    Features_list = []  # initialize the list of Features found so far

    for [x_ind, y_ind] in indices:  # for each pair of indices (pixel)

        if len(Features_list) == 0:  # no Features exist yet
            Features_list.append(Feature(x_ind, y_ind))  # start the first Feature
            continue

        # for every existing Feature, record whether it borders the current pixel
        border_list = [currentFeature.borders(x_ind, y_ind)
                       for currentFeature in Features_list]
        indslist = np.where(border_list)[0]

        if len(indslist) == 1:  # the pixel borders exactly one Feature
            hunterFeature = Features_list[indslist[0]]  # find that Feature
            hunterFeature.add(x_ind, y_ind)  # add the current pixel to it
        elif len(indslist) > 1:  # the pixel borders more than one Feature
            sublist = [Features_list[ind] for ind in indslist]  # the bordering Features
            for s in sublist:
                Features_list.remove(s)  # remove them from Features_list
            hunterFeature = conjoin(sublist)  # conjoin all Features in sublist
            hunterFeature.add(x_ind, y_ind)  # add the current pixel to the conjoined Feature
            Features_list.append(hunterFeature)  # put the conjoined Feature back in the list
        else:  # the pixel borders no existing Feature
            Features_list.append(Feature(x_ind, y_ind))  # start a new Feature

    return (Features_list, indices)
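Feature and conjoin come from the same project and are not shown here. To make the routine above runnable on a toy array, a minimal sketch of those pieces (the 4-connectivity rule and the class layout are assumptions) could be:

import numpy as np


class Feature:
    """A connected group of pixels, stored as a set of (x, y) index pairs."""
    def __init__(self, x_ind, y_ind):
        self.pixels = {(x_ind, y_ind)}

    def add(self, x_ind, y_ind):
        self.pixels.add((x_ind, y_ind))

    def borders(self, x_ind, y_ind):
        # True if (x_ind, y_ind) is 4-adjacent to any pixel already in this Feature
        return any(abs(x - x_ind) + abs(y - y_ind) == 1 for x, y in self.pixels)


def conjoin(features):
    """Merge several Features into a single one."""
    merged = features[0]
    for f in features[1:]:
        merged.pixels |= f.pixels
    return merged


# Toy usage: two separate blobs of finite values in a NaN background.
S = np.full((4, 4), np.nan)
S[0, 0] = S[0, 1] = 1.0   # blob 1
S[3, 2] = S[3, 3] = 2.0   # blob 2
feats, idx = feature_detection(S)
print(len(feats))         # expected output: 2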
Code Example #3
def decorate_outside(obj, options=Map()):

    obj.points = []
    obj.points_edges = []
    obj.material_clear = Blocks.AIR

    border = flatten_list_of_lists(
        [vg.get_line_from_points(l[0], l[1]) for l in options.lines])

    if options.options.outside == "flowers":
        flowers_1 = []
        flowers_2 = []
        for i, b in enumerate(border):
            # TODO: Refactor to have multiple numbers of flowers

            if (i % 2) == 0:
                flowers_1.append(b)
            else:
                flowers_2.append(b)

        colors = Blocks.kind("Flower")
        np.random.shuffle(colors)

        obj.features.append(
            Feature("flowers", flowers_1, Map(material=colors[0])))
        obj.features.append(
            Feature("flowers", flowers_2, Map(material=colors[1])))

    elif options.options.outside == "trees":
        trees = []
        for i, b in enumerate(border):
            if (i % 3) == 0:
                trees.append(b)

        colors = Blocks.kind("Sapling")
        np.random.shuffle(colors)

        obj.features.append(Feature("flowers", trees, Map(material=colors[0])))

    elif options.options.outside == "grass":
        trees = []
        for i, b in enumerate(border):
            if (i % 3) == 0:
                trees.append(b)

        obj.features.append(
            Feature("flowers", trees, Map(material=Blocks.DOUBLETALLGRASS)))

    elif options.options.outside == "fence":
        fence_type = np.random.randint(188, 193)  # a random wooden fence block id (188-192)
        obj.features.append(Feature("fence", border, Map(material=fence_type)))

    return obj
Code Example #4
	def create_features(self, img_height, img_width, min_feature_width, max_feature_width, min_feature_height, max_feature_height):
	    features = []
	    print('Creating feature ...')
	    for feature in FeatureTypes:
	        feature_start_width = max(min_feature_width, feature[0])
	        for feature_width in range(feature_start_width, max_feature_width, feature[0]):
	            feature_start_height = max(min_feature_height, feature[1])
	            for feature_height in range(feature_start_height, max_feature_height, feature[1]):
	                for x in range(img_width - feature_width):
	                    for y in range(img_height - feature_height):
	                        features.append(Feature(feature, (x, y), feature_width, feature_height, 0, 1))
	                        features.append(Feature(feature, (x, y), feature_width, feature_height, 0, -1))
	    print('..done. ' + str(len(features)) + ' features created.')
	    return features
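FeatureTypes is defined elsewhere in that project. In Viola-Jones style detectors it is typically the list of base (width, height) shapes of the five Haar feature types; the values below are an illustrative assumption, not the project's actual definition.

# Hypothetical FeatureTypes: base (width, height) of each Haar-like shape.
# The loops above grow each shape in multiples of these base sizes.
FeatureTypes = [
    (2, 1),  # two-rectangle, horizontally adjacent
    (1, 2),  # two-rectangle, vertically stacked
    (3, 1),  # three-rectangle, horizontal
    (1, 3),  # three-rectangle, vertical
    (2, 2),  # four-rectangle checkerboard
]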
Code Example #5
    def prepare(self, scales):
        # const vector<Size>& scales
        # Initialize test locations for features
        totalFeatures = self.nstructs * self.structSize
        for i in range(len(scales)):
            tmp = []
            self.features.append(tmp)

        for i in range(totalFeatures):
            x1f = random.random()
            x2f = random.random()
            y1f = random.random()
            y2f = random.random()
            for j in range(len(scales)):
                # scales[j][0] = width, scales[j][1] = height
                x1 = x1f * scales[j][0]
                y1 = y1f * scales[j][1]
                x2 = x2f * scales[j][0]
                y2 = y2f * scales[j][1]
                self.features[j].append(Feature(x1, y1, x2, y2))

        # Thresholds
        self.thrN = 0.5 * self.nstructs

        # Initialize Posteriors
        # positives = Pcounter, negatives = Ncounter
        for i in range(self.nstructs):
            self.posteriors.append([0] * pow(2, self.structSize))
            self.pCounter.append([0] * pow(2, self.structSize))
            self.nCounter.append([0] * pow(2, self.structSize))
Code Example #6
File: main.py  Project: njnuzpy/CWS
def test_avg(iterations, test_file, beam_size):
    data = prepare_data.read_file(test_file)
    feature = Feature()
    decoder = Decoder(beam_size, feature.get_score)

    count = 0
    data_size = len(data)

    model_file = open(
        '/home/xzt/CWS/model_result/avg-model_beam-size-' + str(beam_size) +
        '.pkl', 'rb')
    feature.load_model(model_file)
    model_file.close()
    seg_data_file = ('/home/xzt/CWS/test_seg_data/avg-test-seg-data' +
                     '_beam-size-' + str(beam_size) + '.txt')
    # open the output file once instead of reopening it for every line
    with open(seg_data_file, 'a') as f:
        for line in data:
            z = decoder.beamSearch(line)
            seg_data = ' '.join(z)
            f.write(seg_data + '\n')
            count += 1
            if count % 1000 == 0:
                print("segment with avg-model, finish %.2f%%" %
                      ((count / data_size) * 100))
    print("segment with avg model finished")
Code Example #7
File: main.py  Project: njnuzpy/CWS
def train(iterations, train_file, beam_size):
    data = prepare_data.read_file(train_file)
    feature = Feature()
    decoder = Decoder(beam_size, feature.get_score)

    for t in range(iterations):
        count = 0
        data_size = len(data)

        seg_data_file = ('/home/xzt/CWS/train_seg_data/train-seg-data_ model-' +
                         str(t) + '.txt')
        # open the segmentation output for this iteration once, not per line
        with open(seg_data_file, 'a') as f:
            for line in data:
                y = line.split()
                z = decoder.beamSearch(line)
                if z != y:
                    feature.update_weight(y, z)

                train_seg = ' '.join(z)
                f.write(train_seg + '\n')

                count += 1
                if count % 1000 == 0:
                    print("iter %d , finish %.2f%%" % (t,
                                                       (count / data_size) * 100))

        model_file = open(
            "/home/xzt/CWS/model_result/model-" + str(t) + "_beam-size-" +
            str(beam_size) + '.pkl', 'wb')
        feature.save_model(model_file)
        model_file.close()

        print("segment with model-%d finished" % t)
        print("iteration %d finished" % t)
Code Example #8
def add_alters_to_ego_net(ego_net, alter_features_file, ego_net_features):
    '''
    ego_net: object of the EgoNet class
    alter_features_file: file the user inputs
    ego_net_features: used to access the feature dictionary

    Splits the information in each line of the file, gets the feature name and
    value using the different classes, calls the add_feature method of the Node
    class to attach each feature to a node, and adds the node to the ego net
    with the add_alter_node method of the EgoNet class.
    Returns the ego_net object.
    '''
    for line in alter_features_file:  # goes through each line in the file
        pieces = line.split()  # splits the line into a list of fields
        node_id = int(pieces[0])  # the alter's id is the first field
        # create a Node for this alter, sized by its number of features
        new_node = Node(node_id, len(pieces[1:]))

        # enumerate gives the index and the value of each feature flag
        for i, j in enumerate(pieces[1:]):
            feature_name = ego_net_features[i][1]  # gets the feature name
            feature_value = ego_net_features[i][0]  # gets the feature value
            # build a Feature object from the name, value and flag
            feature_object = Feature(feature_name, feature_value, int(j))
            # add_feature (from the Node class) attaches it to the node
            new_node.add_feature(i, feature_object)
        # add_alter_node (from the EgoNet class) adds this node to the ego net
        ego_net.add_alter_node(new_node)
    return ego_net  # returns the ego_net object
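Neither the input file nor ego_net_features is shown on this page; the sketch below is a hypothetical illustration of the shapes the parser above expects.

# Hypothetical data shaped the way add_alters_to_ego_net reads it:
ego_net_features = [
    ("0", "birthday"),           # ego_net_features[i] = (feature_value, feature_name)
    ("1", "education;school"),
    ("2", "gender"),
]
alter_features_file = ["236 0 1 1", "239 1 0 0"]  # "<alter_id> <flag> <flag> ..."

# For the first line this would build Node(236, 3) and attach
#   Feature("birthday", "0", 0), Feature("education;school", "1", 1),
#   Feature("gender", "2", 1)
# before handing the node to ego_net.add_alter_node(...).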
Code Example #9
    def add_feature(self, feature, **kwargs):
        """ add_feature(self, feature, **args)

            o feature       Bio.SeqFeature object

            o **kwargs      Keyword arguments for Feature.  Named attributes
                            of the Feature
                                                        

            Add a Bio.SeqFeature object to the diagram (will be stored
            internally in a Feature wrapper
        """
        id = self.next_id  # get id number
        self.features[id] = Feature(self, id, feature)  # add feature
        for key in kwargs:
            if key == "colour" or key == "color":
                #Deal with "colour" as a special case by also mapping to color.
                #If Feature.py used a python property we wouldn't need to call
                #set_color explicitly.  However, this is important to make sure
                #every color gets mapped to a colors object - for example color
                #numbers, or strings (may not matter for PDF, but does for PNG).
                self.features[id].set_color(kwargs[key])
                continue
            setattr(self.features[id], key, kwargs[key])
        self.next_id += 1  # increment next id
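This appears to be the FeatureSet.add_feature method from Biopython's GenomeDiagram module. A typical call site, loosely following the pattern in the Biopython tutorial (the GenBank file name and the styling choices here are only for illustration), looks like:

from reportlab.lib import colors
from Bio import SeqIO
from Bio.Graphics import GenomeDiagram

record = SeqIO.read("NC_005816.gb", "genbank")  # any annotated GenBank record

gd_diagram = GenomeDiagram.Diagram("pPCP1 plasmid")
gd_track = gd_diagram.new_track(1, name="Annotated Features")
gd_feature_set = gd_track.new_set()

for feature in record.features:
    if feature.type != "gene":
        continue
    # alternate colours so adjacent genes are easy to tell apart
    color = colors.blue if len(gd_feature_set) % 2 == 0 else colors.lightblue
    gd_feature_set.add_feature(feature, color=color, label=True)

gd_diagram.draw(format="linear", pagesize="A4", fragments=4,
                start=0, end=len(record))
gd_diagram.write("plasmid_linear.pdf", "PDF")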
Code Example #10
def add_alters_to_ego_net(ego_net, alter_features_file, ego_net_features):
    '''
    Iterates through each line in the features_file using for loop
        Splits each line into a list separated by spaces
        Isolates the alter_id, and the alter values in the line_list
        Creates a Node object using the alter_id and the number of features
        For each value in the alter_values
            Use the alter add_feature method to add features to that alter
        Add the node/alter to the ego_net
    Returns: ego_net
    '''
    # Iterates through each line in the feature file
    for line in alter_features_file:
        # Splits the line into a list
        a_list = line.split()
        # Isolates the values
        alter_id = int(a_list[0])
        line_list = a_list[1:]
        # Creates a Node object and assigns it to alter
        alter = Node(alter_id, len(line_list))
        # Iterates through each value in the alter_values list
        for i, digit in enumerate(line_list):
            # in order to add a feature we must create a Feature instance
            alter.add_feature(
                i,
                Feature(ego_net_features[i][1], ego_net_features[i][0],
                        int(digit)))
        #Add the alter to the ego_net
        ego_net.add_alter_node(alter)
    return ego_net
Code Example #11
def flw_dataset_classify():
    f = Feature()
    paths, classes = loadFaceData('face.csv', nrows=82)
    X = []
    y = []
    for index, path in enumerate(paths):
        ar = f.getFeature(path)
        print(index, path)
        if not ar.any():  # skip images where feature extraction failed (all-zero vector)
            continue
        X.append(ar)
        y.append(classes[index])
    X = np.array(X)
    y = np.array(y)
    print(X.shape)
    print(X)
    print(y)
    X_train_data, X_test_data, y_train_data, y_test_data = train_test_split(
        X, y, test_size=0.3, stratify=y)
    nearestCentroid = NearestCentroid()
    nearestCentroid.fit(X_train_data, y_train_data)

    predict_y = nearestCentroid.predict(X_test_data)
    acc = accuracy_score(y_test_data, predict_y)

    print(acc)
Code Example #12
def scut_fbp_test():
    f = Feature()
    # af1and5 0.890287769784
    paths, classes = loadFaceData(
        './dataset/af1and5.csv',
        nrows=100)  # './dataset/all(round_score).csv' for full class
    X = []
    y = []
    for index, path in enumerate(paths):
        ar = f.getFeature(path)
        print(index, path)
        if not ar.any():  # skip images where feature extraction failed (all-zero vector)
            continue
        X.append(ar)
        y.append(round(classes[index]))
    X = np.array(X)
    y = np.array(y)
    print(X.shape)
    print(X)
    print(y)
    X_train_data, X_test_data, y_train_data, y_test_data = train_test_split(
        X, y, test_size=0.3, stratify=y)
    nearestCentroid = NearestCentroid()
    nearestCentroid.fit(X_train_data, y_train_data)

    predict_y = nearestCentroid.predict(X_test_data)
    acc = accuracy_score(y_test_data, predict_y)

    print(acc)
Code Example #13
    def __scaffold_contigs(self, contig_ids=None):
        seq = str(self.get_original_seq()).upper()
        s_id = self.get_name()
        slen = len(seq)

        i = c_start = 0

        contig_count = 0
        value = 1
        id = None
        last_contig = 0

        while True:
            i = seq.find('N', i)
            if i < 0: break
            # count consecutive Ns
            n_start = i
            while i < slen and seq[i] == 'N':
                i += 1
            # this many Ns in a row constitute a contig break (gap)
            n_len = i - n_start
            if n_len >= self.minGapSize:
                c_len = n_start - c_start
                if c_len >= self.minConSize:
                    id = s_id + "_c" + str(contig_count + 1)
                    if contig_ids:
                        id = contig_ids[contig_count]
                    self.contigs.append(Feature(c_start, n_start, value, id))
                    contig_count += 1
                    last_contig = n_start
                elif contig_count == 0:
                    self.seq_start = i
                c_start = i
                #contig_count += 1

        if last_contig < slen:
            if slen - c_start > self.minConSize:
                id = s_id + "_c" + str(contig_count + 1)
                if contig_ids:
                    id = contig_ids[contig_count]
                self.contigs.append(Feature(c_start, slen, value, id))
            else:
                self.seq_end = last_contig

        self.get_contig_lengths_list()
        assert self.get_contig_length() + self.get_gap_length() == self.get_length()
Code Example #14
File: Phonology.py  Project: Krolov18/HPSG
 def __init__(self, symbole, **traits):
     self.__symbole = symbole
     self.__traits = set(map(lambda x: Feature(x[0], x[1]), traits.items()))
     recup = self.__memory.get(symbole)
     if recup == traits:
         print('Cette combinaison traits-symbole existe déjà.')  # "This feature-symbol combination already exists."
     else:
         self.__memory[symbole] = traits
Code Example #15
File: FeatureHandler.py  Project: chriswtanner/CRETE
    def saveCharFeatures(self, fileOut):
        feature = Feature()
        if len(self.charEmb) == 0:
            self.loadCharEmbeddings()
        xuidPairs = self.getAllXUIDPairs()
        xuids = set()
        for (xuid1, xuid2) in xuidPairs:
            xuids.add(xuid1)
            xuids.add(xuid2)
        for xuid in xuids:
            charEmb = []
            numCharsFound = 0
            for t in self.corpus.XUIDToMention[xuid].tokens:
                lemma = self.getBestStanToken(t.stanTokens).lemma.lower()
                for char in lemma:
                    if char == "ô":
                        char = "o"
                    if char in self.charEmb:
                        if numCharsFound == 20:
                            break
                        else:
                            charEmb += self.charEmb[char]
                            numCharsFound += 1
                    else:
                        print("* WARNING: we don't have char:", str(char),
                              "of len:", len(char))
                        #exit(1)
            while len(charEmb) < 400:  # 20 chars * 20 dim
                charEmb.append(0.0)
            feature.setSingle(self.corpus.XUIDToMention[xuid].UID, charEmb)

        # go through all pairs to compute relational data
        if self.saveRelationalFeatures:
            proc = 0
            completed = set()
            for xuid1, xuid2 in xuidPairs:
                uid1, uid2 = sorted([
                    self.corpus.XUIDToMention[xuid1].UID,
                    self.corpus.XUIDToMention[xuid2].UID
                ])
                if (uid1, uid2) in completed or (uid2, uid1) in completed:
                    continue
                completed.add((uid1, uid2))
                flatv1 = feature.singles[uid1]
                flatv2 = feature.singles[uid2]
                (dp, cs) = self.getDPCS(flatv1, flatv2)
                feature.addRelational(uid1, uid2, dp)
                feature.addRelational(uid1, uid2, cs)
                if proc % 1000 == 0:
                    print("\tprocessed",
                          proc,
                          "of",
                          len(xuidPairs),
                          "(%2.2f)" % float(100.0 * proc / len(xuidPairs)),
                          end="\r")
                proc += 1
        with open(fileOut, 'wb') as pickle_out:
            pickle.dump(feature, pickle_out)
Code Example #16
 def __init__(self, csvFileName):
     self.csvFileName = csvFileName
     self.table = []
     df = pd.read_csv(csvFileName)
     t_flag = 1  # assume no timestamp-like column has been found yet
     index = []
     for i in range(1, len(df) + 1):
         index.append(i)  # candidate timestamp values: 1..N
     for col in df.columns:
         if df[col].dtype == "float64" or df[col].dtype == "int64":
             f = Feature(col, df[col].values)  # wrap each numeric column as a Feature
             self.table.append(f)
             b = f.getSampels() == index
             if b.all():
                 t_flag = 0  # this column is already a running 1..N index
     if t_flag:
         # no column acts as a timestamp, so add a synthetic TimeStamp feature
         f = Feature("TimeStamp", index)
         self.table.append(f)
Code Example #17
    def LoadDigitData(self, file_path):
        feature = []
        for line in open(file_path):
            line = line.strip()
            line_feature = [ord(ch) - ord('0') for ch in line]
            feature.extend(line_feature)

        self.dim = len(feature)
        return Feature(np.array(feature))
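The input format is only implied by the parsing above: each line of the file is a row of '0'/'1' characters. A hypothetical 3x3 input and the result it produces:

# Hypothetical contents of file_path (a 3x3 binary digit bitmap):
#   010
#   111
#   010
# LoadDigitData flattens it row by row, sets self.dim to 9, and returns
#   Feature(np.array([0, 1, 0, 1, 1, 1, 0, 1, 0]))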
Code Example #18
File: Features.py  Project: pokey/smartAutocomplete
    def _feature(self, i):
        def context(input):
            line = input.words[input.index][2]
            inputId = (input.input.path, line[0])
            if inputId != self.cachedInputId:
                raise Exception('Unexpected call to feature')
            return self.cachedScopeChain[self.cachedDepth - 1 - i]

        return Feature('{}scope'.format(i), context, word)
Code Example #19
File: Main.py  Project: haliliya/naiveBayes
def create_feature(feature_description):
    feature_name = feature_description[0]
    if "{" in feature_description[1] and "}" in feature_description[1]:
        feature_type = "CATEGORICAL"
    else:
        feature_type = "NUMERIC"
    feature_possible_values = (feature_description[1].replace("{", "").replace("}", "")).split(",")
    feature = Feature(feature_name, feature_type, feature_possible_values)
    # append feature to features_list
    features_list.append(feature)
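feature_description presumably comes from a parsed dataset header; the calls below are a hypothetical trace of what the function builds for categorical and numeric descriptions.

# Hypothetical inputs and the Feature objects they produce:
create_feature(["outlook", "{sunny,overcast,rainy}"])
#   -> Feature("outlook", "CATEGORICAL", ["sunny", "overcast", "rainy"])
create_feature(["temperature", "NUMERIC"])
#   -> Feature("temperature", "NUMERIC", ["NUMERIC"])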
Code Example #20
def decorate_wall(obj, options):

    if options.options.windows == "window_line":
        spaced_points = vg.extrude(
            obj.bottom(), Map(spacing=V3(0, math.ceil(obj.height / 2), 0)))
        for vec in spaced_points:
            obj.features.append(Feature("window", vec,
                                        options=options.options))

    elif options.options.windows == "window_line_double":
        spaced_points = vg.extrude(
            obj.bottom(), Map(spacing=V3(0, math.ceil(obj.height / 2), 0)))
        spaced_points2 = vg.extrude(spaced_points, Map(spacing=V3(0, 1, 0)))
        for vec in spaced_points + spaced_points2:
            obj.features.append(Feature("window", vec,
                                        options=options.options))

    elif options.options.windows == "window_slits":
        spaced_points = vg.points_spaced(obj.bottom(), Map(every=5))
        spaced_points = vg.extrude(
            spaced_points, Map(spacing=V3(0, math.ceil(obj.height / 2), 0)))
        spaced_points2 = vg.extrude(spaced_points, Map(spacing=V3(0, 1, 0)))
        for vec in spaced_points + spaced_points2:
            obj.features.append(Feature("spacing", vec))

    else:
        spaced_points = vg.points_spaced(obj.bottom(), Map(every=3))
        spaced_points = vg.extrude(
            spaced_points, Map(spacing=V3(0, math.ceil(obj.height / 2), 0)))
        for vec in spaced_points:
            obj.features.append(Feature("window", vec,
                                        options=options.options))

    mid_points = vg.middle_of_line(obj.bottom(),
                                   Map(center=True, max_width=2, point_per=10))
    for vec in mid_points:
        obj.features.append(
            Feature(
                "door", vec,
                Map(cardinality=obj.cardinality,
                    door_inside=options.options.door_inside)))

    return obj
Code Example #21
def add_ego_net_features_to_ego(ego, ego_feature_file, ego_net_features):
    '''Reads a one-line file of features for the ego node'''
    line_list = ego_feature_file.readline().split()  # read one line
    # i is the index, digit is the value
    for i, digit in enumerate(line_list):
        # in order to add a feature we must create a Feature instance
        ego.add_feature(
            i,
            Feature(ego_net_features[i][1], ego_net_features[i][0],
                    int(digit)))
    return ego
Code Example #22
File: FeatureHandler.py  Project: chriswtanner/CRETE
    def saveWordNetFeatures(self, fileOut):
        feature = Feature()

        synSynToScore = {}
        xuidPairs = self.getAllXUIDPairs()
        print("calculating wordnet features for", len(xuidPairs),
              "unique pairs")
        i = 0
        completed = set()
        for xuid1, xuid2 in xuidPairs:
            uid1 = self.corpus.XUIDToMention[xuid1].UID
            uid2 = self.corpus.XUIDToMention[xuid2].UID
            if (uid1, uid2) in completed or (uid2, uid1) in completed:
                continue
            completed.add((uid1, uid2))
            textTokens1 = self.corpus.XUIDToMention[xuid1].text
            textTokens2 = self.corpus.XUIDToMention[xuid2].text
            bestScore = -1
            for t1 in textTokens1:
                syn1 = wn.synsets(t1)
                if len(syn1) == 0:
                    continue
                syn1 = syn1[0]
                for t2 in textTokens2:
                    syn2 = wn.synsets(t2)
                    if len(syn2) == 0:
                        continue
                    syn2 = syn2[0]
                    curScore = -1
                    if (syn1, syn2) in synSynToScore:
                        curScore = synSynToScore[(syn1, syn2)]
                    elif (syn2, syn1) in synSynToScore:
                        curScore = synSynToScore[(syn2, syn1)]
                    else:  # calculate it
                        curScore = wn.wup_similarity(syn1, syn2)
                        # don't want to store tons.  look-up is cheap
                        synSynToScore[(syn1, syn2)] = curScore
                    # track the best score whether it was cached or freshly computed
                    if curScore is not None and curScore > bestScore:
                        bestScore = curScore

            feature.addRelational(uid1, uid2, bestScore)
            i += 1
            if i % 1000 == 0:
                print("\tprocessed",
                      i,
                      "of",
                      len(xuidPairs),
                      "(%2.2f)" % float(100.0 * i / len(xuidPairs)),
                      end="\r")

        with open(fileOut, 'wb') as pickle_out:
            pickle.dump(feature, pickle_out)
        print("")
Code Example #23
File: FeatureHandler.py  Project: chriswtanner/CRETE
    def saveLemmaFeatures(self, fileOut):
        feature = Feature()
        if len(self.gloveEmb) == 0:  # don't want to wastefully load again
            self.loadGloveEmbeddings()
        xuidPairs = self.getAllXUIDPairs()
        xuids = set()
        for (xuid1, xuid2) in xuidPairs:
            xuids.add(xuid1)
            xuids.add(xuid2)
        for xuid in xuids:

            sumEmb = [0] * 300
            for t in self.corpus.XUIDToMention[xuid].tokens:
                lemma = self.getBestStanToken(t.stanTokens).lemma.lower()
                if lemma not in self.gloveEmb:
                    print("* ERROR: no emb for", lemma)
                    continue
                curEmb = self.gloveEmb[lemma]
                sumEmb = [x + y for x, y in zip(sumEmb, curEmb)]
            #print("saving lemma for:", xuid, ": (", self.corpus.XUIDToMention[xuid].UID, ")")
            feature.setSingle(self.corpus.XUIDToMention[xuid].UID, sumEmb)

        if self.saveRelationalFeatures:
            # go through all pairs to compute relational data
            proc = 0
            completed = set()
            for xuid1, xuid2 in xuidPairs:
                uid1, uid2 = sorted([
                    self.corpus.XUIDToMention[xuid1].UID,
                    self.corpus.XUIDToMention[xuid2].UID
                ])
                if (uid1, uid2) in completed or (uid2, uid1) in completed:
                    continue
                completed.add((uid1, uid2))
                flatv1 = feature.singles[uid1]
                flatv2 = feature.singles[uid2]

                (dp, cs) = self.getDPCS(flatv1, flatv2)
                feature.addRelational(uid1, uid2, dp)
                feature.addRelational(uid1, uid2, cs)
                if proc % 1000 == 0:
                    print("\tprocessed",
                          proc,
                          "of",
                          len(xuidPairs),
                          "(%2.2f)" % float(100.0 * proc / len(xuidPairs)),
                          end="\r")
                proc += 1
        with open(fileOut, 'wb') as pickle_out:
            pickle.dump(feature, pickle_out)
Code Example #24
File: Document.py  Project: jaindeepali/Adler
    def parse_all_docs(self):
        fob = Feature(self.exp)

        cob = Category()
        cob.get_category_done_list()

        stime = time.time()
        print "Parsing documents in " + self.exp
        print "Start time: " + time.ctime(stime)

        with open(self.listing_document_path, 'r') as f:
            lines = [line.strip() for line in f]
            lines = [line for line in lines[2:] if line]

            document_id = ''
            for line in lines:
                elements = line.split(' ')
                if elements[0] == '#':
                    document_id = elements[3]
                    continue

                c_id, category = self._get_category(elements[0])

                if category == 'NA':
                    continue

                if c_id in cob.category_done_list and \
                  not self.ignore_duplicate_category:
                    continue

                sample = self._get_features(fob, elements[1:])
                sample['Category'] = category
                sample['Id'] = document_id

                self.samples = self.samples.append(sample, ignore_index=True)

                print "Document #" + document_id + " parsed"

            self.samples = self.samples.fillna(0)

        etime = time.time()
        print "Documents in " + self.exp + ' parsed'
        print "End time: " + time.ctime(etime)
        print "Time taken: " + str(etime - stime) + " seconds"

        cob.update_category_done_list([self.category_id1, self.category_id2])

        fob.destroy_list()
Code Example #25
def decorate_roof_shape(obj, options=Map()):
    settings = options.options

    if settings.roof_shape_color_pattern == "RainbowGlass":
        material = Texture1D.COMMON_TEXTURES.RainbowGlass
    elif settings.roof_shape_color_pattern == "OldStoneWall":
        material = Texture1D.COMMON_TEXTURES.OldStoneWall
    elif settings.roof_shape_color_pattern == "WoodBlends":
        material = Texture1D.COMMON_TEXTURES.WoodBlends
    elif settings.roof_shape_color_pattern == "Glow":
        material = Texture1D.COMMON_TEXTURES.Glow
    else:
        material = Blocks.match(settings.roof_material)

    if not material:
        material = obj.material

    boundaries = vg.bounds(options.corner_vectors)

    min_radius = min(boundaries.x_radius, boundaries.z_radius)

    pos = boundaries.center
    if settings.roof_shape_object == "cylinder":
        func = vg.cylinder
        height = min_radius * (settings.roof_shape_height_multiplier or 1)
    elif settings.roof_shape_object == "cone":
        func = vg.cone
        height = min_radius * (settings.roof_shape_height_multiplier or 1)
    elif settings.roof_shape_object == "box":
        func = vg.box
        height = min_radius * (settings.roof_shape_height_multiplier or 1)
    else:  # sphere
        func = vg.oblate_sphere
        height = None
        if settings.roof_shape_floating:
            pos = vg.up(boundaries.center, min_radius)

    sides = func(pos, min_radius, tight=settings.roof_shape_tight, height=height,
                 options=Map(min_y_pct=.5, x_radius=boundaries.x_radius, z_radius=boundaries.z_radius))

    roof_lists = list()
    roof_lists.append(Map(blocks=sides, material=material))
    obj.features.append(Feature("roof", boundaries.center, Map(block_lists=roof_lists)))

    obj.points_edges = []
    return obj
Code Example #26
File: BamCoverage.py  Project: moira-kelly/GAEMR
    def __initialize_windows(self,bam,window):
        window_coverage = {}
        lengths = self.__get_header_info(bam)
        
        for seq in sorted(lengths.iterkeys()):

            length = lengths[seq]

            for i in xrange(0,length,window):
                start = i
                end = i + window
                if end > length:
                    end = length
                if seq not in window_coverage:
                    window_coverage[seq] = []
                window_coverage[seq].append(Feature(start,end,0,seq))

        return window_coverage,lengths
Code Example #27
def preprocess(paths, classes):
    f = Feature()
    X = []
    y = []
    start = time.perf_counter()
    for index, path in enumerate(paths):
        print('Preprocessing', index, path)
        ar = f.getFeature(path)
        if not ar.any():  # skip images where feature extraction failed (all-zero vector)
            continue
        X.append(ar)
        y.append(classes[index])

    test_time = time.perf_counter() - start
    print("Preprocessing Total time: {0:.2f}".format(test_time))
    X = np.array(X)
    y = np.array(y)
    return X, y
Code Example #28
def decorate_roof_triangular(obj, options=Map()):
    settings = options.options

    material = Blocks.match(settings.roof_material)
    if material:
        obj.material = material

    p1, p2, radius, ends, sides = vg.best_points_for_triangular_roof(
        options.corner_vectors)
    chop_pct = settings.roof_triangular_chop_pct or 0

    radius += settings.roof_triangular_overhang

    height = radius
    if round(height) == int(height):
        height += 1

    if settings.roof_triangular_stairs or settings.roof_triangular_end_cap_in:
        # It's a more complex roof, build it as a Feature
        roof_lists = vg.prism_roof(
            p1,
            p2,
            height=height,
            radius=radius,
            chop_pct=chop_pct,
            sloped=settings.roof_triangular_sloped,
            material=obj.material,
            endpoint_out=settings.roof_triangular_end_cap_out)

        obj.features.append(Feature("roof", p1, Map(block_lists=roof_lists)))

    else:
        # Nothing fancy, color each block all the same type
        roof = vg.triangular_prism(p1,
                                   p2,
                                   height=height,
                                   radius=radius,
                                   chop_pct=chop_pct,
                                   sloped=settings.roof_triangular_sloped)
        obj.points.update(roof)

    obj.points_edges = []

    return obj
Code Example #29
def decorate_castle_outer_wall(obj, options):
    height = options.options.roof_battlement_height
    spacing = options.options.roof_battlement_space

    p1 = options.p1
    p2 = options.p2

    # TODO: Add in inner and outer, create walkway and arrow slits

    # TODO: Add X,Z outward from center as option
    roof_line = vg.getLine(p1.x, p1.y + height, p1.z, p2.x, p2.y + height, p2.z)
    obj.points.extend(vg.points_spaced(roof_line, Map(every=spacing)))

    mid_points = vg.middle_of_line(obj.bottom(), Map(center=True, max_width=2, point_per=10))
    for vec in mid_points:
        obj.features.append(
            Feature("door", vec, Map(cardinality=obj.cardinality, door_inside=options.options.door_inside)))

    return obj
Code Example #30
def get_audio_contents(c, filename):
    if c is not None:
        ctype, cstring = str(c).split(',')
        decoded = base64.b64decode(cstring)
        music['filepath'] = os.path.dirname(
            os.path.realpath(__file__)) + "/resource/temp/" + filename
        try:
            # write the decoded audio bytes to the temp file
            with open(music.get('filepath'), 'wb') as f:
                f.write(decoded)
        except OSError:
            print("something went wrong")
        # record all necessary information into dictionary
        music['name'] = filename
        music['binary'] = c
        music_feature = Feature(music['filepath'])
        # get all extracted feature
        music['energy_feature'] = (Feature.sync_frames(
            music_feature,
            music_feature.extract_energy_features())).mean(axis=0)
        music['timbre_feature'] = (Feature.sync_frames(
            music_feature,
            music_feature.extract_timbre_features())).mean(axis=0)
        music['melody_feature'] = (Feature.sync_frames(
            music_feature,
            music_feature.extract_melody_features())).mean(axis=0)
        music['rhythm_feature'] = (Feature.sync_frames(
            music_feature, music_feature.extract_rhythm_features()))[:-3]
        # data preparation for prediction
        data = misc.series_to_supervised(
            np.transpose(music_feature.get_all_features()), 3, 1)
        data = data.values.reshape(data.values.shape[0], 4, 146)
        predict = model.predict(data)
        # save prediction into dictionary
        music['arousal_predict'] = predict[:, 0]
        music['valance_predict'] = predict[:, 1]
        music['duration'] = [
            i for i in misc.frange(0.5,
                                   len(music['timbre_feature']) * 0.5, 0.5)
        ]
        '''['%d:%2.1f' % (int((i + 1.5) / 60), (i + 1.5) % 60) for i in