Example #1
def triangle_area(vertices):

    side_a = dist(vertices[0], vertices[1])
    side_b = dist(vertices[1], vertices[2])
    side_c = dist(vertices[2], vertices[0])
    s = 0.5 * (side_a + side_b + side_c)

    return np.sqrt(s * (s - side_a) * (s - side_b) * (s - side_c))
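
A minimal self-contained check of the snippet above, assuming the snippet's dist is an ordinary Euclidean distance (math.dist is used here) and np is NumPy; neither import appears in the original:

import numpy as np
from math import dist  # assumption: the snippet's dist() is a plain Euclidean distance

def triangle_area(vertices):
    side_a = dist(vertices[0], vertices[1])
    side_b = dist(vertices[1], vertices[2])
    side_c = dist(vertices[2], vertices[0])
    s = 0.5 * (side_a + side_b + side_c)
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c))
    return np.sqrt(s * (s - side_a) * (s - side_b) * (s - side_c))

print(triangle_area([(0, 0), (3, 0), (0, 4)]))  # 3-4-5 right triangle -> 6.0
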
Example #2
def elbow(centers, clusters):
    # within-cluster distance totals, one per metric (avoid shadowing the builtin sum)
    euclidean_sum = 0
    cosine_sum = 0
    for klaster in range(len(clusters)):
        for tweet in clusters[klaster]:
            euclidean_sum = euclidean_sum + distance.dist(centers[klaster], tweet, type='euclidean')
            cosine_sum = cosine_sum + distance.dist(centers[klaster], tweet, type='cosine')
    return euclidean_sum, cosine_sum
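
The function above accumulates, for every cluster, the distances from the cluster centre to each member, once per metric; this total is the quantity plotted against k in the elbow method. A rough standalone equivalent for the Euclidean case, using plain NumPy instead of the project's distance.dist (centers as a list of vectors, clusters as a list of lists of vectors):

import numpy as np

def elbow_euclidean(centers, clusters):
    # total within-cluster distance: sum over clusters of the distance
    # from each member to its cluster centre
    total = 0.0
    for center, members in zip(centers, clusters):
        for point in members:
            total += np.linalg.norm(np.asarray(point) - np.asarray(center))
    return total

print(elbow_euclidean([[0, 0]], [[[1, 0], [0, 2]]]))  # 1.0 + 2.0 = 3.0
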
Example #3
def distTriangleMat(coordslist,
                    full_matrix=True,
                    pbc=False,
                    cell_a=0,
                    cell_b=0,
                    cell_c=0,
                    cell_alpha=90,
                    cell_beta=90,
                    cell_gamma=90):
    """
    function to calculate distance between ALL atoms, return a OD matrix, 2 dimensional.\n
    # input requirement\n
    coordlist: atomic coordinates, standardized and widely used in this package\n
    full_matrix: [bool] output a fully diagonal matrix or not. If set as false, half of elements in output matrix will be zero cuz have not been calculated\n
    pbc: [bool] if use periodic boundary condition correction. Useful in periodic system, set to False if called in an isolated system calculation\n
    other six parameters: MUST be set reasonable values if set pbc as True, these are cell parameters\n
    """

    natom = np.shape(coordslist)[0]
    # coordslist may have either 3 or 4 columns, so indices -1, -2 and -3 are used to access x, y, z
    distmat = np.zeros((natom, natom))
    print('DISTANCE| distance matrix generated, size: ' + str(natom) + 'x' +
          str(natom) + '.')
    for i in range(natom):

        if pbc:
            distlist = dist(atomlist=coordslist[i + 1:][:],
                            pointxyz=coordslist[i][-3:],
                            point_in_coord=True,
                            pbcFlag=True,
                            pbc_a=cell_a,
                            pbc_b=cell_b,
                            pbc_c=cell_c,
                            pbc_alpha=cell_alpha,
                            pbc_beta=cell_beta,
                            pbc_gamma=cell_gamma)

        else:
            distlist = dist(atomlist=coordslist[i + 1:][:],
                            pointxyz=coordslist[i][-3:])

        print('DISTANCE| calculate distance list from atom ' + str(i) +
              ' to other atoms...')

        for j in range(natom - i - 1):

            distmat[i][i + j + 1] = distlist[j]
            if full_matrix:
                distmat[i + j + 1][i] = distmat[i][i + j + 1]

    return distmat
Example #4
def crime_main():
    '''
    This function reads in the 2015 and 2016 Washington DC crime data, drops
    unnecessary data, adds distance to nationals park column using the distance
    module (see distance.py), and converts time stamps into datetime objects
    by applying date_time() function.

    Returns crime2015 and crime2016 dataframes.
    '''

    cols = [
        'METHOD', 'OFFENSE', 'WARD', 'PSA', 'BLOCK_GROUP', 'LATITUDE',
        'LONGITUDE', 'START_DATE', 'END_DATE'
    ]

    crime2015 = pd.read_csv('Crime2015.csv', usecols=cols)

    crime2016 = pd.read_csv('Crime2016.csv', usecols=cols)
    '''
    Nationals Park is in Washington's 6th Ward.

    There is a significant distance from the stadium to the border with the
    2nd Ward to the north-east, which runs along I-695, limiting any foot
    traffic spillover into the 2nd Ward.

    The Potomac and Anacostia rivers limit any foot traffic from the
    stadium to the south-west and south, respectively, and form the border
    between the 6th and 8th Wards. Source: Google Maps

    As such, we can safely drop any crime that did not occur in the 6th
    Ward before performing any further manipulation.
    '''

    crime2015, crime2016 = (crime2015[crime2015.WARD == 6].reset_index(
        drop=True), crime2016[crime2016.WARD == 6].reset_index(drop=True))

    from distance import dist  # see distance.py

    point = (38.873181, -77.007546)  # approx. lat, lon of Nationals Stadium

    crime2015 = dist(crime2015, 'LATITUDE', 'LONGITUDE', point)

    crime2016 = dist(crime2016, 'LATITUDE', 'LONGITUDE', point)

    crime2015 = date_time(crime2015)

    crime2016 = date_time(crime2016)

    return crime2015, crime2016
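
The project-local distance.py is not shown; its signature can only be inferred from the call above: dist(dataframe, latitude column, longitude column, reference point), returning the dataframe with a distance column added. A hypothetical haversine-based stand-in with that shape (the helper and its 'DISTANCE' column name are assumptions, not the real module):

import numpy as np

def dist(df, lat_col, lon_col, point, out_col='DISTANCE'):
    # great-circle (haversine) distance in km from every row to `point`
    lat1, lon1 = np.radians(df[lat_col].values), np.radians(df[lon_col].values)
    lat2, lon2 = np.radians(point[0]), np.radians(point[1])
    a = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    df[out_col] = 2 * 6371.0 * np.arcsin(np.sqrt(a))
    return df
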
Example #5
def image_stats(file, correct, language="eng", tessconfig=""):
    tess = {}
    tess_out, tess["time"] = readimage.tess_ocr("img.png")
    tess_out = " ".join(tess_out.split()).strip()
    tess["dist"] = distance.dist(correct, tess_out)
    tess["per"] = round((len(correct) - tess["dist"]) / len(correct), 4)
    tess["tpc"] = round(tess["time"] / len(correct) * 1000, 4)

    cune = {}
    cune_out, cune["time"] = readimage.cune_ocr("img.png")
    cune_out = " ".join(cune_out.split()).strip()
    cune["dist"] = distance.dist(correct, cune_out)
    cune["per"] = round((len(correct) - cune["dist"]) / len(correct), 4)
    cune["tpc"] = round(cune["time"] / len(correct) * 1000, 4)
    return tess, cune
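
Each OCR engine gets the same three statistics: "dist", the distance.dist result (apparently an edit distance against the reference text); "per", a rough accuracy (len(correct) - dist) / len(correct); and "tpc", the OCR time divided by the text length and scaled by 1000. A tiny worked example of just those formulas, with made-up numbers:

correct = "hello world"          # 11 characters of reference text
edit_dist, seconds = 2, 0.33     # hypothetical OCR edit distance and runtime
per = round((len(correct) - edit_dist) / len(correct), 4)   # 0.8182
tpc = round(seconds / len(correct) * 1000, 4)               # 30.0 (time per 1000 characters)
print(per, tpc)
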
Example #6
    def nearestdistanceprob(self):

        distance = []
        d = np.empty([0])
        probd = np.empty([0])

        for i in range(self.n.shape[0]):
            D = dist(self.sample, self.n[i])
            distance.append([D.euclidean(), self.l[i], self.v[i]])
            d = np.append(d, D.euclidean())

        distance.sort(key=lambda x: x[0])
        d.sort()

        distance = distance[:self.k]
        d = d[:self.k]

        for i in range(len(d)):
            if d[i] == 0:
                probd = [0] * len(d)
                probd[i] = 100
                return distance, probd

        d = np.asarray(d, dtype=float)  # np.asfarray was removed in NumPy 2.0
        d = np.reciprocal(d)
        for i in range(len(d)):
            probd = np.append(probd, (100 / sum(d)) * (d[i]))

        return distance, probd
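
The final loop converts the k smallest distances into inverse-distance weights expressed as percentages, probd[i] = 100 * (1/d[i]) / sum(1/d), with an earlier special case that assigns 100% to an exact match (distance zero). A quick numeric check of that weighting with NumPy alone:

import numpy as np

d = np.array([1.0, 2.0, 4.0])   # the k nearest distances
w = np.reciprocal(d)            # inverse-distance weights: 1, 0.5, 0.25
probd = 100 * w / w.sum()       # approx. [57.14, 28.57, 14.29]
print(probd, probd.sum())       # the percentages always sum to 100
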
Example #7
def xrd(
    wavelength,
    maxangle,
    dist2scr,
    scrresol,
    if_angle_resol,
    atomcoords):
    natom = np.shape(atomcoords)[0]
    print('XRD| main subroutine activated, totally '+str(natom)+' atoms in simulation box. An accelerated version will come soon.')
    print('XRD| periodic boundary condition method is not implemented, for accuracy, please use SUPERCELL instead.')
    wavelength_ = wavelength * 1E-9
    print('XRD| units conversion: wavelength value input: '+str(wavelength)+' -> '+str(wavelength_)+' (m)')

    scr2d = xrd_madescr(angle_max=maxangle, dist=dist2scr, resol=scrresol, give2d=True)
    scrcoords = xrd_madescr(angle_max=maxangle, dist=dist2scr, resol=scrresol)

    scr_width = np.shape(scr2d)[0]
    print('DISTANCE| i will be iteratively called...')
    print('DIFFRACTION| i will be iteratively called too...')
    print('DIFFRACTION| time complexity estimation: totally '+str(scr_width**2 * scr_width**2 * natom)+' steps.')

    #exit()
    for ix in range(scr_width):
        for iy in range(scr_width):
            pixelcoord = scrcoords[ix+iy*scr_width][:]
            dist2atoms = dist(atomlist=atomcoords, pointxyz=pixelcoord)
            for idist in dist2atoms:
                scr2d[ix][iy] += diffraction(wavelength=wavelength_, 
                                             dist=idist,
                                             anglemode=if_angle_resol,
                                             )
    
    return scr2d
Example #8
    def nverdict(self):

        distance = []

        for i in range(self.n.shape[0]):
            D = dist(self.sample, self.n[i])
            distance.append([D.euclidean(), self.l[i], self.v[i]])

        distance.sort(key=lambda x: x[0])

        y = np.empty([0])

        for i in range(self.k):
            y = np.append(y, (distance[i][2]))

        a = 0
        b = 0
        s = 0

        for i in range(len(y)):
            if y[i] == y[0]:
                a += 1
            else:
                b += 1
                s = y[i]
        if a > b:
            r = y[0]
        else:
            r = s
        return r
Example #9
 def get_min_dist(current_node: geometry.Node,
                  node_list: list[geometry.Node]) -> geometry.Node:
     d = {}
     for node in node_list:
         d[node] = distance.dist(current_node, node)
     # heapq.nsmallest returns a list of (node, distance) pairs; unpack the
     # single winner so the annotated return type (geometry.Node) holds
     min_node = heapq.nsmallest(1, d.items(), key=lambda x: x[1])[0][0]
     return min_node
Example #10
 def get_k_nearest_neighbor(self, vertex: Node.Node, k: int) -> list:
     distance_list = {}
     for node in self.vertices:
         if node != vertex:
             distance_list[node] = distance.dist(node, vertex)
     k_nearest_neighbor = heapq.nsmallest(k, distance_list.items(), key=lambda x: x[1])
     self.neighbors[vertex] = k_nearest_neighbor
     return k_nearest_neighbor
Example #11
File: models.py  Project: oxygenum44/zpi
 def closest_centroids(self, centroids, type_dist):
     assigned_centroids = np.zeros((self.m, 1))
     for i in range(0, self.m):
         distances = np.zeros((self.k, 1))
         for j in range(0, self.k):
             distances[j] = d.dist(self.data[i],
                                   centroids[j],
                                   type=type_dist)
         ix = np.argmin(distances)
         assigned_centroids[i] = ix
     return assigned_centroids
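
This is the assignment step of k-means: for each of the m data points, compute the distance to each of the k centroids and keep the index of the nearest one. For the Euclidean case the double loop can be collapsed with broadcasting; a vectorized sketch using plain NumPy rather than the project's d.dist:

import numpy as np

def closest_centroids_euclidean(data, centroids):
    # data: (m, n), centroids: (k, n); broadcasting yields an (m, k) distance matrix
    dists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
    return np.argmin(dists, axis=1)   # index of the nearest centroid for every point

data = np.array([[0.0, 0.0], [5.0, 5.0], [0.2, 0.1]])
centroids = np.array([[0.0, 0.0], [5.0, 5.0]])
print(closest_centroids_euclidean(data, centroids))   # [0 1 0]
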
Example #12
def closest_center(centers):
    closest_list = []
    print(centers)
    for center in range(len(centers)):
        min_distance = float('inf')
        closest = None
        for center2 in range(len(centers)):
            distance_current = distance.dist(centers[center], centers[center2], type='cosine')
            if distance_current < min_distance and center != center2:
                closest = center2
                min_distance = distance_current
        closest_list.append(closest)
    return closest_list
Example #13
def angles(A):
   a_a = A[0]
   b_a = A[1]
   c_a = A[2]
   D_ab = [a_a, b_a]
   ab = distance.dist(D_ab)
   D_bc = [b_a, c_a]
   bc = distance.dist(D_bc)
   D_ca = [c_a, a_a]
   ca = distance.dist(D_ca)
#   if ((ab <= 3.2700) and (bc <= 3.2700) and (ca <= 3.2700)):
   ab_2 = numpy.square(ab)
   bc_2 = numpy.square(bc)
   ca_2 = numpy.square(ca)
   a_ang_rad = numpy.arccos((ca_2 + ab_2 - bc_2)/(2.0 * ca * ab))
   a_ang_deg = numpy.degrees(a_ang_rad)
   b_ang_rad = numpy.arccos((ab_2 + bc_2 - ca_2)/(2.0 * ab * bc))
   b_ang_deg = numpy.degrees(b_ang_rad)
   c_ang_rad = numpy.arccos((bc_2 + ca_2 - ab_2)/(2.0 * bc * ca))
   c_ang_deg = numpy.degrees(c_ang_rad)
   ang = [a_ang_deg, b_ang_deg, c_ang_deg, ab, bc, ca]
   return ang
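
angles() applies the law of cosines to the three pairwise distances: the angle at vertex a is arccos((ca^2 + ab^2 - bc^2) / (2 * ca * ab)), and likewise for b and c. A self-contained check with math.dist standing in for the project's distance.dist (which here appears to take a list of two points):

import numpy
from math import dist   # assumption: Euclidean distance between two points

A = [(0, 0), (3, 0), (0, 4)]   # a 3-4-5 right triangle
ab, bc, ca = dist(A[0], A[1]), dist(A[1], A[2]), dist(A[2], A[0])
a_ang = numpy.degrees(numpy.arccos((ca**2 + ab**2 - bc**2) / (2.0 * ca * ab)))
b_ang = numpy.degrees(numpy.arccos((ab**2 + bc**2 - ca**2) / (2.0 * ab * bc)))
c_ang = numpy.degrees(numpy.arccos((bc**2 + ca**2 - ab**2) / (2.0 * bc * ca)))
print(a_ang, b_ang, c_ang)     # approx. 90.0, 53.13, 36.87
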
Example #14
    def nearestn(self):

        distance = []

        for i in range(self.n.shape[0]):
            D = dist(self.sample, self.n[i])
            distance.append([D.euclidean(), self.l[i], self.v[i]])

        distance.sort(key=lambda x: x[0])
        y = np.empty([0])

        for i in range(self.k):
            y = np.append(y, (distance[i][2]))

        return y
Example #15
File: models.py  Project: oxygenum44/zpi
 def compute_centroids(self, closest_centroids, type_dist):
     centroids = np.zeros((self.k, self.n))
     for i in range(0, self.k):
         min_dist_tweet = None
         min_dist = math.inf
         tweets_same_centroid = (closest_centroids == i).squeeze()
         for tweet_i in self.data[tweets_same_centroid]:
             dist = 0
             for tweet_j in self.data[tweets_same_centroid]:
                 dist += d.dist(tweet_i, tweet_j, type_dist)
             if dist < min_dist:
                 min_dist = dist
                 min_dist_tweet = tweet_i
         centroids[i] = min_dist_tweet
     return centroids
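
compute_centroids does not average cluster members; it picks, for each cluster, the member whose summed distance to all other members is smallest, i.e. a medoid, which also works for non-Euclidean metrics such as the cosine distance used elsewhere in this project. A compact NumPy sketch of that choice for the Euclidean case:

import numpy as np

def medoid(points):
    # pairwise Euclidean distances; the medoid minimises the row sums
    dists = np.linalg.norm(points[:, None, :] - points[None, :, :], axis=2)
    return points[np.argmin(dists.sum(axis=1))]

pts = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 0.0]])
print(medoid(pts))   # [1. 0.] -- smallest total distance to the other members
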
Example #16
    def nearestdistance(self):

        distance = []
        d = np.empty([0])

        for i in range(self.n.shape[0]):
            D = dist(self.sample, self.n[i])
            distance.append([D.euclidean(), self.l[i], self.v[i]])
            d = np.append(d, D.euclidean())

        distance.sort(key=lambda x: x[0])
        d.sort()

        distance = distance[:self.k]
        d = d[:self.k]

        return distance, d
Example #17
File: models.py  Project: oxygenum44/zpi
 def closest_centroids(self, centroids, type_dist):
     assigned_centroids = np.zeros((self.m, 1))
     for i in range(0, self.m):
         distances = np.zeros((self.k, 1))
         found_same = False
         found_same_index = -1
         for j in range(0, self.k):
             distances[j] = d.dist(self.data[i],
                                   centroids[j],
                                   type=type_dist)
             if np.array_equal(self.data[i], centroids[j]):
                 found_same = True
                 found_same_index = j
         if found_same:
             assigned_centroids[i] = found_same_index
         else:
             assigned_centroids[i] = np.argmin(distances)
     return assigned_centroids
Example #18
    def hclassprob(self):

        distance = []
        d = np.empty([0])
        probd = np.empty([0])

        for i in range(self.n.shape[0]):
            D = dist(self.sample, self.n[i])
            distance.append([D.euclidean(), self.l[i], self.v[i]])
            d = np.append(d, D.euclidean())

        distance.sort(key=lambda x: x[0])
        d.sort()
        y = np.empty([0])

        for i in range(self.k):
            y = np.append(y, (distance[i][2]))

        for i in range(len(y)):
            if y[i] == 'M':
                y[i] = 1
            else:
                y[i] = 0

        y = y.astype(float)  # np.float was removed from NumPy; the builtin float is equivalent here

        d = d[:self.k]

        for i in range(len(d)):
            if d[i] == 0:
                probd = [0] * len(d)
                probd[i] = 100
                MP = sum(np.multiply(y, probd))
                return MP

        d = np.asarray(d, dtype=float)  # np.asfarray was removed in NumPy 2.0
        d = np.reciprocal(d)
        for i in range(len(d)):
            probd = np.append(probd, (100 / sum(d)) * (d[i]))

        MP = sum(np.multiply(y, probd))
        return MP
Example #19
def updateTravel():
    global tTime
    newDist = distance.dist()
    app.distWidget.config(text=newDist[0])
    app.distWidget.after(1800000, updateTravel)
Example #20
        pmid_idx[pmid] = len(mats)
        mats.append(D)

    # Compute distance matrix
    print('Computing distance matrix...')
    n = len(mats)
    # dist_mat = np.zeros((n, n), dtype=np.float32)
    dist_mat = np.memmap('dists.dat',
                         dtype=np.float32,
                         mode='w+',
                         shape=(n, n))

    total = ((n * n) - n) / 2
    for i, j in tqdm(indices(n), total=total):
        dist_mat[i, j] = dist(mats[i], mats[j])
    dist_mat = symmetrize(dist_mat)

    # np.save(dist_mat, 'dists.npy', allow_pickle=False)

    print('Clustering...')
    _, labels = dbscan(dist_mat,
                       eps=1.,
                       min_samples=5,
                       metric='precomputed',
                       n_jobs=-1)
    print(len(labels))

    # Alternatively, OPTICS instead of DBSCAN
    # opt = optics(dist_mat, eps=1., minpts=5, data_type='distance_matrix')
    # opt.process()
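
indices(n) and symmetrize() are project helpers that are not shown; from the way they are used, indices(n) presumably yields the ((n*n)-n)/2 strict upper-triangle index pairs and symmetrize() mirrors them into the lower triangle. Hypothetical implementations with that behaviour:

import numpy as np
from itertools import combinations

def indices(n):
    # every i < j pair, i.e. the strict upper triangle of an n x n matrix
    return combinations(range(n), 2)

def symmetrize(mat):
    # copy the upper triangle onto the lower triangle (diagonal assumed zero)
    return mat + mat.T - np.diag(np.diag(mat))
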
Example #21
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.8, 0.3)

    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            area = w * h
            confidence = confidences[i]
            #color = colors[class_ids[i]]
            color1 = (0, 255, 0)
            color2 = (0, 0, 255)
            if (label in objects):
                count[frame_id - 1] = count[frame_id - 1] + 1
                z = 512 - (y + h)
                d = distance.dist(z)
                if (x > 250):
                    cv2.rectangle(frame, (x, y), (x + w, y + h), color2, 2)
                    cv2.rectangle(frame, (x, y - 30), (x + 150, y), (0, 0, 0),
                                  -1)
                    cv2.putText(frame, label, (x, y - 5), font2, 1, color2, 3)
                    cv2.rectangle(frame, (x, y + h), (x + 100, y + h + 20),
                                  (0, 0, 0), -1)
                    cv2.putText(frame, "dist-" + str(d) + "m", (x, y + h + 15),
                                font2, 1, color2, 3)
                    if (flag[frame_id] == 0):
                        cv2.rectangle(frame, (0, 0), (400, 40), (0, 0, 0), -1)
                        cv2.putText(frame, "DO NOT OVERTAKE", (10, 30), font,
                                    1, color2, 3)
                        flag[frame_id] = 1
                else:
Example #22
n = df.shape[0] - 1  # number of patients
f = list(df["f"].astype('int'))  # frequency of visits for every patient
et = list(
    df["et"].astype('int'))  # earliest service start time for each patient
lt = list(df["lt"].astype('int'))  # latest service start time for each patient
sd = list(df["sd"].astype('int'))  # service duration for each patient
q = list(df["Q'"].astype(
    'int'))  # qualification of first nurse required for each patient
Q = list(df_n["Q"].astype('int'))  # qualification of each nurse
m = df_n.shape[0]  # number of nurses
bigM = 10000  # big-M constant: a sufficiently large number
X, Y = list(df["x"]), list(
    df["y"])  # coordinates X and Y of each patient and depot
depot = [X[0], Y[0]]  # depot coordinates

grid = distance.dist(X, Y, bigM)  # get distance matrix

start_time = timeit.default_timer()

M = gb.Model("master_problem")  # initialize the model

d, x, y, z, s = {}, {}, {}, {}, {}  # initialize decision variables

decision_variables.decisionVariables(M, gb, m, n, t, d, x,
                                     y)  # add decision variables to model

objective_function.objectiveFunction(M, gb, d,
                                     n)  # add objective function to model

master_constraints.masterConstraints(M, d, x, y, m, n, t, q, Q, f,
                                     gb)  # add master constraints to model
Example #23
from distance import dist, wer

print(dist(hyp='', gold=''))
print(dist(hyp='a a a', gold='a a a'))
print(dist(hyp='a b', gold='a a a'))
print(dist(hyp='a b c a', gold='a a a'))
print(dist(hyp='a b c a d a a', gold='a a d a'))

print('zero wer', wer(hyp='a a a', gold='a a a') == 0)
print('si wer', wer(hyp='a b', gold='a a a') == 2.0/3)
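
The expected values here match a word-level Levenshtein distance, with wer() dividing that distance by the number of words in gold: dist('a b', 'a a a') is 2, hence wer = 2/3. A minimal sketch of such a pair of functions, assuming that is indeed what distance.py implements:

def dist(hyp='', gold=''):
    # word-level Levenshtein distance between two whitespace-tokenised strings
    h, g = hyp.split(), gold.split()
    prev = list(range(len(g) + 1))
    for i, hw in enumerate(h, 1):
        cur = [i]
        for j, gw in enumerate(g, 1):
            cur.append(min(prev[j] + 1,                # deletion
                           cur[j - 1] + 1,             # insertion
                           prev[j - 1] + (hw != gw)))  # substitution
        prev = cur
    return prev[-1]

def wer(hyp='', gold=''):
    return dist(hyp, gold) / len(gold.split())

print(dist(hyp='a b', gold='a a a'))          # 2
print(wer(hyp='a b', gold='a a a') == 2.0/3)  # True
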
Example #24
def cluster_and_trace(data):
    """Data is clustered for each window and cluster traces are computed.
       Args:
           data (DataFrame): matrix (n x t) with n time series for t time steps

       Returns:
           dict: A dictionary with eps as keys and TraceGraphLayer as value
       """

    tend = len(data.index) - conf.wnd + 1

    assert tend >= 0, 'Loaded time shorter than window size'
    traces = {}
    for eps in utils.epsRange():
        traces[eps] = []

    time_step_index = 0
    start = 0
    dist = None

    # do for each time window cluster and tracing
    for time_step_date in tqdm(range(start, tend, conf.tstep)):
        current_data_slice = data.iloc[slice(time_step_date, time_step_date +
                                             conf.wnd), :]
        if conf.verbose:
            print(time_step_index, time_step_date, current_data_slice.index[0],
                  '-', current_data_slice.index[-1], ',',
                  current_data_slice.shape[0])
        assert current_data_slice.shape[0] == conf.wnd

        with PhaseTimer('clt distance'):
            # get distances between time series to feed multiDBSCAN
            # (the distance matrix is only needed for DBSCAN and corrnorm)
            dist = distance.dist(current_data_slice)

        with PhaseTimer('clt clustering'):
            # cluster data
            clusters = cluster.multiDBSCAN(current_data_slice, dist,
                                           current_data_slice.index[0])

        with PhaseTimer('clt tracing'):
            for eps in clusters.keys():
                labels = clusters[eps]
                trace = traces[eps]

                trace.append(TraceGraphLayer(labels, current_data_slice))

                if (time_step_index > 0):
                    # get time series labels from previous time step
                    labels_pre = trace[time_step_index - 1].labels
                    # number of clusters
                    n_clusters = max(labels) + 1
                    nClusters_pre = max(labels_pre) + 1

                    counts = np.zeros(nClusters_pre)
                    transitions = np.zeros([nClusters_pre, n_clusters])
                    representatives = []
                    for _ in range(nClusters_pre):
                        representatives.append([])

                    for i in range(len(labels_pre)):
                        if labels_pre[i] != -1:
                            counts[labels_pre[i]] += 1

                            # representatives hold, per cluster, the indices of its member time series
                            representatives[labels_pre[i]].append(i)
                            if labels[i] != -1:
                                transitions[labels_pre[i]][labels[i]] += 1

                    trace[time_step_index - 1].counts = counts
                    trace[time_step_index - 1].transitions = transitions
                    trace[time_step_index - 1].nextLabels = labels
                    trace[time_step_index -
                          1].next_data_slice = current_data_slice

                    if time_step_index > 1:
                        trace[time_step_index -
                              2].next2_data_slice = current_data_slice
                    if time_step_index > 2:
                        trace[time_step_index -
                              3].next3_data_slice = current_data_slice

                #needed for OffstreamRatio 2x
                if L2TRANSITIONS and (time_step_index > 1):
                    labels_pre2 = trace[time_step_index - 2].labels
                    n_clusters = max(labels) + 1
                    n_clusters_pre2 = max(labels_pre2) + 1

                    transitions2 = np.zeros([n_clusters_pre2, n_clusters])
                    for i in range(len(labels_pre2)):
                        if labels_pre2[i] != -1:
                            if labels[i] != -1:
                                transitions2[labels_pre2[i]][labels[i]] += 1

                    trace[time_step_index - 2].transitions2 = transitions2

        time_step_index += 1
    return traces
Example #25
def topol_gen(
    gro_file, 
    param_file, 
    d_tol = 0.1, 
    verbosity = 'debug'
    ):
    """
    main function of\n
    conversion from gro and configuration file to topology file\n
    # input requirement:\n
    gro_file: GROMACS structure file\n
    param_file: configuration file that contains setting on how to make topology file\n
    d_tol: tolerance parameter that controls the precision of judging connectivity between
    atoms, where basic criteria are read from configuration file
    """
    # unlike the structure file, this is probably the only place that needs to parse the configuration
    # file containing the settings for generating topology information from the gro file.
    [_, bond_dict, angle_dict, dihe_dict] = parse_conf_file(filename = param_file)
    if verbosity == 'debug':
        print('SIMUPKGS_DEBUG_MODE| bond:')
        print(bond_dict)
        print('SIMUPKGS_DEBUG_MODE| angle:')
        print(angle_dict)
        print('SIMUPKGS_DEBUG_MODE| dihedral:')
        print(dihe_dict)
    # read-in structure and parse file, convert into array
    data_import = coords_io.readcoords(
                                        filename=gro_file,  # use the file passed in, not a hard-coded name
                                        f_type='gro', 
                                        convert=True
                                        )
    natom = coords_io.np.shape(data_import)[0]
    # in GROMACS, coordinates are in unit nm, not Angstrom!

    dist_matrix = []
    pair_matrix = []
    for iatom in range(natom):

        look_at_this = data_import[iatom][1::]
        [dist_array_iatom, pair_array_iatom] = dist(
                                                    atomlist = data_import, 
                                                    pointxyz = look_at_this, 
                                                    point_in_unit = 'Angstrom',
                                                    set_in_unit = 'Angstrom',
                                                    dist_in_unit = 'Angstrom',
                                                    pbcFlag = False,
                                                    append_ele = data_import[iatom][0]
                                                    )
        dist_matrix.append(dist_array_iatom)
        pair_matrix.append(pair_array_iatom)

    # dist_matrix: (Natom, Natom) shaped
    # pair_matrix: (Natom, Natom) shaped
    if verbosity == 'debug':
        print('SIMUPKGS_DEBUG_MODE| dist_matrix_output: (off)')
        #print(dist_matrix)
        print('SIMUPKGS_DEBUG_MODE| pair_matrix_output: (off)')
        #print(pair_matrix)

        print('SIMUPKGS| bond_bool_matrix initialization...')
    
    # generate connectivity matrix
    bond_bool_matrix = np.zeros(shape = (natom, natom))

    for iatom in range(natom):
        for jatom in range(iatom+1, natom):
            # skip diagonal element
            # extract distance info.
            pairLength = dist_matrix[iatom][jatom]
            pairName = pair_matrix[iatom][jatom] # ['C', 'O']
            if verbosity == 'everything':
                print('SIMUPKGS_DEBUG_MODE| check parameters input in following get_data_from_dict:\n'
                     +'                     handle = '+str(pairName)+'\n'
                     +'                     dataFrom = bond_dict')
            crit_bond = get_data_from_dict(handle = pairName, dataFrom = bond_dict)
            if crit_bond:
                if (pairLength >= crit_bond[1]-d_tol) and (pairLength <= crit_bond[1]+d_tol):
                    # suppose there is a bond!
                    if verbosity == 'debug':
                        print('SIMUPKGS_DEBUG_MODE| one bond is determined by distance with tolerance value '+str(d_tol))
                        print('                     '+str(pairName)+': critical bond length = {} Angstrom'.format(str(crit_bond[1])))
                        print('                     atomic index pair: {}-{}'.format(iatom, jatom))
                    bond_bool_matrix[iatom][jatom] = 1
                    bond_bool_matrix[jatom][iatom] = 1
    # bond_bool_matrix is generated

    bond_print = []
    angle_print = []
    dihe_print = []

    for iatom in range(natom):
        for jatom in range(natom):

            if jatom == iatom:
                continue

            if bond_bool_matrix[iatom][jatom]:
                # find i-j bond
                # double counting is avoided by checking whether jatom is larger than iatom
                if iatom > jatom:
                    # bond is already counted
                    pass
                else:
                    # this bond is not counted yet!
                    # >>>
                    bond2append = [iatom, jatom]
                    get_this_bond = [data_import[iatom][0], data_import[jatom][0]]
                    write_this_bond = get_data_from_dict(handle = get_this_bond, dataFrom = bond_dict)
                    if write_this_bond:
                        bond2append += write_this_bond
                        bond_print.append(bond2append)

                for katom in range(natom):

                    if (katom == iatom) or (katom == jatom):
                        continue

                    if bond_bool_matrix[jatom][katom]:
                        # find j-k bond and i-j-k angle

                        # for angle counting
                        if katom < iatom:
                            # this angle is already counted
                            pass
                        else:
                            # this angle is not counted yet!
                            # >>>
                            angle2append = [iatom, jatom, katom]
                            get_this_angle = [data_import[iatom][0], data_import[jatom][0], data_import[katom][0]]
                            write_this_angle = get_data_from_dict(handle = get_this_angle, dataFrom = angle_dict)
                            if write_this_angle:
                                angle2append += write_this_angle
                                angle_print.append(angle2append)

                        for latom in range(natom):

                            if (latom == iatom) or (latom == jatom) or (latom == katom):
                                continue

                            if bond_bool_matrix[katom][latom]:
                                # find k-l bond and j-k-l angle and i-j-k-l dihedral
                                if latom < iatom:
                                    # this dihedral is already counted
                                    pass
                                else:
                                    # this dihedral is not counted yet!
                                    # >>>
                                    dihe2append = [iatom, jatom, katom, latom]
                                    get_this_dihe = [
                                        data_import[iatom][0], 
                                        data_import[jatom][0], 
                                        data_import[katom][0],
                                        data_import[latom][0]
                                        ]
                                    write_this_dihe = get_data_from_dict(handle = get_this_dihe, dataFrom = dihe_dict)
                                    if write_this_dihe:
                                        dihe2append += write_this_dihe
                                        dihe_print.append(dihe2append)

    return [bond_print, angle_print, dihe_print]
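
The nested loops walk the connectivity matrix: every i-j bond seeds a search for j-k bonds (giving i-j-k angles) and then k-l bonds (giving i-j-k-l dihedrals), with index comparisons preventing each path from being counted twice. A compact sketch of the angle enumeration alone, given a symmetric 0/1 bond matrix:

import numpy as np

def enumerate_angles(bond_bool_matrix):
    natom = bond_bool_matrix.shape[0]
    angles = []
    for j in range(natom):                       # central atom of the angle
        neigh = np.nonzero(bond_bool_matrix[j])[0]
        for a in range(len(neigh)):
            for b in range(a + 1, len(neigh)):   # each unordered neighbour pair once
                angles.append((int(neigh[a]), j, int(neigh[b])))
    return angles

# water-like connectivity: atom 0 bonded to atoms 1 and 2
bonds = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]])
print(enumerate_angles(bonds))   # [(1, 0, 2)]
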
Example #26
def image_processor(input_img, sd, cameraprop, stream_type,
                    threshold_parameters):
    # declare inner helper functions
    def threshold(img, threshold):

        lower_limit = np.array(threshold[0:3], np.uint8)
        upper_limit = np.array(threshold[3:7], np.uint8)

        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        frame_threshed = cv2.inRange(hsv_img, lower_limit, upper_limit)

        return frame_threshed

    def clear_noize(img, max_noize_size):
        img = morphology.label(img)  # create labels in segmented image
        cleaned = morphology.remove_small_objects(img,
                                                  min_size=max_noize_size,
                                                  connectivity=2)

        img = np.zeros((cleaned.shape))  # create array of size cleaned
        img[cleaned > 0] = 255
        img = np.uint8(img)

        return img

    def detect_shapes(img, thresholded_img, sides_num, min_area,
                      approx_epsilon):
        detected_shapes_img = cv2.cvtColor(img, cv2.COLOR_RGB2RGBA)

        contours, _ = cv2.findContours(thresholded_img, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
        # Searching through every region selected to
        # find the required polygon.
        detected_shapes = []
        for cnt in contours:
            approx = cv2.approxPolyDP(
                cnt, approx_epsilon * cv2.arcLength(cnt, True), True)

            # Check whether the number of sides of the selected region equals sides_num.
            if (len(approx)
                    == sides_num) and (detected_shapes_img.shape[0] - 1) * (
                        detected_shapes_img.shape[1] - 1) > cv2.contourArea(
                            cnt) and min_area < cv2.contourArea(cnt):
                cv2.drawContours(detected_shapes_img, [approx], 0, (0, 0, 255),
                                 1)
                detected_shapes.append(approx)

        return detected_shapes_img, detected_shapes

    #function code
    thresholded_img = clear_noize(threshold(input_img, threshold_parameters),
                                  400)

    detected_shapes_img, detected_shapes = detect_shapes(
        input_img, thresholded_img, 4, 200, 0.05)

    if len(detected_shapes) > 0:
        M = cv2.moments(thresholded_img)
        center = [int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])]

        #calc distance, angles, force
        th, ch, k = 2, 1, 1
        x_angle, y_angle = distance.PixelsToAngles(center[0], center[1],
                                                   cameraprop)
        dist = distance.dist(y_angle, th, ch)
        hood_angle, velocity = distance.force(
            th, ch, dist)  #need to set robot and target height

        sd.putValue('shooter', (velocity, hood_angle, x_angle, y_angle))
    else:
        sd.putValue('shooter', (-1, -1, -1, -1))

        center = (-1, -1)

    if stream_type.value == False:
        return cv2.circle(detected_shapes_img, tuple(center), 2, (255, 0, 0))
    else:
        return thresholded_img
Example #27
def is_failure(b1, b2, b3):
    return dist(b1, b2) + dist(b2, b3) > dist(b1, b3)
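
For any true metric the triangle inequality guarantees dist(b1, b3) <= dist(b1, b2) + dist(b2, b3), so the sum exceeds the direct distance exactly when b2 lies off the straight path from b1 to b3; presumably that is the "failure" being flagged. A quick illustration with math.dist standing in for the snippet's dist:

from math import dist   # assumption: Euclidean distance

def is_failure(b1, b2, b3):
    return dist(b1, b2) + dist(b2, b3) > dist(b1, b3)

print(is_failure((0, 0), (1, 0), (2, 0)))   # False: b2 lies on the segment b1-b3
print(is_failure((0, 0), (1, 1), (2, 0)))   # True: the path through b2 is a detour
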
Example #28
def freeEner_isolated(atomlist,
                      enerinfo='0 J',
                      temp=0,
                      pressure=101325,
                      freqlist=[],
                      unituse='2'):
    #--------------------constants definition-----------------------
    R = 8.314
    N_A = 6.02E23
    kb = R / N_A
    h = 6.626E-34
    #--------------------input preprocessing------------------------
    enerinfo = [word for word in enerinfo.split(' ') if word != '']
    ener0 = float(enerinfo[0])
    enerUnit = enerinfo[1]
    if enerUnit == 'j' or enerUnit == 'J':
        ener0 = ener0
    elif enerUnit == 'kj' or enerUnit == 'kJ' or enerUnit == 'KJ' or enerUnit == 'Kj':
        ener0 *= 1E3
    elif enerUnit == 'eV' or enerUnit == 'EV' or enerUnit == 'ev':
        ener0 *= 1.6E-19
    elif enerUnit == 'a.u.' or enerUnit == 'au' or enerUnit == 'A.U.' or enerUnit == 'AU':
        ener0 *= 27.2113838565563 * 1.6E-19
    else:
        print(
            'FreeEner| energy unit can not be recognized, please input unit such as J/kJ/eV/a.u., quit.'
        )
        exit()
    natom = np.shape(atomlist)[0]
    masslist = []

    symbol_ref = 'Ar'
    for iatom in range(natom):

        if type(atomlist[iatom][0]) != type(symbol_ref):
            print('FreeEner| ***error*** invalid format of atomlist. quit.')
            exit()
        masslist.append(eleinfo.getElement(atomlist[iatom][0]))

    mass = np.sum(masslist)
    masslist_kg = [mass / N_A / 1000 for mass in masslist]
    mass_kg = mass / N_A / 1000
    #----------------rotational inertia calculation--------------------------------
    coords = centercoords(atomlist,
                          masspower=True,
                          totmass=mass,
                          masslist=masslist)
    # mass center corrected to 0, 0, 0.
    center = [0, 0, 0]
    r = dist(atomlist=coords, pointxyz=center)  # unit in m

    rot_interia = 0
    for iatom in range(natom):

        rot_interia += masslist_kg[iatom] * r[iatom]**2

    #----------------------------------vibrations-----------------------------------
    E_viblist = [freq * 100 * 3E8 * h for freq in freqlist]
    ZPE = 0
    E_vib = 0  # averaged vibrational energy, not obtained from 2*ZPE
    q_vib = 1
    S_vib = 0
    for e_vib in E_viblist:

        ZPE += 0.5 * e_vib
        if temp == 0:
            continue
        e_vib_norm = e_vib / kb / temp
        q_vib *= 1 / (1 - math.exp(-e_vib_norm))
        E_vib += e_vib * math.exp(-e_vib_norm)
        S_vib += e_vib_norm * math.exp(-e_vib_norm) / (
            1 - math.exp(-e_vib_norm)) - math.log(1 - math.exp(-e_vib_norm))

    # exception of 0K input
    if temp == 0:

        print(
            'FreeEner| 0 K temperature input detected! ZPE correction mode is activated; mass, bond length, pressure\n'
            + '          and frequency information will be discarded.')
        U = ener0 + ZPE  # unit in J
        if unituse == '1':
            ener0 *= N_A
            U *= N_A
            unituse = ' J/mol\n'
        elif unituse == '2':
            ener0 /= 1.6E-19
            U /= 1.6E-19
            unituse = ' eV\n'
        elif unituse == '3':
            ener0 /= 1.6E-19 * 27.2113838565563
            U /= 1.6E-19 * 27.2113838565563
            unituse = ' a.u.\n'
        else:
            print('FreeEner| wrong output unit input, quit.')
            exit()

        print('FreeEner| ZPE correction mode output information:' +
              '          electronic energy    = ' + str(ener0) + unituse +
              '          zero-point corrected = ' + str(U) + unituse)
        exit()

    E_vib /= q_vib

    # partition functions
    q_trans = ((2 * math.pi * mass_kg * kb * temp) / (h**2))**1.5
    q_rot = 8 * math.pi**2 * kb * temp * rot_interia / h
    if masslist[0] == masslist[-1]:
        q_rot /= 2

    print('FreeEner| partition functions information:\n' +
          '          translation q_trans (volume omitted) = ' + str(q_trans) +
          '\n' + '          rotation q_rot                       = ' +
          str(q_rot) + '\n' +
          '          vibration q_vib                      = ' + str(q_vib) +
          '\n' + '          overall Q = q_trans*q_rot*q_vib      = ' +
          str(q_trans * q_rot * q_vib))

    print(
        'FreeEner| energy calculation starts. Ideal gas assumption is used:\n'
        + '          E_trans + E_rot = 5/2NkT')
    E_rigid = 5 / 2 * kb * temp  # E_rigid = E_trans + E_rot
    U = ener0 + ZPE + E_rigid + E_vib
    H = U + kb * temp

    # entropy calculation; ideal statistical mechanics is used.
    S_trans = R * (5 / 2 + math.log(R * temp / pressure * q_trans))
    S_rot = R * (3 / 2 + math.log(
        math.sqrt(math.pi) / 2 *
        (8 * math.pi**2 * kb * temp / h**2)**1.5 * math.sqrt(q_rot)))
    S_vib = S_vib
    S = S_trans + S_rot + S_vib

    # J·mol-1
    ener0 *= N_A
    ZPE *= N_A
    E_rigid *= N_A
    E_vib *= N_A
    U *= N_A
    H *= N_A

    # g_elec = 1
    # q_elec = g_elec*math.exp(-ener0/kb/temp) too large to calculate
    A = -N_A * kb * temp * math.log(q_trans * q_rot * q_vib) + ener0

    G = H - temp * S

    if unituse == '1':
        unituse = ' J/mol\n'
    elif unituse == '2':
        # eV
        ener0 /= 1.6E-19 * 6.02E23
        ZPE /= 1.6E-19 * 6.02E23
        E_rigid /= 1.6E-19 * 6.02E23
        E_vib /= 1.6E-19 * 6.02E23
        U /= 1.6E-19 * 6.02E23
        H /= 1.6E-19 * 6.02E23
        G /= 1.6E-19 * 6.02E23
        A /= 1.6E-19 * 6.02E23
        unituse = ' eV\n'
    elif unituse == '3':
        # a.u.
        ener0 /= 1.6E-19 * 6.02E23 * 27.2113838565563
        ZPE /= 1.6E-19 * 6.02E23 * 27.2113838565563
        E_rigid /= 1.6E-19 * 6.02E23 * 27.2113838565563
        E_vib /= 1.6E-19 * 6.02E23 * 27.2113838565563
        U /= 1.6E-19 * 6.02E23 * 27.2113838565563
        H /= 1.6E-19 * 6.02E23 * 27.2113838565563
        G /= 1.6E-19 * 6.02E23 * 27.2113838565563
        A /= 1.6E-19 * 6.02E23 * 27.2113838565563
        unituse = ' a.u.\n'

    print(
        '\nFreeEner| energies information:\n' +
        '          electronic energy              = ' + str(ener0) +
        str(unituse) + '          zero-point energy              = ' +
        str(ZPE) + str(unituse) +
        '          translation + rotation         = ' + str(E_rigid) +
        str(unituse) + '          vibrational energy             = ' +
        str(E_vib) + str(unituse) +
        '          -------------------------------------------------------------\n'
        + '          total energy (internal energy) = ' + str(U) +
        str(unituse) + '          total enthalpy                 = ' + str(H) +
        str(unituse))

    print(
        'FreeEner| entropies information:\n' +
        '          translation                    = ' + str(S_trans) +
        ' J/mol/K\n' + '          rotation                       = ' +
        str(S_rot) + ' J/mol/K\n' +
        '          vibration                      = ' + str(S_vib) +
        ' J/mol/K\n' +
        '          -------------------------------------------------------------\n'
        + '          total entropy                  = ' + str(S) +
        ' J/mol/K\n')

    print('FreeEner| free energy information:\n' +
          '          Gibbs free energy     = ' + str(G) + str(unituse) +
          '          Helmholtz free energy = ' + str(A) + str(unituse) +
          '          temperature           = ' + str(temp) + ' K\n' +
          '          pressure              = ' + str(pressure) + ' Pa')
Example #29
#Status: Working/Tested

import time
import calendar
from weather import WeatherClass
import distance

try:
    # python3
    from tkinter import *
except ImportError:
    # python2
    from Tkinter import *

time1 = time.localtime(time.time())
tTime = distance.dist()
'''Widget class'''


class Application(Frame):
    def createTime(self):  #Create Time widget
        self.Time = Label(self.top_right,
                          fg='white',
                          background='black',
                          font=self.labelfont)
        self.Time.pack(side='right')
        self.Time.config(borderwidth=0)
        self.Time.config(highlightthickness=0)

    def createCal(self):  #Create calendar widget
        self.Cal = Text(self.bottom_left,
Example #30
                dat = np.loadtxt(f)
            else:
                syns = cw.go(fname_list,
                             gain_pot,
                             gain_dep,
                             post_thresh_hi,
                             post_thresh_lo,
                             depr_len,
                             pot_len,
                             st=0.5)
                dat = np.zeros((len(syns), col))

                new_syns = [syn[0] for syn in syns]
                for i, syn in enumerate(new_syns):

                    dat[i, 0] = dst.dist(syn.fname)
                    dat[i, 1] = syn.weight[-1]
                    dat[i, 2] = max(syn.Ca)
                    dat[i, 3], dat[i, 4] = dst.duration(syn.Ca, syn.dt)

                np.savetxt(fnams[2 * k + j] + '.txt',
                           dat,
                           header="distance weight ca tltp tltd",
                           comments='')
                print(fnams[2 * k + j] + '.txt')
            if j % 2:  #Pre-Post
                ax[0].plot(dat[:, 0],
                           1000 * dat[:, 3],
                           shape[k],
                           color='k',
                           label=labels[k])
Example #31
def average_distance(tweet, klaster):
    suma = 0
    for tweet_itr in klaster:
        suma = suma + distance.dist(tweet, tweet_itr, type='cosine')
    return suma / (len(klaster) - 1) if len(klaster) != 1 else 0.01
Example #32
if __name__ == '__main__':
    with PhaseTimer('total'):
        s = loader.load()

        if conf.tstart is not None and conf.tend is not None:
            if conf.wnd is not None:
                # sliding window clustering
                tend = len(s.index) - conf.wnd + 1
                wndSize = conf.wnd
            else:
                # cluster entire loaded data
                tend = 1
                wndSize = len(s.index)
        else:
            # just cluster this single window
            tend = 1
            wndSize = conf.wnd

        assert tend >= 0, 'Loaded time shorter than window size'
        for t in tqdm(range(0, tend, conf.tstep)):
            p = s.iloc[slice(t, t + wndSize), :]
            print(t, p.index[0], '-', p.index[-1], ',', p.shape[0])
            assert p.shape[0] == wndSize

            dist = distance.dist(p)

            #print('clustering')
            multiDBSCAN(p, dist, p.index[0], True)

        if not conf.plotFile:
            plt.show()