Example #1
from collections import defaultdict as dic

class TrieNode:
    def __init__(self, data=None):
        self.data = data  # optional payload for this node
        self.wc = 0  # number of occurrences of this char; useful for generating suggestions
        self.word_end = False  # marks a node as the end of a word
        # If word_end stays a boolean, the trie can store only unique words;
        # initializing it to 0 and incrementing on every duplicate word makes the trie duplicate-aware.
        self.childs = dic(lambda: None)  # map from character to child node
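A minimal usage sketch, assuming the TrieNode wrapper above; the insert helper is hypothetical, not part of the original snippet:

def insert(root, word):
    # walk the trie, creating nodes as needed, one character at a time
    node = root
    for ch in word:
        if node.childs[ch] is None:
            node.childs[ch] = TrieNode(ch)
        node = node.childs[ch]
        node.wc += 1  # counts duplicates, as the comments above describe
    node.word_end = True

root = TrieNode()
insert(root, "cat")
insert(root, "cat")
print(root.childs["c"].wc)  # 2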
Example #2

from collections import defaultdict as dic

def BinTreeBuilder(n):
    nodes = (2**(n - 1)) - 1  # internal nodes of a complete binary tree with n levels
    BT = dic(list)
    for i in range(1, nodes + 1):
        # add each edge in both directions so the graph is undirected
        BT[i].append(i * 2)
        BT[i * 2].append(i)
        BT[i].append((i * 2) + 1)
        BT[(i * 2) + 1].append(i)
    return BT
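For example, for n = 3 the builder returns the adjacency list of a 7-node complete binary tree:

BT = BinTreeBuilder(3)
print(dict(BT))
# {1: [2, 3], 2: [1, 4, 5], 3: [1, 6, 7], 4: [2], 5: [2], 6: [3], 7: [3]}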
Example #3
from collections import defaultdict as dic

def judge_753(s):
    # return 1 if s consists only of the digits 7, 5 and 3 and contains all three, else 0
    judge = 1
    flag = dic(int)
    for ch in s:
        if ch not in '753':
            judge = 0
            break
        flag[int(ch)] = 1

    if flag[3] != 1 or flag[5] != 1 or flag[7] != 1:
        judge = 0
    return judge
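For example:

print(judge_753("753"))   # 1
print(judge_753("35773")) # 1
print(judge_753("75"))    # 0 (no 3)
print(judge_753("7534"))  # 0 (disallowed digit)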
Example #4

import random
from collections import defaultdict as dic

def RandGraphBuilder(n):
    # 2-D matrix marking which vertex pairs have already been considered
    Rvisited = [[0] * n for _ in range(n)]
    #G is the dictionary that is the adjacency list
    G = dic(list)
    for i in range(0, n):
        for j in range(0, n):
            if i == j:
                continue  # skip self-loops
            if not Rvisited[i][j] and not Rvisited[j][i]:
                Rvisited[i][j] = 1
                Rvisited[j][i] = 1
                e = random.randint(0, 1)
                if e:
                    G[i].append(j)
                    G[j].append(i)
    return G
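Seeding the generator makes a run reproducible (the exact edge set depends on the random stream):

random.seed(42)
G = RandGraphBuilder(4)
print(dict(G))  # adjacency list of a random undirected graph on vertices 0..3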
Example #5
'''
@Author: rishi
https://cses.fi/problemset/task/1666
'''

from collections import defaultdict as dic

cities, n = map(int, input().split())
roads = []
for i in range(n):
    roads.append(list(map(int, input().split())))

d = dic(list)
for i, j in roads:
    d[i].append(j)
    d[j].append(i)

visited = {}
for i in range(1, cities + 1):
    visited[i] = False

ans = []
for i in range(1, cities + 1):
    if not visited[i]:
        # BFS over this component, collecting its vertices
        temp = []
        q = [i]
        visited[i] = True
        while q:
            cur = q.pop(0)
            temp.append(cur)
            for nxt in d[cur]:
                if not visited[nxt]:
                    visited[nxt] = True
                    q.append(nxt)
        ans.append(temp)

# one new road between consecutive components connects everything
# with the minimum possible number of new roads
print(len(ans) - 1)
for i in range(len(ans) - 1):
    print(ans[i][0], ans[i + 1][0])
Example #6

def getCorrelationData(allAtomSurvivalData, repetitionsPerVariation):
    from collections import OrderedDict as dic
    import numpy as np
    import math
    atomNum = allAtomSurvivalData.shape[0]
    repNum = allAtomSurvivalData.shape[1]
    correlationErrors = dic()
    correlationAverages = dic()
    correlationErrors['Key List'] = ''
    correlationAverages['Key List'] = ''
    # initialize the average dicts
    for atomsLoadedInc in range(1, atomNum + 1):
        for atomSurvivedInc in range(0, atomNum):
            name = 'Load ' + str(atomsLoadedInc) + ', atom ' + str(atomSurvivedInc) + ' survived'
            correlationErrors[name] = []
            correlationAverages[name] = []
    # data that doesn't discriminate between locations.
    for atomsLoadedInc in range(1, atomNum + 1):
        # + 1 because all atoms could survive.
        for atomSurvivedInc in range(0, atomsLoadedInc + 1):
            name = 'Load ' + str(atomsLoadedInc) + ', ' + str(atomSurvivedInc) + ' atoms survived'
            correlationErrors[name] = []
            correlationAverages[name] = []
    # holds the averages over wells
    for atomsLoadedInc in range(1, atomNum + 1):
        name = 'Load ' + str(atomsLoadedInc) + ', average single atom survival'
        correlationErrors[name] = []
        correlationAverages[name] = []
    # holds the average over all wells and loading scenarios.
    name = 'Total average single atom survival'
    correlationErrors[name] = []
    correlationAverages[name] = []
    # Start sorting data.
    for variationInc in range(0, int(repNum / repetitionsPerVariation)):
        totalNumberLoadedList = []
        totalNumberSurvivedList = []
        for repInc in range(0, repetitionsPerVariation):
            totalAtomsLoaded = 0
            totalAtomsSurvived = 0
            holeFlag = False
            for atomInc in range(0, atomNum):
                if allAtomSurvivalData[atomInc][variationInc * repetitionsPerVariation + repInc] == 0:
                    if holeFlag:
                        totalAtomsSurvived = math.nan
                        totalAtomsLoaded = math.nan
                        break
                    totalAtomsLoaded += 1
                elif allAtomSurvivalData[atomInc][variationInc * repetitionsPerVariation + repInc] == 1:
                    if holeFlag:
                        totalAtomsSurvived = math.nan
                        totalAtomsLoaded = math.nan
                        break
                    totalAtomsSurvived += 1
                    totalAtomsLoaded += 1
                else:
                    # no atom loaded here.
                    if totalAtomsLoaded > 0:
                        holeFlag = True
            totalNumberLoadedList = np.append(totalNumberLoadedList, totalAtomsLoaded)
            totalNumberSurvivedList = np.append(totalNumberSurvivedList, totalAtomsSurvived)
        # initialize entries in a temporary dictionary (mostly for sanity; there is probably a way around this)
        tempCorrelationData = dic()
        for atomsLoadedInc in range(1, atomNum + 1):
            for atomSurvivedInc in range(0, atomNum):
                name = 'Load ' + str(atomsLoadedInc) + ', atom ' + str(atomSurvivedInc) + ' survived'
                tempCorrelationData[name] = []
        # data that doesn't discriminate between locations.
        for atomsLoadedInc in range(1, atomNum + 1):
            # + 1 because all atoms could survive.
            for atomSurvivedInc in range(0, atomsLoadedInc + 1):
                name = 'Load ' + str(atomsLoadedInc) + ', ' + str(atomSurvivedInc) + ' atoms survived'
                tempCorrelationData[name] = []
        # holds the averages over wells
        for atomsLoadedInc in range(1, atomNum + 1):
            name = 'Load ' + str(atomsLoadedInc) + ', average single atom survival'
            tempCorrelationData[name] = []
        # holds the average over all wells and loading scenarios.
        tempCorrelationData['Total average single atom survival'] = []
        # get data for specific particles surviving.
        for atomInc in range(0, atomNum):
            for repInc in range(0, repetitionsPerVariation):
                if math.isnan(totalNumberLoadedList[repInc]) or totalNumberLoadedList[repInc] == 0:
                    # no atoms loaded or hole so throw the data out.
                    continue
                name = 'Load ' + str(int(totalNumberLoadedList[repInc])) \
                       + ', atom ' + str(atomInc) + ' survived'
                name2 = 'Load ' + str(int(totalNumberLoadedList[repInc])) + ', average single atom survival'
                # make sure *THIS* atom was loaded originally.
                if allAtomSurvivalData[atomInc][variationInc * repetitionsPerVariation + repInc] != -1:
                    value = allAtomSurvivalData[atomInc][variationInc * repetitionsPerVariation + repInc]
                    tempCorrelationData[name] = np.append(tempCorrelationData[name], value)
                    tempCorrelationData[name2] = np.append(tempCorrelationData[name2], value)
                    tempCorrelationData['Total average single atom survival'] \
                        = np.append(tempCorrelationData['Total average single atom survival'], value)
        # get data that does not discriminate between locations.
        for repInc in range(0, repetitionsPerVariation):
            if math.isnan(totalNumberLoadedList[repInc]) or int(totalNumberLoadedList[repInc]) == 0:
                # throw away holes
                continue
            for atomsSurvivedInc in range(0, int(totalNumberLoadedList[repInc]) + 1):
                name = 'Load ' + str(int(totalNumberLoadedList[repInc])) + ', ' + str(atomsSurvivedInc) \
                       + ' atoms survived'
                if totalNumberSurvivedList[repInc] == atomsSurvivedInc:
                    tempCorrelationData[name] = np.append(tempCorrelationData[name], 1)
                else:
                    tempCorrelationData[name] = np.append(tempCorrelationData[name], 0)
        # calculate averages
        for atomsLoadedInc in range(1, atomNum + 1):
            for atomSurvivedInc in range(0, atomNum):
                name = 'Load ' + str(atomsLoadedInc) + ', atom ' + str(atomSurvivedInc) + ' survived'
                correlationAverages[name] = np.append(correlationAverages[name], np.mean(tempCorrelationData[name]))
                correlationErrors[name] = np.append(correlationErrors[name], np.std(tempCorrelationData[name])
                                                    / np.sqrt(len(tempCorrelationData[name])))
            name2 = 'Load ' + str(atomsLoadedInc) + ', average single atom survival'
            correlationAverages[name2] = np.append(correlationAverages[name2], np.mean(tempCorrelationData[name2]))
            correlationErrors[name2] = np.append(correlationErrors[name2],
                                                 np.std(tempCorrelationData[name2])
                                                 / np.sqrt(len(tempCorrelationData[name2])))
        totalName = 'Total average single atom survival'
        correlationAverages[totalName] = np.append(correlationAverages[totalName],
                                                   np.mean(tempCorrelationData[totalName]))
        correlationErrors[totalName] = np.append(correlationErrors[totalName],
                                                 np.std(tempCorrelationData[totalName])
                                                 / np.sqrt(len(tempCorrelationData[totalName])))
        # data that doesn't discriminate between locations.
        for atomsLoadedInc in range(1, atomNum + 1):
            # + 1 because all atoms could survive.
            for atomSurvivedInc in range(0, atomsLoadedInc + 1):
                name = 'Load ' + str(atomsLoadedInc) + ', ' + str(atomSurvivedInc) + ' atoms survived'
                correlationAverages[name] = np.append(correlationAverages[name], np.mean(tempCorrelationData[name]))
                correlationErrors[name] = np.append(correlationErrors[name], np.std(tempCorrelationData[name])
                                                    / np.sqrt(len(tempCorrelationData[name])))
    correlationErrors['Key List'] = list(correlationErrors.keys())
    correlationAverages['Key List'] = list(correlationAverages.keys())
    return correlationAverages, correlationErrors
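A quick synthetic check; the shape (atoms × repetitions) and the -1/0/1 encoding (no atom / atom lost / atom survived) are assumptions inferred from how the array is indexed above:

import numpy as np

rng = np.random.default_rng(0)
fakeData = rng.choice([-1, 0, 1], size=(4, 100))  # 4 atoms, 100 repetitions
averages, errors = getCorrelationData(fakeData, repetitionsPerVariation=100)
print(averages['Total average single atom survival'])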
Example #7
import pandas
import math
import numpy as np
from collections import OrderedDict as dic

from django.contrib.auth.hashers import make_password
from .models import Noeud, Personne, Specialite, Document, Ecrit, EmpSpe, NoeudSpe

EXCEL_FILE = "delire/data.xlsx"

SHEETS = dic([("PersonneClient", Personne), ("Noeud", Noeud), ("PersonneAPHP", Personne),
              ("Spécialité", Specialite), ("Document", Document), ("Ecrit", Ecrit),
              ("EmpSpe", EmpSpe), ("NoeudSpe", NoeudSpe)])

def getDF(file, sheet):
    return pandas.read_excel(file, sheet_name=sheet)

def treatRow(row, labels, table, vars):
    kwargs = {}
    for y in labels:
        data = row.loc[row.index[0], y]

        if isinstance(data, (float, np.float64)):
            if math.isnan(data):
                continue  # skip empty cells

        if data in vars:  # uppercase variables in the table map to concrete values
            data = vars[data]

        kwargs[y] = data
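A hypothetical driver for the pieces above; the excerpt does not show what treatRow ultimately does with kwargs or where labels and vars come from, so this only sketches the call shape:

def loadSheet(sheetName, model, vars=None):
    df = getDF(EXCEL_FILE, sheetName)
    for i in range(len(df)):
        # pass a one-row DataFrame, matching treatRow's row.index[0] access
        treatRow(df.iloc[[i]], df.columns, model, vars or {})

for sheetName, model in SHEETS.items():
    loadSheet(sheetName, model)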
Example #8
from collections import defaultdict as dic
s = int(input())

flag = dic(int)
flag[s - 1] = 1  # mark the starting value as seen (keys are offset by one)


def f(n, i):
    # i is the 1-based position of n in the sequence; stop at the first repeated value
    if flag[n - 1] == 1:
        print(i)
        exit()
    else:
        flag[n - 1] = 1
        if n % 2 == 1:
            f(3 * n + 1, i + 1)
        else:
            f(n // 2, i + 1)


if s % 2 == 1:
    f(3 * s + 1, 2)
else:
    f(s // 2, 2)
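The same search, iteratively; this avoids Python's recursion limit for long sequences and is equivalent to the recursive version above:

def first_repeat_index(s):
    seen = {s}
    n, i = s, 1
    while True:
        n = 3 * n + 1 if n % 2 == 1 else n // 2
        i += 1
        if n in seen:
            return i
        seen.add(n)

# print(first_repeat_index(s))  # same answer as f(...)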
Example #9
from collections import defaultdict as dic
import speech_recognition as sr

h = dic(int)

from monkeylearn import MonkeyLearn

print("\n\nSPEAK ANYTHING AND SAY EXIT TO GET OUT OF THE LOOP\n")

input_value = "go"

while input_value != "exit":
    ml = MonkeyLearn('4bacc60b35272eb0a59ebc4c4809addf754dfa24')
    data = []
    r = sr.Recognizer()
    with sr.Microphone() as source:
        audio = r.listen(source)
    try:
        # recognize once and reuse the result rather than calling the recognizer twice
        input_value = r.recognize_google(audio)
        print(input_value, "\n")
    except Exception:
        print("speak again please\n")
        continue
    data.append(input_value)
    model_id = 'cl_pi3C7JiL'
    result = ml.classifiers.classify(model_id, data)
    part1 = result.body[0]['classifications'][0]['tag_name']
    part2 = result.body[0]['classifications'][0]['confidence'] * 100
    print(part1, part2)
Example #10
def atomAnalysis(date, runNumber, analysisLocations, picturesPerExperiment, repetitions):
    """
    :param date:
    :param runNumber:
    :param analysisLocations:
    :param picturesPerExperiment:
    :param repetitions:
    :return:
    """
    import numpy as np
    from astropy.io import fits
    from collections import OrderedDict as dic
    from dataAnalysisFunctions import (normalizeData, binData, guessGaussianPeaks, fitDoubleGaussian,
                                       calculateAtomThreshold, getAtomData)
    baseData = dic()
    baseData['Dictionary Key'] = ''
    baseData['Date'] = date
    baseData['Run Number'] = runNumber
    baseData['Warnings'] = ''
    baseData['Repetitions'] = repetitions
    baseData['Pictures Per Repetition'] = picturesPerExperiment
    # Save niawg info
    niawgLogLocation = '//andor/share/Data and documents/Data repository/NIAWG Logging Files/Individual Experiments/'
    with open(niawgLogLocation + 'Horizontal Script.txt') as horizontalScriptFile:
        baseData['Horizontal NIAWG Script'] = horizontalScriptFile.read()
    with open(niawgLogLocation + 'Vertical Script.txt') as verticalScriptFile:
        baseData['Vertical NIAWG Script'] = verticalScriptFile.read()
    with open(niawgLogLocation + 'Intensity Script.txt') as intensityScriptFile:
        baseData['Intensity Script'] = intensityScriptFile.read()
    with open(niawgLogLocation + "Parameters.txt") as niawgParametersFile:
        baseData['NIAWG Parameters'] = niawgParametersFile.read()
    # paths for files
    dataRepositoryPath = "C:\\Users\\Mark\\Documents\\Quantum Gas Assembly Control\\Data\\Camera Data\\"
    # dataRepositoryPath = "\\\\andor\\share\\Data and documents\\Data repository\\"
    todaysDataPath = dataRepositoryPath + date + "\\Raw Data\\data_" + str(runNumber) + ".fits"
    keyPath = dataRepositoryPath + date + "\\Raw Data\\key_" + str(runNumber) + ".txt"
    # Load Key
    baseData['Key'] = np.array([])
    with open(keyPath) as keyFile:
        for line in keyFile:
            baseData['Key'] = np.append(baseData['Key'], float(line.strip('\n')))
    # Load Fits File & Get Dimensions
    # record the path and raw array under their own keys
    baseData['Data Path'] = todaysDataPath

    # Get the array from the fits file. That's all I care about.
    fitsInfo = fits.open(todaysDataPath, "append")
    rawData = fitsInfo[0].data
    baseData['Raw Data'] = str(rawData)
    fitsInfo.close()
    # ##########################################################################
    #
    #       Loop for each atom to analyze
    #
    numberAtomsToAnalyze = np.array(analysisLocations).shape[0]
    atomLocationList = '#'
    baseData['Analysis Location List'] = ''
    for atomInc in range(0, int(numberAtomsToAnalyze / 2)):
        print('Analyzing atom #' + str(atomInc))
        tempData = dic()
        tempData['Dictionary Key'] = ''
        tempData['Atom Location'] = np.array([analysisLocations[2 * atomInc], analysisLocations[2 * atomInc + 1]])
        # Loop through each picture
        for picInc in range(picturesPerExperiment):
            picStr = 'Pic ' + str(picInc + 1)
            # ### Figure out the threshold
            tempData[picStr + ' Camera Signal'] = normalizeData(rawData, tempData['Atom Location'],
                                                                picInc, picturesPerExperiment)
            # Get Binned Data
            binCenters, binnedData = binData(5, tempData[picStr + ' Camera Signal'])
            # Make educated Guesses for Peaks
            guess1, guess2 = guessGaussianPeaks(binCenters, binnedData)
            # Calculate Atom Threshold
            # define the fitting function
            guess = np.array([max(binnedData), guess1, 30, max(binnedData), guess2, 30])
            try:
                gaussianFitVals = fitDoubleGaussian(binCenters, binnedData, guess)
                tempData[picStr + ' Threshold'], \
                    tempData[picStr + ' Threshold Fidelity'] = calculateAtomThreshold(gaussianFitVals)
            except RuntimeError:
                # fit failed, no atoms.
                # Assume no atoms.
                tempData[picStr + ' Threshold'] = max(tempData[picStr + ' Camera Signal']) + 1
                tempData[picStr + ' Threshold Fidelity'] = 0
            # Get Data in final form for exporting
            tempData[picStr + ' Atom Data'] = getAtomData(tempData[picStr + ' Camera Signal'],
                                                          tempData[picStr + ' Threshold'])
            if tempData[picStr + ' Threshold Fidelity'] < 0.95:
                baseData['Warnings'] += 'Bad Fit Fidelity for Picture #' + str(picInc) + '; '
        tempData['Dictionary Key'] = list(tempData.keys())
        atomLocationList += str(analysisLocations[2 * atomInc]) + ", " + str(analysisLocations[2 * atomInc + 1])
        baseData[str(analysisLocations[2 * atomInc]) + ", " + str(analysisLocations[2 * atomInc + 1])] = tempData
        atomLocationList += '#'
    baseData['Analysis Location List'] = atomLocationList
    print('Prepping Data')
    outputName = dataRepositoryPath + baseData['Date'] + "\\" + "run_" + str(baseData['Run Number']) + "_data.csv"
    baseData['Dictionary Key'] = list(baseData.keys())
    csvText = ''
    for keyHeader, value in baseData.items():
        if isinstance(value, str):
            # don't iterate through the string, just add it.
            csvText += '\n:X:' + keyHeader + ':X: ' + str(value)
            continue
        if isinstance(value, dict):
            # iterate through that! Assume no nested dictionaries.
            csvText += '\n:X:[' + keyHeader + ']:X:'
            for subHeader, subValue in value.items():
                if subHeader == "Raw Data":
                    # want to put this on last.
                    continue
                if isinstance(subValue, str):
                    # don't iterate through the string, just add it.
                    csvText += '\n\t;' + subHeader + '; ' + str(subValue)
                    continue
                try:
                    csvText += '\n\t;' + subHeader + '; ' + ", ".join(str(x) for x in subValue)
                except TypeError:
                    # catch integers.
                    csvText += '\n\t;' + subHeader + '; ' + str(subValue)
            continue
        try:
            csvText += '\n:X:' + keyHeader + ':X: ' + ", ".join(str(x) for x in value)
        except TypeError:
            # catch integers.
            csvText += '\n:X:' + keyHeader + ':X: ' + str(value)
    print("Writing Data to " + str(outputName) + "...")
    with open(outputName, "w") as record_file:
        record_file.write(csvText)
    print('Complete!')
    return "Finished"
Example #11
from collections import defaultdict as dic

n = int(input())

v = list(map(int, input().split()))
dict1 = dic(int)  # counts of the values at odd indices
dict2 = dic(int)  # counts of the values at even indices

for i in range(n):
    if i % 2 == 1:
        dict1[v[i]] += 1
    else:
        dict2[v[i]] += 1


def search_second_larger(counts):
    # count of the second most frequent value; None if only one distinct value occurs
    if len(counts) == 1:
        return None
    return sorted(counts.values(), reverse=True)[1]


max_1 = max(dict1.values())
second_larger_1 = search_second_larger(dict1)
max_2 = max(dict2.values())
second_larger_2 = search_second_larger(dict2)