def createMoreLists(dbFilename, regex=r'omni_\d+_classroom', prefix='omni/classroom'):
    print('Creating additional lists...')

    # open database
    rirDb = json.load(open(dbFilename))
    rirs = sorted(list(rirDb.keys()))

    train = RirSet('train')
    test = RirSet('test')
    dev = RirSet('dev')
    train.load(ListDir)
    test.load(ListDir)
    dev.load(ListDir)

    # old, but maybe useful once I find a better way to separate hard and easy RIRs
    # print('Splitting train set into hard and easy RIRs according to length.')
    # TODO: use reverberation time (RT60)
    # easy = RirSet('train.easy')
    # hard = RirSet('train.hard')
    # for rir in rirs:
    #     if rir in train:
    #         if rirDb[rir]['length'] > 2.5:
    #             hard.add(rir)
    #         else:
    #             easy.add(rir)
    # easy.save(ListDir)
    # hard.save(ListDir)

    print('Creating subsets with room impulse responses matching \'{}\' in {}'.format(regex, prefix))
    mustMatch = re.compile(regex)
    subdir, filenamePrefix = os.path.split(prefix)
    subdir = os.path.join(ListDir, subdir)
    subTrain = RirSet(filenamePrefix + '.train')
    subTest = RirSet(filenamePrefix + '.test')
    subDev = RirSet(filenamePrefix + '.dev')
    for rir in rirs:
        if mustMatch.match(rir):
            if rir in train:
                subTrain.add(rir)
            elif rir in test:
                subTest.add(rir)
            else:
                assert rir in dev
                subDev.add(rir)
    util.createDirectory(subdir)
    subTrain.save(subdir)
    subTest.save(subdir)
    subDev.save(subdir)
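# For orientation, a hypothetical call of createMoreLists with its defaults; the
# database filename matches the default used by the import step further down, but the
# ListDir layout and the exact file names written by RirSet.save are assumptions.
#
#   createMoreLists('db.json')
#
# Effect: every RIR id matching r'omni_\d+_classroom' (e.g. 'omni_3_classroom') is
# copied from the full train/test/dev lists into three new subset lists,
# 'classroom.train', 'classroom.test' and 'classroom.dev', saved under <ListDir>/omni/.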
def main():
    if not util.checkIfFileExists("ERREUR/"):
        util.createDirectory("ERREUR")

    loginFailed = False

    # Login/Signup loop
    while True:
        accountData = receiveMessageFromClient()
        if accountData == -1:
            loginFailed = True
            break
        if accountData.get("command") == "login":
            if logIn(accountData.get("username"), accountData.get("password")):
                break
        elif accountData.get("command") == "signup":
            if createAccount(accountData.get("username"), accountData.get("password")):
                break

    # Main menu loop
    while True:
        if loginFailed:
            break
        commandData = receiveMessageFromClient()
        if commandData == -1:
            break
        if commandData.get("command") == "sendMail":
            sender = commandData.get("sender")
            recipient = commandData.get("recipient")
            subject = commandData.get("subject")
            body = commandData.get("body")
            sendMail(sender, recipient, subject, body)
        elif commandData.get("command") == "checkMails":
            username = commandData.get("username")
            checkMails(username)
        elif commandData.get("command") == "checkStats":
            username = commandData.get("username")
            sendStats(username)
        elif commandData.get("command") == "quit":
            break
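# The transport behind receiveMessageFromClient() is not shown; judging from the
# dispatch above, each message is a dict with a 'command' key plus command-specific
# fields. A sketch of the payloads this loop expects -- only the keys read via .get()
# are taken from the code, how they are framed on the wire is an assumption.
exampleMessages = [
    {"command": "login", "username": "alice", "password": "secret"},
    {"command": "signup", "username": "bob", "password": "hunter2"},
    {"command": "sendMail", "sender": "alice", "recipient": "bob",
     "subject": "hi", "body": "hello"},
    {"command": "checkMails", "username": "alice"},
    {"command": "checkStats", "username": "alice"},
    {"command": "quit"},
]
# receiveMessageFromClient() returns one such dict, or -1, which both loops treat as
# the signal to stop.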
def main(dbFilename='db.json', deleteBefore=False, sources=[]):
    # open db
    if os.path.isfile(dbFilename) and not deleteBefore:
        rirDb = json.load(open(dbFilename))
    else:
        rirDb = {}

    util.createDirectory(ImportDir, deleteBefore=deleteBefore)
    util.createDirectory(DownloadDir)

    def insertIntoDb(file, identifier, info):
        onlineDbId = info['source'].lower()
        id = '{}_{}'.format(onlineDbId, identifier)
        if id in rirDb:
            return False
        info['id'] = id
        info['filename'] = os.path.join(ImportDir, id + '.wav')
        rirDb[id] = info

        # copy file (or write as wav file)
        if isinstance(file, str):
            shutil.copyfile(file, info['filename'])
        else:
            assert len(file) == 2
            x, fs = file
            sf.write(info['filename'], x, fs)
        return True

    if 'ace' in sources:
        Ace.importRirs(DownloadDir, insertIntoDb)
    if 'air' in sources:
        Air.importRirs(DownloadDir, insertIntoDb)
    if 'mardy' in sources:
        Mardy.importRirs(DownloadDir, insertIntoDb)
    if 'omni' in sources:
        Omni.importRirs(DownloadDir, insertIntoDb)
    if 'rwcp' in sources:
        Rwcp.importRirs(DownloadDir, insertIntoDb)
    # more sources could be found here:
    # http://www.dreams-itn.eu/index.php/dissemination/science-blogs/24-rir-databases

    # save db
    with open(dbFilename, 'w') as dbFile:
        json.dump(rirDb, dbFile, sort_keys=True, indent=4)
    print('Database size: {}'.format(len(rirDb)))
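# Each <Source>.importRirs(DownloadDir, insertIntoDb) module is expected to call the
# callback once per RIR. A minimal sketch of such an importer, covering the two `file`
# forms insertIntoDb accepts (a path to an existing wav file, or a (samples, fs)
# tuple); the identifiers, metadata fields and file names below are made up.
import os
import numpy as np

def importRirs(downloadDir, insertIntoDb):
    # Variant 1: pass a path to an existing wav file; insertIntoDb copies it.
    insertIntoDb(os.path.join(downloadDir, 'lecture_hall_mic1.wav'),  # hypothetical file
                 'lecture_hall_mic1',
                 {'source': 'Example', 'room': 'lecture hall'})

    # Variant 2: pass raw samples plus sample rate; insertIntoDb writes them via sf.write.
    x = np.zeros(4800, dtype='float32')  # placeholder impulse response
    insertIntoDb((x, 48000), 'lecture_hall_mic2',
                 {'source': 'Example', 'room': 'lecture hall'})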
def createLists(dbFilename): print("Splitting RIRs into sets...") sets = [RirSet("train", 0.8), RirSet("test", 0.1), RirSet("dev", 0.1)] # open database rirDb = json.load(open(dbFilename)) rirs = sorted(list(rirDb.keys())) # to distribute the RIRs to the set we could to a shuffle, but as they are in alphabetical order and just going over them guaranties that we distribute the different conditions (mostly) equally on the different sets sets[0].add(rirs[0]) for i in range(1, len(rirs)): si = np.argmin([s.missing(i) for s in sets]) sets[si].add(rirs[i]) # safe set files util.createDirectory(ListDir) for s in sets: s.save(ListDir)
def main(dbFilename, targetFs, force=False):
    util.createDirectory(NormalizeDir)
    rirDb = json.load(open(dbFilename))

    bar = util.ConsoleProgressBar()
    bar.start('Normalize RIRs')
    i = 0
    for rirId, rir in rirDb.items():
        targetFilename = os.path.join(NormalizeDir, rir['id'] + '.wav')
        # skip RIRs that already appear to be normalized to the target rate (unless force is set)
        if not force:
            if rir['filename'] == targetFilename and \
               rir['fs'] == targetFs and \
               targetFilename:
                continue
        x, fs_x = sf.read(os.path.join(ImportDir, rir['id'] + '.wav'), dtype='float32')
        y, fs_y = x, fs_x
        if fs_y != targetFs:
            y = resample(y, targetFs / fs_y, 'sinc_best')
            fs_y = targetFs
        rir['length_org'] = len(y) / fs_y
        y = util.trimSilence(y, 0.001, trimRight=False)
        y = util.normalizeAmplitude(y)
        sf.write(targetFilename, y, fs_y)
        rir['filename'] = targetFilename
        rir['fs'] = fs_y
        rir['length'] = len(y) / fs_y

        i += 1
        bar.progress(i / len(rirDb))
    bar.end()

    with open(dbFilename, 'w') as dbFile:
        json.dump(rirDb, dbFile, sort_keys=True, indent=4)
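# The resample(y, ratio, 'sinc_best') call matches the signature of the libsamplerate
# bindings (e.g. the samplerate / scikits.samplerate packages). The two util helpers
# are not shown; a rough sketch of what they appear to do -- the threshold semantics
# and the peak normalization are assumptions, not the project's actual code.
import numpy as np

def trimSilence(x, threshold, trimRight=True):
    # Assumed behaviour: drop leading (and optionally trailing) samples whose
    # absolute value stays below `threshold`.
    above = np.nonzero(np.abs(x) >= threshold)[0]
    if len(above) == 0:
        return x
    end = above[-1] + 1 if trimRight else len(x)
    return x[above[0]:end]

def normalizeAmplitude(x):
    # Assumed behaviour: scale so the absolute peak is 1.
    peak = np.max(np.abs(x))
    return x / peak if peak > 0 else x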
def createLists(dbFilename):
    print('Splitting RIRs into sets...')
    sets = [
        RirSet('train', 0.8),
        RirSet('test', 0.1),
        RirSet('dev', 0.1),
    ]

    # open database
    rirDb = json.load(open(dbFilename))
    rirs = sorted(list(rirDb.keys()))

    # To distribute the RIRs over the sets we could shuffle, but as they are in
    # alphabetical order, just going over them in order guarantees that the different
    # recording conditions are spread (mostly) equally over the sets.
    sets[0].add(rirs[0])
    for i in range(1, len(rirs)):
        si = np.argmin([s.missing(i) for s in sets])
        sets[si].add(rirs[i])

    # save set files
    util.createDirectory(ListDir)
    for s in sets:
        s.save(ListDir)
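# RirSet itself is not part of this snippet. A minimal sketch of the interface used
# here and in createMoreLists above; only the method names and the 0.8/0.1/0.1
# fractions come from the code, the missing() formula and the on-disk list format
# are assumptions.
import os

class RirSet:
    def __init__(self, name, fraction=0.0):
        self.name = name          # set name, e.g. 'train'; also used as the file name
        self.fraction = fraction  # target share of all RIRs
        self.rirs = []

    def add(self, rirId):
        self.rirs.append(rirId)

    def __contains__(self, rirId):
        return rirId in self.rirs

    def missing(self, total):
        # How many RIRs this set holds relative to its target share of `total`;
        # np.argmin over this picks the set that is furthest below its target.
        return len(self.rirs) - self.fraction * total

    def save(self, directory):
        with open(os.path.join(directory, self.name), 'w') as f:  # exact file name/extension unknown
            f.write('\n'.join(self.rirs) + '\n')

    def load(self, directory):
        with open(os.path.join(directory, self.name)) as f:  # exact file name/extension unknown
            self.rirs = [line.strip() for line in f if line.strip()]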
def createUserDirectory(username):
    util.createDirectory(username)
def saveGraphByType(cm, index, fig, dst, typee):
    dst = dst + 'byType\\' + typee
    util.createDirectory(dst)
    fileName = str(index) + '_' + cm[0] + '_' + cm[1] + '.png'
    picDst = dst + '\\' + fileName
    fig.savefig(picDst)
def saveGraphByMethod(cm, index, fig, dst):
    dst = dst + 'byMethod\\' + cm[1]
    util.createDirectory(dst)
    fileName = str(index) + '_' + cm[0] + '_' + cm[1] + '.png'
    picDst = dst + '\\' + fileName
    fig.savefig(picDst)
def saveGraph(cm, index, fig, dst):
    dst = dst + 'All\\'
    util.createDirectory(dst)
    fileName = str(index) + '_' + cm[0] + '_' + cm[1] + '.png'
    picDst = dst + fileName
    fig.savefig(picDst)
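# The three helpers above hard-code Windows path separators. If cross-platform paths
# were ever needed, the same logic could be written with os.path.join; this is a
# sketch of that variant, not something present in the original code (util is the
# project's own helper module).
import os

def saveGraphPortable(cm, index, fig, dst, subdir):
    # subdir would be 'All', os.path.join('byMethod', cm[1]) or os.path.join('byType', typee)
    outDir = os.path.join(dst, subdir)
    util.createDirectory(outDir)
    fileName = '{}_{}_{}.png'.format(index, cm[0], cm[1])
    fig.savefig(os.path.join(outDir, fileName))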
import graph as g
import util as util
import catagorize as cat
import time, csv
import minimumGas as mg

start = time.time()
data = util.readFile('cmStatus_src\\result\\contractMethodTxStatusRange_Filter.csv')
#data = util.filterOogRate(data,30)
print("Data Loaded")

dst = 'cmPic2'
util.createDirectory(dst)
dst += '\\Step'
util.createDirectory(dst)
util.createDirectory(dst + '\\byMethod')
util.createDirectory(dst + '\\byType')

csv_write = open('cmPic2\\minimumGas.csv', 'w+', newline='')
csv_writer = csv.writer(csv_write, delimiter=',')
#csv_writer.writerow(['No', 'Contract', 'Method', 'Type', 'Minimum Gas', 'Recommend Gas'])

index = 0
for cm in data:
    _ = g.plotAll(cm, data[cm], index, dst + '\\')
    result = mg.calMinimumGas(cm, data[cm])
    #csv_writer.writerow([index, cm[0], cm[1], result[1], result[0], ])
    index += 1
    if index % 20 == 0:
        # the original snippet is truncated here; a periodic progress report seems intended
        print('{} contract methods processed ({:.1f}s elapsed)'.format(index, time.time() - start))