# Example 1
def multiChorusSections(intvs, dur):
    """Merge possibly-overlapping chorus intervals into a sequence of
    non-overlapping labeled sections covering [0, dur].

    Args:
        intvs: iterable of (begin, end) chorus intervals in seconds;
            intervals may overlap each other.
        dur: total duration in seconds; the final section is closed at dur.

    Returns:
        mergeIntervals(...) applied to a mirex-format tuple of
        (np.array of [start, end] intervals, np.array of labels, dtype U16).
    """

    def key(x):
        # quantize timestamp to a 0.01 s grid so coincident boundaries collapse
        return int(x * 100)

    # sweep-line counters: +1 at each chorus begin, -1 at each chorus end
    boundaries = defaultdict(int)
    for intv in intvs:
        boundaries[key(intv[0])] += 1
        boundaries[key(intv[1])] -= 1
    intervals, labels = [[0, 0]], [CLF_NON_TARGET_LABEL]
    state = 0  # 0:others >0:chorus
    for bdr in sorted(boundaries.keys()):
        t = bdr / 100.0
        intervals[-1][1] = t
        intervals.append([t, 0])
        state += boundaries[bdr]
        if state == 0:
            labels.append(CLF_NON_TARGET_LABEL)
        elif state > 0:
            labels.append(CLF_TARGET_LABEL)
        else:
            # Unbalanced begin/end boundaries. Previously no label was
            # appended here, desynchronizing `intervals` and `labels`;
            # fall back to the non-target label to keep them in step.
            logger.error(f"invalid state, boundaries={boundaries}")
            labels.append(CLF_NON_TARGET_LABEL)
    intervals[-1][1] = dur
    mirexFmt = (np.array(intervals), np.array(labels, dtype="U16"))
    logger.debug(f"multi chorus sections, output=\n{mirexLines(mirexFmt)}")
    return mergeIntervals(mirexFmt)
 def loadFeature(self, i):
     """Load the pickled feature for sample *i*.

     The pickle lives at <ddir>/<orig_name>-<id>.pkl; a missing file is
     logged and the FileNotFoundError is re-raised for the caller.
     """
     pklPath = self.getPklPath(i)
     try:
         with open(pklPath, "rb") as pklFile:
             return pickle.load(pklFile)
     except FileNotFoundError as e:
         logger.error(
             f'file "{pklPath}" not found, build the dataset first.')
         raise e
 def __call__(self):
     """Evaluate every sample in the dataset, in parallel when possible.

     Falls back to serial evaluation when the worker pool raises a
     RuntimeError (e.g. a CUDA RuntimeError).

     Returns:
         (np.array of per-sample metrics, list of sample titles)
     """
     # Hoisted out of the try-block: previously N was assigned inside it,
     # so a RuntimeError raised before the assignment (e.g. in Pool())
     # caused a NameError in the fallback path.
     N = len(self.dataset)
     try:
         with Pool(self.num_workers) as p:
             metrics = list(tqdm(p.imap(self.eval, range(N)), total=N))
     except RuntimeError as e:
         # CUDA RuntimeError: log it first (the old call passed `e` as a
         # %-arg to a placeholder-free message, dropping it), then retry
         # serially.
         logger.error(f"[RuntimeError] {e}")
         metrics = [self.eval(i) for i in tqdm(range(N))]
     titles = [sample["title"] for sample in self.dataset]
     return np.array(metrics), titles
 def getResult(self, algoName, titles=None):
     """Fetch stored metrics for *algoName*, ordered by *titles*.

     When *titles* is None, the stored title order is used. Returns a
     (np.array of metrics, titles) pair, or None when the algorithm has
     no stored results.
     """
     if algoName not in self.algoNames:
         logger.error(f"{algoName} results not found in {self.datasetName}")
         return None
     idx = self.algoNames.index(algoName)
     metrics = self.metricsList[idx]
     storedTitles = self.titlesList[idx]
     if titles is None:
         titles = storedTitles
     selected = [metrics[storedTitles.index(title)] for title in titles]
     return np.array(selected), titles
 def loadData(self, dataFile):
     """Load the pickled (X, y) classifier dataset from *dataFile*.

     Raises FileNotFoundError (after logging) when the file is absent —
     the dataset must be built first.
     """
     # guard clause: fail fast when the dataset file does not exist
     if not os.path.exists(dataFile):
         logger.error(f"build dataset for classifier first")
         raise FileNotFoundError(dataFile)
     with open(dataFile, "rb") as f:
         X, y = pickle.load(f)
     logger.info(
         f"<{self.__class__.__name__}> load data from '{dataFile}'")
     logger.info(
         f"target(chorus)/total={sum(np.array(y)==CLF_TARGET_LABEL)}/{len(y)}"
     )
     return X, y