Example #1
async def info(self, ctx, *, member: converters.I_MemberConverter = None):
    """Get information on a member."""
    if member is None:
        member = ctx.author
    embed = util.Embed()
    if member.nick:
        embed.set_author(name=f"{member.display_name} ({member})",
                         icon_url=member.avatar_url)
    else:
        embed.set_author(name=str(member), icon_url=member.avatar_url)
    embed.description = "(" + member.mention + ")"
    embed.add_field(name="ID", value=member.id)
    # util.timestamp renders e.g. "Sun, 28 Mar 2021 18:54:22 UTC"
    embed.add_field(name="Joined", value=util.timestamp(member.joined_at))
    # ^ member.joined_at can be None, but that's rare enough to ignore here;
    #   if it causes problems I'll add explicit handling.
    embed.add_field(name="Account Created",
                    value=util.timestamp(member.created_at))
    if member.bot:
        embed.add_field(
            name="Bot Invite Link",
            value="[Invite Bot]("  # continued below
            f"{discord.utils.oauth_url(member.id, scopes=('bot', 'applications.commands'))})"
        )
    await ctx.send(embed=embed)
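The util.timestamp helper used above isn't shown on this page; a minimal sketch of what it might look like, assuming the format hinted at by the comment ("Sun, 28 Mar 2021 18:54:22 UTC"):

from datetime import datetime, timezone

def timestamp(dt: datetime) -> str:
    # Render a datetime as e.g. "Sun, 28 Mar 2021 18:54:22 UTC".
    if dt.tzinfo is None:  # discord.py 1.x returns naive UTC datetimes
        dt = dt.replace(tzinfo=timezone.utc)
    return dt.astimezone(timezone.utc).strftime("%a, %d %b %Y %H:%M:%S UTC")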
Example #2
from os import makedirs, path
import pickle


class Backup(object):

    # Evaluated once at class-definition time: every Backup created in this
    # process shares the same timestamp, so all backups from one run land in
    # the same folder.
    time_stamp = timestamp()

    def __init__(self, data, name="data", root_folder=path.expanduser("~/Desktop"), label=""):

        self.data = data
        self.folder = "{}/HC_{}_{}".format(root_folder, label, self.time_stamp)
        self.file_name = "{}/HC_{}_{}_{}.p".format(self.folder, name, label, self.time_stamp)

        self.run()

    def create_folders(self):

        if not path.exists(self.folder):
            makedirs(self.folder)

    def save_data(self):

        with open(self.file_name, "wb") as f:
            pickle.dump(self.data, f)

    def run(self):

        self.create_folders()
        self.save_data()
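A hypothetical usage sketch, assuming timestamp() returns a filesystem-safe string:

backup = Backup({"trial": 1, "score": 0.93}, name="results", label="exp1")
# writes ~/Desktop/HC_exp1_<time_stamp>/HC_results_exp1_<time_stamp>.p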
Example #3
def get_crash_log(time_list):
    log_file = PATH("%s/crash_log/%s.txt" % (os.getcwd(), utils.timestamp()))
    with open(log_file, "w") as f:
        for time in time_list:
            crash_log = utils.shell("dumpsys dropbox --print %s" %
                                    time).stdout.read()
            f.write(crash_log)
Example #4
def screenshot():
    path = PATH("%s/screenshot" % os.getcwd())
    utils.shell("screencap -p /data/local/tmp/tmp.png").wait()
    if not os.path.isdir(path):
        os.makedirs(path)

    utils.adb("pull /data/local/tmp/tmp.png %s" %
              PATH("%s/%s.png" % (path, utils.timestamp()))).wait()
    utils.shell("rm /data/local/tmp/tmp.png")
Example #5
def line_chart():
    data = top()
    cpu_data = []
    mem_data = []

    # Strip the "%" from each CPU usage value and convert to int
    for cpu in data[0]:
        cpu_data.append(int(cpu.split("%")[0]))

    # Strip the "K" unit from each memory value and convert to MB
    for mem in data[1]:
        mem_data.append(float(mem.split("K")[0]) / 1024)

    # X-axis labels
    labels = []
    for i in range(1, times + 1):
        labels.append(str(i))

    # Automatically size the chart area width
    if times <= 50:
        xArea = times * 40
    elif 50 < times <= 90:
        xArea = times * 20
    else:
        xArea = 1800

    c = XYChart(xArea, 800, 0xCCEEFF, 0x000000, 1)
    c.setPlotArea(60, 100, xArea - 100, 650)
    c.addLegend(50, 30, 0, "arialbd.ttf", 15).setBackground(Transparent)

    c.addTitle("cpu and memory info (%s)" % pkg_name, "timesbi.ttf", 15).setBackground(0xCCEEFF, 0x000000, glassEffect())
    c.yAxis().setTitle("Value", "arialbd.ttf", 12)
    c.xAxis().setTitle("Times", "arialbd.ttf", 12)

    c.xAxis().setLabels(labels)

    # Automatically set the X-axis label step
    if times <= 50:
        step = 1
    else:
        step = times // 50 + 1

    c.xAxis().setLabelStep(step)

    layer = c.addLineLayer()
    layer.setLineWidth(2)
    layer.addDataSet(cpu_data, 0xff0000, "cpu(%)")
    layer.addDataSet(mem_data, 0x008800, "mem(M)")

    path = PATH("%s/chart" %os.getcwd())
    if not os.path.isdir(path):
        os.makedirs(path)

    # Save the chart image to the chart/ directory under the current working directory
    c.makeChart(PATH("%s/%s.png" % (path, utils.timestamp())))
Example #6
def generate_sample_data(size, filter, query):
    logging.info('Getting a sample of items')
    sample = get_sample(size, filter, query)
    return {
        dfk.SAMPLE_TIMESTAMP: timestamp(),
        dfk.SAMPLE_FILTER: filter,
        dfk.SAMPLE_QUERY: query,
        dfk.SAMPLE_SIZE: size,
        dfk.SAMPLE_DOIS: [s[dfk.CR_ITEM_DOI].lower() for s in sample],
        dfk.SAMPLE_SAMPLE: sample
    }
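An illustrative call (the filter string and DOI are made up; the dfk key constants and the get_sample payload are project-specific):

sample_data = generate_sample_data(100, 'type:journal-article', None)
# e.g. sample_data[dfk.SAMPLE_SIZE] == 100
# and sample_data[dfk.SAMPLE_DOIS] == ['10.1000/xyz123', ...]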
Example #7
def get_post_creation_hour(post):
    """
    Calculates the creation hour of the post
    :param post: the current post
    :return: returns the calculated hour
    """
    post_timestamp = datetime.strptime(utils.timestamp(post),
                                       '%a %b %d %H:%M:%S %z %Y')
    timestamp_hour = post_timestamp.time().hour
    # timestamp_hour is an integer in 0-23; report it shifted into a 1-24 range
    return timestamp_hour + 1
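A quick sanity check of the parsing, using a made-up timestamp string in the same '%a %b %d %H:%M:%S %z %Y' layout:

from datetime import datetime

ts = datetime.strptime('Sun Mar 28 18:54:22 +0000 2021', '%a %b %d %H:%M:%S %z %Y')
assert ts.time().hour == 18  # get_post_creation_hour would return 19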
Example #8
def write_csv(*args):
    path = PATH("{}/fps_data".format(os.getcwd()))
    if not os.path.isdir(path):
        os.makedirs(path)
    times, fps, jankiness = args[0], args[1], args[2]

    with open(PATH("%s/fps-%s.csv" % (path, utils.timestamp())), "w") as f:
        # times, fps and jankiness are parallel series; write one row per sample
        for i in range(len(fps)):
            f.write("{0},{1},{2},\n".format(str(times[i]), str(fps[i]),
                                            str(jankiness[i])))
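A hypothetical call with three parallel series (elapsed seconds, FPS, jank counts):

write_csv([1, 2, 3], [59.8, 60.1, 58.7], [0, 1, 0])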
Example #9
def record():
    utils.shell("screenrecord /data/local/tmp/video.mp4")
    input_key = input("Please press the Enter key to stop recording:\n")
    if input_key == "":
        # Killing the adb server tears down the shell session, which stops
        # the screenrecord process on the device.
        utils.adb("kill-server")

    print("Get video file...")
    utils.adb("start-server")
    time.sleep(1.5)

    path = PATH("%s/video" % os.getcwd())
    if not os.path.isdir(path):
        os.makedirs(path)

    utils.adb("pull /data/local/tmp/video.mp4 %s" %
              PATH("%s/%s.mp4" % (path, utils.timestamp()))).wait()
Example #10
    def save(self):
        os.makedirs(self.pickle_folder, exist_ok=True)
        os.makedirs(self.json_folder, exist_ok=True)

        file_name = "{}".format(utils.timestamp())

        # Save a summary of parameters in json
        with open("{}/{}.json".format(self.json_folder, file_name), "w") as f:

            param = self.parameters.dict()
            param.update({"name": file_name})

            json.dump(param, f, indent=2)

        with open("{}/{}.p".format(self.pickle_folder, file_name), "wb") as f:
            pickle.dump(self, f)

        return file_name
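A hypothetical counterpart to save(), reloading the pickled object by the name save() returned (folder attributes assumed to match the class above):

import pickle

def load(pickle_folder, file_name):
    with open("{}/{}.p".format(pickle_folder, file_name), "rb") as f:
        return pickle.load(f)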
Example #11
def determine_threshold_on_labeled_patients(dataset_pc, model, options, epoch='last', description=None):
    # Create eval folder
    eval_dir = os.path.join(
        options['train']['samplesDir'],
        model.network.__name__,
        model.model_dir,
        'eval-' + str(epoch) + '-' + str(utils.timestamp()).replace(":", "-")
    )
    if description is not None:
        eval_dir += "-" + str(description)
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)

    sample_dir = os.path.join(eval_dir, 'samples_val_PC')
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)

    if not isinstance(dataset_pc, list):
        dataset_pc = [dataset_pc]

    eval_pc_val = None
    patients_pc_val = None
    for i, ds in enumerate(dataset_pc):
        if i == 0:
            eval_pc_val, patients_pc_val = _evaluate(ds, model, sample_dir, options, split="VAL")
        else:
            _eval_pc_val, _patients_pc_val = _evaluate(ds, model, sample_dir, options, split="VAL")
            eval_pc_val = merge_eval_dictionaries(eval_pc_val, _eval_pc_val)
            patients_pc_val += [_patients_pc_val]

    print("Computing DICE curve for Lesion Validation samples")
    eval_pc_val['bestDiceScore'], eval_pc_val['bestThreshold'] = Metrics.compute_dice_curve_recursive(
        eval_pc_val['diffs'].flatten(),
        eval_pc_val['labelmaps'].flatten(),
        plottitle="DICE vs Thresholds Curve for Lesion Testing Validation Samples",
        filename=os.path.join(eval_dir, 'dicePC_VAL.png'),
        granularity=10
    )
    return eval_pc_val['bestDiceScore'], eval_pc_val['bestThreshold']
Example #12
def merge_datasets(args):
    # NOTE: this snippet arrived without its enclosing "def" line; the name
    # merge_datasets, the args parameter, and the final return value are
    # reconstructed guesses.
    paths = args.datasets.split(',')
    data = read_dataset(paths[0])

    if dfk.SAMPLE_SAMPLE in data:
        all_dois = []
        all_data = []
        size = 0
        for path in paths:
            data = read_dataset(path)
            filter = data.get(dfk.SAMPLE_FILTER)
            query = data.get(dfk.SAMPLE_QUERY)
            size = size + data.get(dfk.SAMPLE_SIZE)
            all_dois.extend(data.get(dfk.SAMPLE_DOIS, []))
            all_data.extend(data.get(dfk.SAMPLE_SAMPLE, []))
        dataset = {dfk.SAMPLE_TIMESTAMP: timestamp(),
                   dfk.SAMPLE_FILTER: filter,
                   dfk.SAMPLE_QUERY: query,
                   dfk.SAMPLE_SIZE: size,
                   dfk.SAMPLE_DOIS: list(set(all_dois)),
                   dfk.SAMPLE_SAMPLE: all_data}
    else:
        all_dois = []
        all_data = []
        for path in paths:
            data = read_dataset(path)
            all_dois.extend(data.get(dfk.DATASET_DOIS, []))
            all_data.extend(data.get(dfk.DATASET_DATASET, []))
        dataset = {dfk.DATASET_DOIS: list(set(all_dois)),
                   dfk.DATASET_DATASET: all_data}
    return dataset


Example #13
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
            description='add similar records to existing sample')
    parser.add_argument('-v', '--verbose', help='verbose output',
                        action='store_true')
    parser.add_argument('-s', '--sample', help='input sample file',
                        type=str, required=True)
    parser.add_argument('-e', '--extend',
                        help='the number of similar items per item to add',
                        type=int, required=True)
    parser.add_argument('-o', '--output', help='output sample file',
                        type=str, required=True)
    args = parser.parse_args()

    init_logging(args.verbose)

    sample_data = read_sample_data(args.sample)
    similar_records = \
        search_similar_items(sample_data[dfk.SAMPLE_SAMPLE], args.extend)
    logging.debug('Final similar items size: {}'.format(len(similar_records)))
    extended_sample_data = sample_data
    extended_sample_data[dfk.SAMPLE_TIMESTAMP] = timestamp()
    extended_sample_data[dfk.SAMPLE_SIZE] = \
        sample_data[dfk.SAMPLE_SIZE] + len(similar_records)
    extended_sample_data[dfk.SAMPLE_SAMPLE] = \
        sample_data[dfk.SAMPLE_SAMPLE] + similar_records
    save_sample_data(extended_sample_data, args.output)
Example #14
def evaluate(datasetPC, gan, options, epoch='last', description=None):
    _time = {'evaluation': time.time()}

    # Variables
    histogram_range = (0.01, 0.075)
    num_slices = options["sliceEnd"] - options["sliceStart"]

    # Create eval folder
    eval_dir = os.path.join(
        options['train']['samplesDir'],
        gan.network.__name__,
        gan.model_dir,
        'eval-' + str(epoch) + '-' + str(utils.timestamp()).replace(":", "-")
    )
    if description is not None:
        eval_dir += "-" + str(description)
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)

    # EVALUATE LESION SAMPLES #
    sample_dir = os.path.join(eval_dir, 'samples_test_PC')
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)

    eval_pc, patients_pc = _evaluate(datasetPC, gan, sample_dir, options, split="TEST")

    print("Computing histogram for lesion testing difference images")
    eval_pc['diffHistogram'], _ = np.histogram(eval_pc['diffs'], bins='auto', range=histogram_range)
    utils.plot_histogram_with_labels(eval_pc['diffs'], eval_pc['labelmaps'], 'auto', histogram_range,
                                     "Histogram of difference images in the lesion testing dataset",
                                     exportPDF=os.path.join(eval_dir, 'testing_lesions_diffimages_histogram.pdf'))
    print("Done.")
    if "epistemic_variance" in eval_pc and len(eval_pc["epistemic_variance"]) > 0:
        print("Computing uncertainty histogram for lesion testing difference images")
        percentile_99_8 = np.percentile(eval_pc['epistemic_variance'][eval_pc['epistemic_variance'] >= 0], 99.8)
        _range = (1e-5, percentile_99_8)
        eval_pc['uncertaintyHistogram'], _ = np.histogram(eval_pc['epistemic_variance'], bins=50, range=_range)
        utils.plot_histogram_with_labels(eval_pc['epistemic_variance'], eval_pc['labelmaps'], 50, _range,
                                         "Histogram of Epistemic Variances images in the lesion testing dataset",
                                         exportPDF=os.path.join(eval_dir, 'testing_lesions_epistemic_variances_histogram.pdf'))
        print("Done.")

    print("Computing ROC curve for Lesion samples")
    _time['ROC'] = time.time()
    eval_pc['diff_AUC'], _fpr, _tpr, _threshs = Metrics.compute_roc(eval_pc['diffs'].flatten(), eval_pc['labelmaps'].astype(bool).flatten(),
                                                                    plottitle="ROC Curve for Lesion Testing Samples",
                                                                    filename=os.path.join(eval_dir, 'rocPC.png'))
    _time['ROC'] = time.time() - _time['ROC']
    print('Done in {} seconds'.format(_time['ROC']))
    if should(options, "exportROC"):
        _tmp = {"fpr": _fpr, "tpr": _tpr, "threshs": _threshs}
        np.save(os.path.join(eval_dir, 'rocPC.npy'), _tmp, allow_pickle=True)

    print("Computing Precision-Recall curve for Lesion samples")
    _time['PRC'] = time.time()
    eval_pc['diff_AUPRC'], _precisions, _recalls, _threshs = Metrics.compute_prc(
        eval_pc['diffs'].flatten(),
        eval_pc['labelmaps'].astype(bool).flatten(),
        plottitle="Precision-Recall Curve for Lesion Testing Samples",
        filename=os.path.join(eval_dir, 'prcPC.png')
    )
    _time['PRC'] = time.time() - _time['PRC']
    print('Done in {} seconds'.format(_time['PRC']))
    if should(options, "exportPRC"):
        _tmp = {"precisions": _precisions, "recalls": _recalls, "threshs": _threshs}
        np.save(os.path.join(eval_dir, 'prcPC.npy'), _tmp, allow_pickle=True)
    # Quickly determine thresholds for different precisions to get the maximal possible recall
    idx_precision70 = np.argmax(_precisions <= 0.7)
    diffs_thresholded_at_precision70 = filter_3d_connected_components(np.squeeze(eval_pc['diffs'] > _threshs[idx_precision70]))

    print("Computing DICE curve for Lesion samples")
    _time['DiceCurve'] = time.time()
    eval_pc['bestDiceScore'], eval_pc['bestThreshold'] = Metrics.compute_dice_curve_recursive(
        eval_pc['diffs'].flatten(), eval_pc['labelmaps'].flatten(),
        plottitle="DICE vs Thresholds Curve for Lesion Testing Samples",
        filename=os.path.join(eval_dir, 'dicePC.png'),
        granularity=10
    )
    _time['DiceCurve'] = time.time() - _time['DiceCurve']
    print('Done in {} seconds'.format(_time['DiceCurve']))

    if options["threshold"] == 'bestdice':
        diffs_thresholded = eval_pc['diffs'] > eval_pc['bestThreshold']
    else:
        diffs_thresholded = eval_pc['diffs'] > options["threshold"]
        diffs_thresholded_at_precision70 = diffs_thresholded
    diffs_thresholded = filter_3d_connected_components(np.squeeze(diffs_thresholded))

    eval_pc['thresholdType'] = options["threshold"]
    eval_pc['DiceScore'] = Metrics.dice(diffs_thresholded, eval_pc['labelmaps'])
    eval_pc['DiceScorePerPatient'] = []
    eval_pc['PrecisionPerPatient'] = []
    eval_pc['RecallPerPatient'] = []
    # Connected-component counts accumulated with += in the loop below
    eval_pc['TPCC'] = eval_pc['FPCC'] = eval_pc['FNCC'] = 0
    for p, patient in enumerate(patients_pc):
        subvolume_prediction = diffs_thresholded[p * num_slices:(p + 1) * num_slices, :, :]
        subvolume_groundtruth = eval_pc['labelmaps'][p * num_slices:(p + 1) * num_slices, :, :]
        eval_pc['DiceScorePerPatient'] += [Metrics.dice(subvolume_prediction, subvolume_groundtruth.astype(bool))]
        eval_pc['PrecisionPerPatient'] += [Metrics.precision(subvolume_prediction, subvolume_groundtruth.astype(bool))]
        eval_pc['RecallPerPatient'] += [Metrics.recall(subvolume_prediction, subvolume_groundtruth.astype(bool))]

        # Choose a different operating point from the precision-recall curve:
        # take the threshold at 70% precision and, based on that, compute the
        # lesion detection rate.
        _TPs, _FPs, _FNs = compute_detection_rate(np.squeeze(diffs_thresholded_at_precision70[p * num_slices:(p + 1) * num_slices, :, :]),
                                                  np.squeeze(subvolume_groundtruth.astype(bool)))
        eval_pc['TPCC'] += _TPs
        eval_pc['FPCC'] += _FPs
        eval_pc['FNCC'] += _FNs
    eval_pc['DiceScorePerPatientMean'] = np.mean(np.array(eval_pc['DiceScorePerPatient']))
    eval_pc['DiceScorePerPatientStd'] = np.std(np.array(eval_pc['DiceScorePerPatient']))
    eval_pc['PrecisionPerPatientMean'] = np.mean(np.array(eval_pc['PrecisionPerPatient']))
    eval_pc['PrecisionPerPatientStd'] = np.std(np.array(eval_pc['PrecisionPerPatient']))
    eval_pc['RecallPerPatientMean'] = np.mean(np.array(eval_pc['RecallPerPatient']))
    eval_pc['RecallPerPatientStd'] = np.std(np.array(eval_pc['RecallPerPatient']))

    # Threshold diffs and compute Confusion matrix, TPR, FPR and VolumeDifference
    eval_pc['TP'], eval_pc['FP'], eval_pc['TN'], eval_pc['FN'] = Metrics.confusion_matrix(
        diffs_thresholded, eval_pc['labelmaps'].astype(bool))
    eval_pc['TPR'] = Metrics.tpr(diffs_thresholded, eval_pc['labelmaps'].astype(bool))
    eval_pc['FPR'] = Metrics.fpr(diffs_thresholded, eval_pc['labelmaps'].astype(bool))  # assumes Metrics.fpr exists alongside Metrics.tpr
    eval_pc['VD'] = Metrics.vd(diffs_thresholded, eval_pc['labelmaps'].astype(bool))
    if eval_pc['TPCC'] + eval_pc['FNCC'] > 0:
        eval_pc['TPRCC'] = eval_pc['TPCC'] / (eval_pc['TPCC'] + eval_pc['FNCC'])
    else:
        eval_pc['TPRCC'] = 0.0
    if eval_pc['TPCC'] + eval_pc['FPCC'] > 0:
        eval_pc['PrecisionCC'] = eval_pc['TPCC'] / (eval_pc['TPCC'] + eval_pc['FPCC'])
    else:
        eval_pc['PrecisionCC'] = 0.0

    for idx in range(0, eval_pc['x'].shape[0]):
        tmp = image_utils.augment_prediction_and_groundtruth_to_image(eval_pc['x'][idx],
                                                                      diffs_thresholded[idx],
                                                                      eval_pc['labelmaps'][idx])
        p = math.floor(float(idx) / num_slices)
        s = datasetPC.options.sliceStart + (idx % (datasetPC.options.sliceEnd - datasetPC.options.sliceStart))
        imwrite(os.path.join(sample_dir, '{}_{}_vis.png'.format(p, s)), np.squeeze(cv2.normalize(tmp, None, 0, 255, cv2.NORM_MINMAX)).astype('uint8'))

    # Store evalPC to disk

    eval_pc.pop('x')
    eval_pc.pop('diffs')
    eval_pc.pop('labelmaps')
    eval_pc.pop('l1reconstructionErrors')
    eval_pc.pop('l2reconstructionErrors')
    eval_pc.pop('reconstructions')
    eval_pc.pop('diffHistogram')

    np.save(os.path.join(eval_dir, 'evalPC.npy'), eval_pc)

    _time['evaluation'] = time.time() - _time['evaluation']

    # Export to TXT
    with open(os.path.join(eval_dir, 'evalPC.txt'), "w") as f:
        f.write(str(eval_pc))