Example 1
def get_address_and_values_from_bytes(bytes_data):
    data_module_num = bytes_data[:2]
    data_others = bytes_data[2:]

    # Get the starting address
    sensor_module_num = Convert.byte2_to_uint16(data_module_num)
    address_begin = get_module_address_from_id(sensor_module_num)

    values = Convert.convert_to_uint16_data('bytes', data_others)
    return address_begin, values
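Convert.byte2_to_uint16 and Convert.convert_to_uint16_data are project-specific helpers whose byte order is not shown in this snippet. A rough standard-library equivalent is sketched below; big-endian is an assumption, and the payload is made up for illustration:

import struct

# Hypothetical payload: 2-byte module number followed by two uint16 values.
bytes_data = bytes([0x00, 0x07, 0x01, 0x2C, 0x00, 0x64])
data_module_num, data_others = bytes_data[:2], bytes_data[2:]

module_num = int.from_bytes(data_module_num, byteorder="big")                   # 7
values = list(struct.unpack(">" + "H" * (len(data_others) // 2), data_others))  # [300, 100]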
Example 2
def get_module_id_and_timestamp_from_bytes(bytes_data):
    data_module_num = bytes_data[:2]
    data_others = bytes_data[2:]

    # Get the sensor module number
    sensor_module_id = Convert.byte2_to_uint16(data_module_num)
    # Get the timestamp value
    time_stamp = []
    Convert.convert_to_real_data(
        time_stamp, Sensor_Module_Config['time_stamp'][1],
        data_others[(Sensor_Module_Config['time_stamp'][0] - 6) * 2:])
    time_stamp = time_stamp[0]
    return sensor_module_id, time_stamp
Example 3
def get_system_parameter_address_and_values():
    address_begin = Sys_Parameter_Address
    values = []
    for i in System_Parameter:
        values = Convert.convert_to_uint16_data(System_Parameter_Config[i][1],
                                                System_Parameter_Config[i][2],
                                                values)
    return address_begin, values
Example 4
    def __init__(
        self,
        images_dir,
        csv_dir=None,
        seed=23,
        train_test_split=0.9,
        train_test="train",
        mode='concatenate',
        flip_proba=0.5,
        noise_magnitude=None,
        **kwargs,
    ):
        self.images_dir = Path(images_dir).expanduser()
        self.seed = seed
        self.train_test_split = train_test_split
        self.train_test = train_test.lower()
        assert self.train_test in ["train", "test"]
        self.mode = mode.lower()
        assert self.mode in ["concatenate", "scan", "mask"]
        self.flip_proba = flip_proba
        assert 0. <= flip_proba <= 1.
        self.noise_magnitude = noise_magnitude

        paths = sorted(glob(str(self.images_dir / "*.*")))
        size = len(paths)
        random.seed(self.seed)
        test_idx = random.sample(range(size),
                                 int(size * (1 - self.train_test_split)))
        train_idx = list(set(range(size)) - set(test_idx))
        iterator = train_idx if self.train_test == 'train' else test_idx
        self.paths = sorted([paths[i] for i in iterator])
        self.__len = len(self.paths)

        self.csv_dir = csv_dir
        if csv_dir:
            self.csv_dir = Path(csv_dir).expanduser()
            self.ground_truth = pd.read_csv(self.csv_dir)

            train_ids = [int(get_id_from_path(paths[i])) for i in train_idx]
            train_survival_time = self.ground_truth.loc[
                self.ground_truth.PatientID.isin(train_ids), 'SurvivalTime']
            self.scaler = StandardScaler().fit(
                train_survival_time.values.reshape(-1, 1))

        if self.mode != "concatenate":
            transform = [
                Convert(),
                T.RandomHorizontalFlip(self.flip_proba),
                T.RandomVerticalFlip(self.flip_proba),
                T.ToTensor(),
            ]
            if self.mode == 'scan' and self.noise_magnitude is not None:
                transform.append(
                    T.Lambda(lambda x: x + self.noise_magnitude * torch.randn_like(x)))
            self.transform = T.Compose(transform)
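The enclosing class is not shown here; assuming it is a PyTorch Dataset subclass (the name ScanDataset, the paths, and the on-disk layout below are hypothetical), a minimal instantiation might look like this:

from torch.utils.data import DataLoader

# Usage sketch only; ScanDataset stands in for the unnamed class above.
dataset = ScanDataset(
    images_dir="~/data/scans",          # directory of .npz files with "scan" and "mask" arrays
    csv_dir="~/data/ground_truth.csv",  # CSV with PatientID and SurvivalTime columns
    train_test="train",
    mode="concatenate",
    flip_proba=0.5,
)
loader = DataLoader(dataset, batch_size=1, shuffle=True)
batch = next(iter(loader))              # dict with "images", "y", "info" keys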
Example 5
def get_sensor_address_and_values(module_id):
    if module_id in Sensor_Module_Id_List:
        address_begin = Sensor_Module_Address_Dict[module_id]

        sensor_config = deepcopy(Sensor_Module_Config)
        sensor_config['module_id'][2] = module_id
        sensor_config['install_num'][2] = Sensor_Module_InstallNum_Dict[
            module_id]
        values = []
        for i in Sensor_Module:
            values = Convert.convert_to_uint16_data(sensor_config[i][1],
                                                    sensor_config[i][2],
                                                    values)
        return address_begin, values
    else:
        # Unknown module id: nothing to convert
        return None
Example 6
from banner import *
from utils import modinv, Convert
banner()
try:
    c = int(input("==> c = "))
    p = int(input("==> p = "))
    q = int(input("==> q = "))
    dp = int(input("==> dp = "))
    dq = int(input("==> dq = "))

    slowprint("\n[+] Please Wait ...\n")

    def chinese_remainder_theorem(p, q, dp, dq, cipher_text):
        q_inv = modinv(q, p)  # q^-1 mod p
        m1 = pow(cipher_text, dp, p)
        m2 = pow(cipher_text, dq, q)
        h = (q_inv * (m1 - m2)) % p
        return m2 + h * q

    Convert(chinese_remainder_theorem(p, q, dp, dq, c))
except ValueError:
    slowprint("\n[-] c,p,q,dp,dq Must Be Integar Number")
except AssertionError:
    slowprint("\n[-] Wrong Data")
except KeyboardInterrupt:
    exit()
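The function above is the textbook RSA-CRT decryption: q_inv = q^-1 mod p, m1 = c^dp mod p, m2 = c^dq mod q, m = m2 + q * ((q_inv * (m1 - m2)) mod p). A self-contained sketch that swaps utils.modinv for Python 3.8+'s built-in modular inverse, checked against the classic p=61, q=53, e=17 toy key:

def rsa_crt_decrypt(c, p, q, dp, dq):
    q_inv = pow(q, -1, p)        # q^-1 mod p (Python 3.8+)
    m1 = pow(c, dp, p)
    m2 = pow(c, dq, q)
    h = (q_inv * (m1 - m2)) % p
    return m2 + h * q            # equals pow(c, d, p * q)

# Toy key: p=61, q=53, e=17, d=2753; m=65 encrypts to c=pow(65, 17, 3233)=2790.
assert rsa_crt_decrypt(2790, 61, 53, 2753 % 60, 2753 % 52) == 65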
Example 7
import functools
import gmpy
from banner import *
from utils import Convert

def crt(n, a):
    total = 0
    prod = functools.reduce(lambda x, y: x * y, n)
    for n_i, a_i in zip(n, a):
        p = prod // n_i
        total += a_i * gmpy.invert(p, n_i) * p
    return total % prod

try:
    c1 = int(input("==> c1 = "))
    c2 = int(input("==> c2 = "))
    c3 = int(input("==> c3 = "))
    n1 = int(input("==> n1 = "))
    n2 = int(input("==> n2 = "))
    n3 = int(input("==> n3 = "))
    
    N = [n1, n2, n3]
    C = [c1, c2, c3]
    e = len(N)
    a = crt(N,C)
    for n, c in zip(N, C):
        assert a % n == c
    m = gmpy.root(a,e)[0]
    Convert(m)
except ValueError:
    slowprint("\n[-] c1,c2,c3,n1,n2,n3 Must Be Integar Number")
except AssertionError:
    slowprint("\n[-] Wrong Data")
except KeyboardInterrupt:
    exit()
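This is Hastad's broadcast attack for e = 3: the same message encrypted under three pairwise-coprime moduli is recombined with the CRT, and since m^3 < n1*n2*n3 the result is the exact integer m^3, so an integer cube root recovers m. A self-contained sketch without gmpy (toy numbers chosen for illustration):

from functools import reduce

def crt(n, a):
    total = 0
    prod = reduce(lambda x, y: x * y, n)
    for n_i, a_i in zip(n, a):
        p = prod // n_i
        total += a_i * pow(p, -1, n_i) * p   # modular inverse via pow (Python 3.8+)
    return total % prod

def icbrt(x):
    # Integer cube root by bisection: largest r with r**3 <= x.
    lo, hi = 0, 1 << (x.bit_length() // 3 + 2)
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if mid ** 3 <= x:
            lo = mid
        else:
            hi = mid - 1
    return lo

m = 9999                               # toy message, small enough that m**3 < n1*n2*n3
N = [101 * 103, 107 * 109, 113 * 127]  # toy pairwise-coprime moduli
C = [pow(m, 3, n) for n in N]
assert icbrt(crt(N, C)) == m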
Example 8
from banner import *
from utils import modinv, Convert
banner()

try:

    c = int(input("==> c = "))
    p = int(input("==> p = "))
    q = int(input("==> q = "))
    e = int(input("==> e = "))
    n = p * q
    phi = (p - 1) * (q - 1)
    d = modinv(e, phi)
    decrypt = pow(c, d, n)
    Convert(decrypt)

except ImportError:
    slowprint("\n[-] Module not setup ")
except ValueError:
    slowprint("\n[-] p,q,e,c Must be Integer Number")
except AssertionError:
    slowprint("\n[-] Wrong Data")
except KeyboardInterrupt:
    exit()
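For comparison, the same textbook RSA decryption (d = e^-1 mod phi(n), m = c^d mod n) without the external modinv helper, using Python 3.8+'s pow(e, -1, phi) and the classic toy key again:

def rsa_decrypt(c, p, q, e):
    n = p * q
    phi = (p - 1) * (q - 1)
    d = pow(e, -1, phi)          # private exponent (Python 3.8+ modular inverse)
    return pow(c, d, n)

# p=61, q=53, e=17: m=65 encrypts to c=pow(65, 17, 3233)=2790 and decrypts back.
assert rsa_decrypt(2790, 61, 53, 17) == 65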
Example 9
def solve_single_request(bytes_data):
    address = Convert.byte2_to_uint16(bytes_data[1:3], little_endian=False)
    values = Convert.bytes_to_uint16(bytes_data[3:], little_endian=False)
    return address, values
Example 10
def get_Pi_timestamp_address_and_values():
    address_begin = Pi_Time_stamp_Address
    values = Convert.convert_to_uint16_data('uint32', int(time.time()))
    return address_begin, values
Example 11
def get_timestamp_address_and_values():
    address_begin = Sys_Parameter_Address + System_Parameter_Config[
        'time_stamp'][0]
    values = Convert.convert_to_uint16_data(
        System_Parameter_Config['time_stamp'][1], int(time.time()))
    return address_begin, values
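Both timestamp helpers pack int(time.time()) as a 'uint32' into 16-bit register values. The exact word order used by Convert.convert_to_uint16_data is not shown; with plain bit operations and an assumed high-word-first order it would be:

import time

ts = int(time.time())
# Split the 32-bit timestamp into two 16-bit register words.
# High word first is an assumption; the project helper may use the opposite order.
values = [(ts >> 16) & 0xFFFF, ts & 0xFFFF]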
Example 12
    def __getitem__(self, idx, seed=None):
        path = self.paths[idx]

        scanner = np.load(path)
        scan, mask = scanner["scan"], scanner["mask"]

        if self.train_test == "train":
            random.seed(seed)
            v_flip = random.random() < self.flip_proba
            h_flip = random.random() < self.flip_proba

            if self.mode == 'concatenate':
                scans = []
                masks = []
                to_pil = Convert()
                to_tens = T.ToTensor()
                for img, msk in zip(scan, mask):
                    img, msk = to_pil(img), to_pil(np.uint8(msk))
                    if v_flip:
                        img = TF.vflip(img)
                        msk = TF.vflip(msk)
                    if h_flip:
                        img = TF.hflip(img)
                        msk = TF.hflip(msk)
                    img = to_tens(img).unsqueeze(0).float()
                    msk = to_tens(msk).unsqueeze(0).float()
                    if self.noise_magnitude is not None:
                        noise = self.noise_magnitude * torch.randn_like(img)
                        img += noise
                    scans.append(img)
                    masks.append(msk)
                scans = torch.cat(scans, dim=1)
                masks = torch.cat(masks, dim=1)
                output = torch.cat([scans, masks], dim=0)

            elif self.mode == 'mask':
                output = np.expand_dims(mask, axis=1)
                output = torch.cat([self.transform(x) for x in output], dim=0)
            elif self.mode == 'scan':
                output = np.expand_dims(scan, axis=1)
                output = torch.cat([self.transform(x) for x in output], dim=0)

        else:
            if self.mode == 'mask':
                output = torch.Tensor(mask).unsqueeze(1).float()
            elif self.mode == 'scan':
                output = torch.Tensor(scan).unsqueeze(1).float()
            elif self.mode == 'concatenate':
                scan = torch.Tensor(scan).unsqueeze(0).float()
                mask = torch.Tensor(mask).unsqueeze(0).float()
                output = torch.cat([scan, mask], dim=0)

        output = {"images": output}
        patient_id = int(get_id_from_path(path))
        if self.csv_dir:
            patient_info = self.ground_truth.loc[self.ground_truth.PatientID ==
                                                 patient_id]
            output['y'] = self.scaler.transform([[patient_info.iloc[0, 1]]])[0]
            output['info'] = np.array(
                [patient_id, int(patient_info.iloc[0, 2])])
        else:
            output['info'] = np.array([patient_id, 1.])

        return output
Example 13
    parser.add_argument('-o',
                        '--output_root',
                        help="generated source files root")
    parser.add_argument(
        '-q',
        '--quant',
        type=int,
        default=0,
        help="quantization granularity: 0 (default) for per-tensor, "
             "1 for per-channel")
    args = parser.parse_args()

    if args.input_root is None or args.name is None or args.output_root is None:
        parser.print_help()
        quit()

    print(
        f'\nGenerating {args.output_root}/{args.name} on {args.target_chip}...',
        end='')
    convert = Convert(target_chip=args.target_chip,
                      input_root=args.input_root,
                      json_file_name=args.json_file_name,
                      output_root=args.output_root,
                      name=args.name,
                      quant=args.quant)
    convert()
    print(' Finish\n')

else:
    print(f'Python {python_version} on {system_type} is not supported')