Beispiel #1
0
def main(args):
    """
    Run TFT inference on a test split and report q-risk quantiles and latency.

    Loads the model and its training-time config from ``args.checkpoint``,
    evaluates on ``args.data``, optionally visualizes, then logs and prints
    P10/P50/P90 q-risk and latency percentiles.
    """
    setup_logger(args)
    # Set up model: the config is stored inside the checkpoint
    state_dict = torch.load(args.checkpoint)
    config = state_dict['config']
    model = TemporalFusionTransformer(config).cuda()
    model.load_state_dict(state_dict['model'])
    model.eval()
    model.cuda()

    # Set up dataset
    test_split = TFTDataset(args.data, config)
    data_loader = DataLoader(test_split, batch_size=args.batch_size, num_workers=4)

    # NOTE(review): pickle.load is unsafe on untrusted files — these are assumed
    # to come from the matching preprocessing step. Context managers close the
    # files (the original leaked the file handles).
    with open(args.tgt_scalers, 'rb') as f:
        scalers = pickle.load(f)
    with open(args.cat_encodings, 'rb') as f:
        cat_encodings = pickle.load(f)

    if args.visualize:
        # TODO: abstract away all forms of visualization.
        visualize_v2(args, config, model, data_loader, scalers, cat_encodings)

    quantiles, perf_dict = inference(args, config, model, data_loader, scalers, cat_encodings)
    # Extract the scalar risks BEFORE rebinding `quantiles` to a dict: the
    # original code called .format(*quantiles) on the dict, which unpacked and
    # printed the dict KEYS ('test_p10', ...) instead of the values.
    p10, p50, p90 = quantiles[0].item(), quantiles[1].item(), quantiles[2].item()
    risk_sum = sum(quantiles).item()
    quantiles = {'test_p10': p10, 'test_p50': p50, 'test_p90': p90, 'sum': risk_sum}
    finish_log = {**quantiles, **perf_dict}
    dllogger.log(step=(), data=finish_log, verbosity=1)
    print('Test q-risk: P10 {} | P50 {} | P90 {}'.format(p10, p50, p90))
    print('Latency:\n\tAverage {:.3f}s\n\tp90 {:.3f}s\n\tp95 {:.3f}s\n\tp99 {:.3f}s'.format(
        perf_dict['latency_avg'], perf_dict['latency_p90'], perf_dict['latency_p95'], perf_dict['latency_p99']))
import logging
import log_helper
import winreg
import enum

from system_utils import is_x64os

# Module-wide logger for the registry helper (DEBUG level, console only).
logger = log_helper.setup_logger(name="registry_helper", level=logging.DEBUG, log_to_file=False)


__doc__ = """File contains registry-related functions, for creating, enumerating, editing and removing Windows 
registry keys and values. enums and dictionaries with registry-related integer values, imported from winreg module
Also is_x64os() helper function provided
"""

# Map canonical hive-name strings onto the corresponding winreg hive handle
# constants, so callers can address hives by name (e.g. "HKEY_LOCAL_MACHINE").
HIVES_MAP = {
    "HKEY_CLASSES_ROOT": winreg.HKEY_CLASSES_ROOT,
    "HKEY_CURRENT_USER": winreg.HKEY_CURRENT_USER,
    "HKEY_LOCAL_MACHINE": winreg.HKEY_LOCAL_MACHINE,
    "HKEY_USERS": winreg.HKEY_USERS,
    "HKEY_PERFORMANCE_DATA": winreg.HKEY_PERFORMANCE_DATA,
    "HKEY_CURRENT_CONFIG": winreg.HKEY_CURRENT_CONFIG,
    "HKEY_DYN_DATA": winreg.HKEY_DYN_DATA
}


class RegistryKeyType(enum.IntEnum):
    # Binary data in any form
    REG_BINARY = 0

    # 32-bit number
import sys
import argparse
import logging
import winreg
import log_helper
import system_fingerprint
import hardware_fingerprint
import telemetry_fingerprint
import random_utils
import registry_helper

from registry_helper import RegistryKeyType, Wow64RegistryEntry
from system_utils import is_x64os, platform_version

# Module-wide logger (INFO level, console only).
logger = log_helper.setup_logger(name="antidetect",
                                 level=logging.INFO,
                                 log_to_file=False)


def generate_telemetry_fingerprint():
    r"""
    IDs related to Windows 10 Telemetry.
    All the telemetry revolves around the DeviceID registry value.
    It can be found in the following keys:
    HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\SQMClient
    HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Diagnostics\DiagTrack\SettingsRequests
    """
    # The telemetry DeviceID only exists on Windows 10; bail out elsewhere.
    windows_ver = platform_version()
    if not windows_ver.startswith("Windows-10"):
        logger.warning("Telemetry ID replace available for Windows 10 only")
        return
Beispiel #4
0
# parser.add_argument("--epochs",default=EPOCHS)

# args = parser.parse_args()

# EPOCHS = int(args.epochs)
''' ============================================================================= save name '''
SAVE_NAME = f'test'  # NOTE(review): f-string has no placeholders; plain 'test' would do
''' ============================================================================= init '''
### GPU selection
GPU_ID = set_gpu(GPU_ID)

### logging: run directory named "<SAVE_NAME> <timestamp>" under LOG_DIR
now = datetime.datetime.now()
save_name = SAVE_NAME + ' ' + now.strftime('%y%m%d_%p:%I:%M:%S')
save_path = LOG_DIR + save_name
log = setup_logger(save_path, save_name)

### copy every *.py in the working directory into the run directory
pylist = glob.glob('./*.py')
for p in pylist:
    filename = os.path.split(p)[-1]
    shutil.copy(p, os.path.join(save_path, filename))

### RNG seeding (left disabled in the original)
# import imgaug
# imgaug.random.seed(0)
# np.random.seed(0)
# tf.random.set_seed(0)
# random.seed(0)

### mixed precision
Beispiel #5
0
import sys
import logging
import log_helper
import random
import uuid
import string
import random_utils

# Module-wide logger. NOTE(review): the name "system_fingerpring" looks like a
# typo of "system_fingerprint" — confirm before renaming (it is a runtime string).
logger = log_helper.setup_logger(name="system_fingerpring", level=logging.INFO, log_to_file=False)


class WinFingerprint:
    """
    """
    EDITIONS = {
        7: [["Starter", "Starter"],
            ["HomeBasic", "Home Basic"],
            ["HomePremium", "Home Premium"],
            ["Professional", "Professional"],
            ["ProfessionalN", "Professional N"],
            ["ProfessionalKN", "Professional KN"],
            ["Enterprise", "Enterprise"],
            ["Ultimate", "Ultimate"]],
        8: [["Core", "Core"],
            ["Pro", "Pro"],
            ["ProN", "Pro N"],
            ["Enterprise", "Enterprise"],
            ["EnterpriseN", "Enterprise N"],
            ["OEM", "OEM"],
            ["withBing", "with Bing"]],
        10: [["Home", "Home"],
Beispiel #6
0
def main(args):
    """
    Train a Temporal Fusion Transformer, then reload the best checkpoint and
    evaluate normalized P10/P50/P90 q-risk on the test split.

    `args` is expected to carry the CLI options referenced below (dataset,
    batch_size, epochs, distributed settings, AMP/EMA switches, paths, ...).
    """
    nproc_per_node = torch.cuda.device_count()
    # Optionally pin this process's CPU thread affinity to its GPU
    if args.affinity != 'disabled':
        affinity = gpu_affinity.set_affinity(
                args.local_rank,
                nproc_per_node,
                args.affinity
            )
        print(f'{args.local_rank}: thread affinity: {affinity}')


    # Enable CuDNN autotuner
    torch.backends.cudnn.benchmark = True

    ### INIT DISTRIBUTED
    if args.distributed_world_size > 1:
        args.local_rank = int(os.environ.get('LOCAL_RANK', args.local_rank))
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')
        args.distributed_world_size = int(os.environ['WORLD_SIZE'])
        args.distributed_rank = dist.get_rank()
        print_once(f'Distributed training with {args.distributed_world_size} GPUs')
        torch.cuda.synchronize()

    # Seed NumPy and torch RNGs only when a seed was requested
    if args.seed:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)

    setup_logger(args)

    # Dataset-specific default config, optionally overridden with CLI JSON
    config = CONFIGS[args.dataset]()
    if args.overwrite_config:
        config.__dict__.update(json.loads(args.overwrite_config))

    dllogger.log(step='HPARAMS', data={**vars(args), **vars(config)}, verbosity=1)

    model = TemporalFusionTransformer(config).cuda()
    # Optional exponential moving average of the weights, used for validation
    if args.ema_decay:
        model_ema = ModelEma(model, decay=args.ema_decay)

    print_once('Model params: {}'.format(sum(p.numel() for p in model.parameters())))
    criterion = QuantileLoss(config).cuda()
    optimizer = FusedAdam(model.parameters(), lr=args.lr)
    # NOTE(review): apex amp / FusedAdam are deprecated upstream in favor of
    # native torch.cuda.amp — confirm before porting.
    if args.use_amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic")
    if args.distributed_world_size > 1:
        #model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
        model = DDP(model)

    train_loader, valid_loader, test_loader = load_dataset(args, config)

    global_step = 0
    perf_meter = PerformanceMeter()

    ### TRAINING LOOP ###
    for epoch in range(args.epochs):
        start = time.time()  # NOTE(review): `start` is unused in the visible code
        dllogger.log(step=global_step, data={'epoch': epoch}, verbosity=1)

        model.train()
        for local_step, batch in enumerate(train_loader):
            perf_meter.reset_current_lap()
            # Move non-empty tensors to GPU; empty tensors are replaced by None
            batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
            predictions = model(batch)
            # Score only the decoder part of the target sequence
            targets = batch['target'][:,config.encoder_length:,:]
            p_losses = criterion(predictions, targets)
            loss = p_losses.sum()

            if args.use_amp:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            # Step only every `grad_accumulation` iterations (0 disables accumulation)
            if not args.grad_accumulation or (global_step+1) % args.grad_accumulation == 0:
                if args.clip_grad:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
                optimizer.step()
                optimizer.zero_grad()
                if args.ema_decay:
                    model_ema.update(model)

            # Average the per-quantile losses across workers for logging
            if args.distributed_world_size > 1:
                dist.all_reduce(p_losses)
                p_losses /= args.distributed_world_size
                loss = p_losses.sum()

            torch.cuda.synchronize()
            # Exclude first and last step of each epoch from throughput totals
            ips = perf_meter.update(args.batch_size * args.distributed_world_size,
                    exclude_from_total=local_step in [0, len(train_loader)-1])

            log_dict = {'P10':p_losses[0].item(), 'P50':p_losses[1].item(), 'P90':p_losses[2].item(), 'loss': loss.item(), 'items/s':ips}
            dllogger.log(step=global_step, data=log_dict, verbosity=1)
            global_step += 1

        # Validate on the EMA weights when EMA is enabled
        validate(args, config, model_ema if args.ema_decay else model, criterion, valid_loader, global_step)

        # NOTE(review): early_stop_c / conv_step appear to be attributes that
        # `validate` sets on itself elsewhere — confirm before refactoring.
        if validate.early_stop_c >= args.early_stopping:
            print_once('Early stopping')
            break

    ### TEST PHASE ###
    # Reload the checkpoint (presumably written during validation) and evaluate
    state_dict = torch.load(os.path.join(args.results, 'checkpoint.pt'), map_location='cpu')
    if isinstance(model, DDP):
        model.module.load_state_dict(state_dict['model'])
    else:
        model.load_state_dict(state_dict['model'])
    model.cuda().eval()

    tgt_scalers = pickle.load(open(os.path.join(args.data_path, 'tgt_scalers.bin'), 'rb'))
    cat_encodings = pickle.load(open(os.path.join(args.data_path,'cat_encodings.bin'), 'rb'))

    # Normalized quantile risk: 2 * quantile_loss / mean(|target|)
    unscaled_predictions, unscaled_targets, _, _ = predict(args, config, model, test_loader, tgt_scalers, cat_encodings)
    losses = QuantileLoss(config)(unscaled_predictions, unscaled_targets)
    normalizer = unscaled_targets.abs().mean()
    quantiles = 2 * losses / normalizer

    # Average the risks across workers
    if args.distributed_world_size > 1:
        quantiles = quantiles.cuda()
        dist.all_reduce(quantiles)
        quantiles /= args.distributed_world_size

    quantiles = {'test_p10': quantiles[0].item(), 'test_p50': quantiles[1].item(), 'test_p90': quantiles[2].item(), 'sum':sum(quantiles).item()}
    finish_log = {**quantiles, 'average_ips':perf_meter.avg, 'convergence_step':validate.conv_step}
    dllogger.log(step=(), data=finish_log, verbosity=1)
Beispiel #7
0
import os
import sys
import argparse
import logging
import enum
import log_helper
import validatable_record
import generate_fingerprint as gen

from soft_assert import SoftAssert
from test_type import TestType
from validation_type import ValidationType


# Module-wide logger for the fingerprint test (DEBUG level, console only).
logger = log_helper.setup_logger(name="fingerprint_test", level=logging.DEBUG, log_to_file=False)


class ApplicationType(enum.IntEnum):
    """Application flavor targeted by the test (semantics inferred from member names)."""
    APP_PROTOTYPE = 0
    APP_PRODUCTION = 1
    APP_PROOF = 2


# Map CLI test-type names onto TestType members.
TEST_TYPE_MAP = dict(
    telemetry=TestType.telemetry_fingerprint,
    network=TestType.network_fingerprint,
    system=TestType.windows_fingerprint,
    hardware=TestType.hardware_fingerprint,
    font=TestType.font_fingerprint,
)
Beispiel #8
0
import os
import sys
import argparse
import logging
import log_helper
import subprocess
import psutil
import getpass

# Module-wide logger (DEBUG level, also written to file).
logger = log_helper.setup_logger(name="win10_cleaner",
                                 level=logging.DEBUG,
                                 log_to_file=True)

# We use PowerShell for most operations; Python provides a convenient wrapper
# for its output. (Was a bare string statement, which is discarded at runtime —
# converted to a real comment.)
POWERSHELL_COMMAND = r'C:\WINDOWS\system32\WindowsPowerShell\v1.0\powershell.exe'

# Cortana is disabled by killing the process and renaming the Cortana directory
# in SystemApps so that RuntimeBroker does not start it again.
# (Fixes the original typo "RintimeBroker".)


def find_cortana_directory(name, path):
    """
    Find the directory that contains the Cortana executable.

    :param name: Name of Cortana executable
    :param path: Path to Windows Store applications directory
    :return: Path to the directory containing Cortana, or None if not found
    """
    # Walk the tree lazily and return the first directory whose file list
    # contains the requested executable name; None when no match exists.
    candidates = (folder for folder, _dirs, files in os.walk(path) if name in files)
    return next(candidates, None)
import sys
import logging
import log_helper
import random
import registry_helper
from registry_helper import Wow64RegistryEntry

# Module-wide logger (INFO level, console only).
logger = log_helper.setup_logger(name="font_fp",
                                 level=logging.INFO,
                                 log_to_file=False)

__doc__ = "The script deletes N random fonts from the system"


def delete_random_font(fonts_delete):
    """
    Delete several random fonts from the system.

    :param fonts_delete: Number of fonts to delete
    """
    hive = "HKEY_LOCAL_MACHINE"
    fonts_key = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts"
    fonts64 = registry_helper.enumerate_key(hive, fonts_key)

    # The original used random.choice in a loop, which can pick the same font
    # twice and then try to delete an already-deleted value. Sample without
    # replacement instead, capped at the number of available fonts so
    # random.sample cannot raise ValueError.
    for delete_font in random.sample(fonts64, min(fonts_delete, len(fonts64))):
        logger.info("Delete font {0}".format(delete_font))
        registry_helper.delete_value(hive, fonts_key, delete_font)

Beispiel #10
0
import os
import sys
import stat
import subprocess
import argparse
import logging
import zipfile
import log_helper

# Module-wide logger for the Qt build helper (DEBUG level, console only).
logger = log_helper.setup_logger(name="qt_builder",
                                 level=logging.DEBUG,
                                 log_to_file=False)


# Switches for Qt's configure step: open-source release build, shared libs,
# desktop OpenGL, bundled zlib/pcre/libpng/libjpeg/freetype/harfbuzz, with a
# long list of disabled features and skipped modules to shrink the build.
QT_CONFIG_PARAMS = "-mp -opensource -confirm-license -nomake tests -nomake examples -no-compile-examples" \
                   " -release -shared -pch -no-ltcg -accessibility -no-sql-sqlite -opengl desktop -no-openvg" \
                   " -no-nis -no-iconv -no-evdev -no-mtdev -no-inotify -no-eventfd -largefile -no-system-proxies" \
                   " -qt-zlib -qt-pcre -no-icu -qt-libpng -qt-libjpeg -qt-freetype -no-fontconfig -qt-harfbuzz" \
                   " -no-angle -incredibuild-xge -no-plugin-manifests -qmake -qreal double -rtti -strip -no-ssl" \
                   " -no-openssl -no-libproxy -no-dbus -no-audio-backend -no-wmf-backend -no-qml-debug -no-direct2d" \
                   " -directwrite -no-style-fusion -native-gestures -skip qt3d -skip qtactiveqt -skip qtandroidextras" \
                   " -skip qtcanvas3d -skip qtconnectivity -skip qtdeclarative -skip qtdoc -skip qtenginio" \
                   " -skip qtgraphicaleffects -skip qtlocation -skip qtmacextras -skip qtmultimedia" \
                   " -skip qtquickcontrols -skip qtquickcontrols2 -skip qtscript -skip qtsensors -skip qtserialbus" \
                   " -skip qtserialport -skip qtwayland -skip qtwebchannel -skip qtwebengine -skip qtwebsockets" \
                   " -skip qtwebview -skip qtx11extras -skip qtxmlpatterns"


###########################################################################
def on_rm_error(*args):
    """
Beispiel #11
0
import os
import sys
import stat
import argparse
import logging
import log_helper

# Module-wide logger (DEBUG level, console only).
logger = log_helper.setup_logger(name="simple_archive",
                                 level=logging.DEBUG,
                                 log_to_file=False)


###########################################################################
def on_rm_error(*args):
    """
    Error handler that removes a 'read-only' attribute when a file or
    directory could not be deleted, then retries the deletion.

    :param args: (func, path, exc_info) tuple
    """
    # Only the path of the file that could not be removed is of interest;
    # assume the failure was a read-only attribute and unlink after clearing it.
    func, path, exc_info = args
    logger.warning("OnRmError: {0}".format(path))
    os.chmod(path, stat.S_IWRITE)
    os.unlink(path)


def environment_value(environment_name):
    """
    :param environment_name: Name of the environment variable
    :return: Value of the environment variable or the empty string if not exists
Beispiel #12
0
import logging
import log_helper
import random
import uuid
import string
import random_utils

# Module-wide logger. NOTE(review): the name "hardware_fingerpring" looks like
# a typo of "hardware_fingerprint" — confirm before renaming (runtime string).
logger = log_helper.setup_logger(name="hardware_fingerpring",
                                 level=logging.DEBUG,
                                 log_to_file=False)


class HardwareFingerprint:
    """
    """
    def __init__(self):
        # Generate a fresh random identity for this fingerprint instance.
        # GUID-typed registry values are wrapped in braces: "{xxxxxxxx-...}".
        self.hw_profile_guid = ("{%s}" % str(uuid.uuid4()))
        self.performance_guid = ("{%s}" % str(uuid.uuid4()))
        self.machine_guid = str(uuid.uuid4())
        self.win_update_guid = str(uuid.uuid4())
        # Produced by a private helper defined elsewhere in the class.
        self.system_client_id = self.__random_system_client_id()

    def random_hw_profile_guid(self):
        """
        :return: Hardware profile GUID (fixed at construction time; repeated
            calls return the same value)
        """
        return self.hw_profile_guid

    def random_performance_guid(self):
        """
        :return: Performance\BootCKCLSettings and Performance\BShutdownCKCLSettings GUID
Beispiel #13
0
import os
import sys
import argparse
import logging
import log_helper
import registry_helper

# Module-wide logger (DEBUG level, console only).
logger = log_helper.setup_logger(name="windows_uninstall",
                                 level=logging.DEBUG,
                                 log_to_file=False)


def environment_value(environment_name):
    """
    Look up an environment variable.

    :param environment_name: Name of the environment variable
    :return: Value of the environment variable or the empty string if not exists
    """
    # os.environ.get expresses the original try/except KeyError fallback directly.
    return os.environ.get(environment_name, '')


class WinUninstallApplication:
    """
    Application for uninstalling Windows applications based on the applications list
    """

    # Necessary commands
    WMIC_ENUMERATE_COMMAND = 'wmic product get name'
    WMIC_UNINSTALL_COMMAND = 'wmic product where name="{0}" call uninstall'
Beispiel #14
0
import os
import sys
import re
import argparse
import logging
import log_helper

# The original assigned this text to `__dec__`, a typo of `__doc__`, so it
# never became the module docstring; fixed here.
__doc__ = """Bulk files processor.
Not intended to be universal; change the code every time you need different conditions.
Allows rename or remove, non-recursive or recursive.
"""

# Module-wide logger (DEBUG level, console only).
logger = log_helper.setup_logger(name="bulk_rename",
                                 level=logging.DEBUG,
                                 log_to_file=False)


class FileProcessor:
    def __init__(self, input_dir):
        """
        :param input_dir: Directory whose files will be processed
        """
        # Counter is for mass renaming with a new index
        self.counter = 0
        self.input_dir = input_dir

    def rename(self):
        """
        Rename file with leading 2 digits to leading 3 digits
        """
        for item in os.listdir(self.input_dir):
            if re.match(u"[0-9]{2}\\D.+\.jpg", item):
Beispiel #15
0
import os
import sys
import argparse
import logging
import log_helper
import xml.etree.ElementTree as XmlTree
import xml.dom.minidom as minidom

# Module-wide logger (DEBUG level, console only).
logger = log_helper.setup_logger(name="dictionary_merge",
                                 level=logging.DEBUG,
                                 log_to_file=False)


class BaseDictionaryProvider:
    """
    Base class for all dictionary providers
    """
    def __init__(self, file_path):
        self.file_path = file_path
        if not os.path.isfile(self.file_path):
            raise RuntimeError("File does not exist: %s" % self.file_path)

        self.dictionary_name = os.path.splitext(
            os.path.basename(self.file_path))[0]

    def name(self):
        """
        :return: Dictionary name (source file name without its extension)
        """
        return self.dictionary_name
Beispiel #16
0
import logging
import string
import log_helper
import identity_data
import random
import time
import datetime
import itertools
import platform

# Module-wide logger (DEBUG level, console only).
logger = log_helper.setup_logger(name="random_utils", level=logging.DEBUG, log_to_file=False)


__doc__ = """Service functions for generation random values and sequences with given format.
Hostname, user name and MAC address, randomly selected from lists imported from identity_data module,
random unix time, random string sequences. Helper functions for writing special values to Windows registry
Also is_x64os() helper function provided
"""


def is_x64os():
    """
    :return: True if system is 64-bit, False otherwise
    """
    # 64-bit platforms report machine strings such as 'x86_64' or 'AMD64'.
    machine_name = platform.machine()
    return machine_name.endswith('64')


def random_hostname():
    """
    :return: random host name from the list
    """