Example #1
def single_res_training(
    epochs,
    model,
    optimizer,
    init_lr,
    lr_decay,
    train_loader,
    test_loader,
    loss_function,
):
    logger = getlogger()
    writer = SummaryWriter()
    logger.info("start training")
    for epoch in range(epochs):
        model.train()
        decay_lr(optimizer, epoch, lr_decay, init_lr)  # step the LR schedule from the initial rate

        pbar = config_pbar(f"[EPOCH {epoch+1}]", train_loader)
        for i, (x, y) in pbar:
            x_val = x.cuda().float()
            y_val = y.cuda() - 1  # dataset labels are 1-indexed; shift to 0-based

            if i == 0:
                x_image = vutils.make_grid(x_val[:1], scale_each=True)
                writer.add_image("train", x_image, i)

            optimizer.zero_grad()
            output = model(x_val)

            loss = loss_function(output, y_val)
            pbar.postfix[0]["loss"] = loss.item()
            loss.backward()
            optimizer.step()

        writer.add_scalar("training_loss", loss.item(), epoch + 1)

        if ((epoch + 1) % 10 != 0):
            continue

        model.eval()
        with torch.no_grad():
            hit = 0
            for i, (x, y) in enumerate(tqdm(test_loader)):
                x_val = torch.squeeze(x)  # collapse the (1, 10, C, H, W) TenCrop batch to (10, C, H, W)
                x_val = x_val.cuda().float()
                y_val = y.cuda() - 1

                if i == 0:
                    x_image = vutils.make_grid(x_val[:1], scale_each=True)
                    writer.add_image("val", x_image, i)
                output = model(x_val)
                output = nn.Softmax(dim=1)(output)
                output = torch.mean(output, dim=0)
                output = output.cpu().detach().numpy()
                if np.argmax(output) == y_val.item():
                    hit += 1
            logger.info("hit : {}/{}, rate: {}%\n".format(
                hit, len(test_loader), float(hit / len(test_loader) * 100)))
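
A minimal sketch of how this loop might be driven. The optimizer settings mirror Example #7; the model and loaders are placeholders, and the semantics of lr_decay are an assumption since decay_lr is not shown.

import torch.nn as nn
import torch.optim as optim

# Hypothetical wiring for single_res_training; model and loaders are placeholders.
model = MyConvNet().cuda()  # any classifier; hypothetical
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

single_res_training(
    epochs=100,
    model=model,
    optimizer=optimizer,
    init_lr=0.01,
    lr_decay=0.1,  # assumed: multiplicative decay factor; actual meaning depends on decay_lr
    train_loader=train_loader,
    test_loader=test_loader,
    loss_function=nn.CrossEntropyLoss(),
)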
Example #2
def _build_cub_dataset(
        imagepath: str,
        labelpath: str,
        low_ratio: int,
        mean: list,
        std: list,
        batch_size: int = 128,
        shuffle=True,
        num_workers=6,
        drop_last=True,
        input_shape=(256, 256),
        crop_size=(224, 224),
        is_kd=False,
        is_test=False,
):
    logger = getlogger()
    params = {
        "batch_size": batch_size,
        "shuffle": shuffle,
        "num_workers": num_workers,
        "drop_last": drop_last
    }

    logger.info("SPECIFIC PREPROCESS")
    preprocess = build_transforms(
        input_shape=input_shape,
        crop_size=crop_size,
        mean=mean,
        std=std,
        low_ratio=low_ratio,
        is_test=is_test,
    )

    if is_kd:
        logger.info("BASIC TRANSFORM")
        basic_transform = build_transforms(
            input_shape=input_shape,
            crop_size=crop_size,
            mean=mean,
            std=std,
        )
    else:
        basic_transform = None

    dataset = CUB200_2011(
        imagepath=imagepath,
        labelpath=labelpath,
        preprocess=preprocess,
        basic_transform=basic_transform,
        is_kd=is_kd,
        is_test=is_test,
    )

    return data.DataLoader(dataset, **params)
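
For reference, a hedged call site: the paths below are placeholders, and the mean/std values are the standard ImageNet statistics this kind of pipeline typically uses.

# Illustrative only; paths are placeholders.
train_loader = _build_cub_dataset(
    imagepath="data/CUB_200_2011/images",     # placeholder
    labelpath="data/CUB_200_2011/train.txt",  # placeholder
    low_ratio=50,                             # assumed low-resolution edge length
    mean=[0.485, 0.456, 0.406],               # ImageNet mean
    std=[0.229, 0.224, 0.225],                # ImageNet std
)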
Example #3
def Test(model, dataloaders):
    logger = getlogger()
    logger.info("Initiating Test...")
    _test_convnet(model)

    logger.info("...Model test complete")
    logger.info("\nValidating loaders...")

    # for dataloader in dataloaders:
    #     _test_dataloader(dataloader)
    logger.info("...Done")
Example #4
def _distillation_transform(
    input_shape: tuple,
    crop_size: tuple,
    mean,
    std,
    flip_prob=0.5,
    low_ratio=None,
    is_test=False,
):
    logger = getlogger()
    transform_list = []
    transform_list.append(transforms.Resize(input_shape, interpolation=Image.BICUBIC))

    if low_ratio is not None:
        transform_list.append(transforms.Resize(low_ratio, interpolation=Image.BICUBIC))
        transform_list.append(transforms.Resize(input_shape, interpolation=Image.BICUBIC))

    if is_test:
        # Ten-crop evaluation: each image becomes a stack of ten normalized crops.
        transform_list.append(transforms.TenCrop(crop_size))
        transform_list.append(transforms.Lambda(lambda crops:
                                                torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transform_list.append(transforms.Lambda(lambda images:
                                                torch.stack([transforms.Normalize(mean=mean, std=std)(image)
                                                             for image in images])))
    else:
        transform_list.append(transforms.RandomHorizontalFlip(flip_prob))
        transform_list.append(transforms.RandomCrop(crop_size))
        transform_list.append(transforms.ToTensor())
        transform_list.append(transforms.Normalize(mean=mean, std=std))

    for t in transform_list:
        logger.info("Transform : {}".format(t))
    logger.info("\n")

    return transforms.Compose(transform_list)
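
To make the test branch concrete: TenCrop turns each image into a stack of ten crops, so with batch_size=1 the loader yields a (1, 10, C, H, W) tensor. The evaluation loop in Example #1 consumes it exactly this way (reproduced here in sketch form; x and model come from that loop):

x_val = torch.squeeze(x)                                # (1, 10, C, H, W) -> (10, C, H, W)
output = model(x_val.cuda().float())                    # (10, num_classes)
output = torch.mean(nn.Softmax(dim=1)(output), dim=0)   # average over the ten crops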
Example #5
#std library
import os
import time

#3rd library
import pysam, HTSeq
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import pylab
import brewer2mpl

#my own
from utils import getlogger, call_sys

#global settings
#logger
date = time.strftime(' %Y-%m-%d', time.localtime(time.time()))
logger = getlogger(fn=os.getcwd() + "/" + date.strip() + "_" +
                   os.path.basename(__file__) + ".log")
#data
REPEAT_GTF = "/picb/molsysbio/usr/caoyaqiang/1.Projects/17.Alu/1.GenomeReference/1.hg38/6.Repeats_Annotation/hg38_sorted_repeatmasker.gtf"
CHROM_SIZE = "/home/caoyaqiang/caoyaqiang_han/1.Projects/17.Alu/1.GenomeReference/1.hg38/1.hg38_Sequence/hg38.chrom.sizes"
RMSK = "/picb/molsysbio/usr/caoyaqiang/1.Projects/17.Alu/1.GenomeReference/1.hg38/6.Repeats_Annotation/rmsk.txt"
SUBFAMILY_SIZE = "/picb/molsysbio/usr/caoyaqiang/1.Projects/17.Alu/1.GenomeReference/1.hg38/6.Repeats_Annotation/subfam.size"
REPEATS_LOCUS = "/picb/molsysbio/usr/caoyaqiang/1.Projects/17.Alu/1.GenomeReference/1.hg38/6.Repeats_Annotation/1.GenomicLocation/hg38_rep_locus.txt"


def iteres_stat_filter(bam,
                       stat_root="0.iteres/1.stat/",
                       filter_root="0.iteres/2.filter/"):
    sample = os.path.splitext(os.path.split(bam)[1])[0]
    if not os.path.exists(os.path.join(stat_root, sample)):
        os.makedirs(os.path.join(stat_root, sample))
    stat_pre = os.path.join(stat_root, sample, sample)
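
Given the Parallel/delayed import above, the per-BAM filtering is presumably fanned out over samples along these lines; the glob pattern and job count are assumptions.

import glob

# Hypothetical driver: filter every BAM in the working directory in parallel.
bams = glob.glob("*.bam")
Parallel(n_jobs=10)(delayed(iteres_stat_filter)(bam) for bam in bams)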
Example #6
import uuid

import zerorpc

from agent.config import MASTER_URL
from .config import LOG_PATH
from .storage import Storage
from utils import getlogger

logger = getlogger(__name__, LOG_PATH)


class Agentserver:
    def __init__(self):
        self.store = Storage()

    def handle(self, msg):
        logger.info(type(msg))
        try:
            if msg['type'] in {'register', 'heartbeat'}:
                payload = msg['payload']
                info = {'hostname': payload['hostname'], 'ip': payload['ip']}
                self.store.reg_hb(payload['id'], info)
                logger.info('{}'.format(self.store.agents))
                return 'ack {}'.format(msg)

            elif msg['type'] == 'result':
Example #7
def main(args):
    logger = getlogger()
    logger.info("\nGENERATING MODEL")
    model = build_model(
        model_type=args.model_type,
        experiment_type=args.experiment_type,
        lr=args.lr,
        lr_decay=args.lr_decay,
        num_classes=args.num_classes,
        batch_size=args.batch_size,
        epochs=args.epochs,
        pretrain_path=args.pretrain_path,
        desc=args.desc,
        save=args.save,
    )
    logger.info(model)

    logger.info("GENERATING TRAINLOADER")
    train_loader = build_dataloader(
        imagepath=args.imagepath,
        dataset_type=args.dataset_type,
        labelpath=args.train_label,
        low_ratio=args.low_ratio,
        batch_size=args.batch_size,
        mean=args.mean,
        std=args.std,
        is_kd=args.kd,
        is_test=False,
    )

    logger.info("GENERATING TESTLOADER")
    test_loader = build_dataloader(
        imagepath=args.imagepath,
        dataset_type=args.dataset_type,
        labelpath=args.test_label,
        low_ratio=args.low_ratio,
        batch_size=1,
        mean=args.mean,
        std=args.std,
        is_kd=args.kd,
        is_test=True,
    )

    default_optimizer = optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=0.9,
        weight_decay=0.0005,
    )

    logger.info(default_optimizer)
    Test(model, [train_loader, test_loader])

    Baseline(
        TrainingConfig(
            model=model,
            epochs=args.epochs,
            optimizer=default_optimizer,
            init_lr=args.lr,
            train_loader=train_loader,
            valid_loader=test_loader,
            criterion=nn.CrossEntropyLoss(),
        ))
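
A sketch of the argparse wiring that would feed main(); every flag main() reads is declared, but the defaults are illustrative assumptions, not the project's real ones.

import argparse

if __name__ == "__main__":
    p = argparse.ArgumentParser()
    p.add_argument("--model_type", default="vgg16")           # assumed default
    p.add_argument("--experiment_type", default="baseline")   # assumed default
    p.add_argument("--dataset_type", default="cub")           # assumed default
    p.add_argument("--imagepath")
    p.add_argument("--train_label")
    p.add_argument("--test_label")
    p.add_argument("--pretrain_path", default=None)
    p.add_argument("--desc", default="")
    p.add_argument("--save", action="store_true")
    p.add_argument("--kd", action="store_true")
    p.add_argument("--low_ratio", type=int, default=None)
    p.add_argument("--num_classes", type=int, default=200)    # CUB-200 has 200 classes
    p.add_argument("--lr", type=float, default=0.01)
    p.add_argument("--lr_decay", type=float, default=0.1)
    p.add_argument("--batch_size", type=int, default=128)
    p.add_argument("--epochs", type=int, default=100)
    p.add_argument("--mean", type=float, nargs=3, default=[0.485, 0.456, 0.406])
    p.add_argument("--std", type=float, nargs=3, default=[0.229, 0.224, 0.225])
    main(p.parse_args())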
Example #8
from subprocess import Popen, PIPE
from utils import getlogger


logger = getlogger(__name__, '/Users/quyixiao/PycharmProjects/mschedule/logs/exec.log')

class Executor:
    def run(self, script, timeout=None):
        proc = Popen(script, shell=True, stdout=PIPE)
        code = proc.wait(timeout)
        txt = proc.stdout.read()

        logger.info("{} {}".format(code, txt))
        return code, txt

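Usage is a one-liner; note that stdout.read() returns bytes, so callers that want text must decode it.

# Run a shell command and capture its exit code and raw stdout.
code, txt = Executor().run("echo hello", timeout=10)
print(code, txt.decode().strip())  # 0 hello

For large outputs, Popen.communicate() is the safer pattern, since wait()-then-read() can deadlock once the pipe buffer fills.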
Example #9
from subprocess import PIPE, Popen
from utils import getlogger

logger = getlogger(__name__, 'd:/executor.log')


class Executor:
    def run(self, script, timeout=None):
        process = Popen(script, shell=True, stdout=PIPE)
        code = process.wait(timeout)  # exit status code; honor the caller's timeout
        output = process.stdout.read()  # the script's captured output
        return code, output
Example #10
import zerorpc
import threading

from .msg import Message
from utils import getlogger
from .config import LOG_PATH
from .state import *
from .executor import Executor

logger = getlogger(__name__, LOG_PATH)


class Agentclient:
    def __init__(self, url, message: Message):
        self.message = message
        self.client = zerorpc.Client()
        self.event = threading.Event()
        self.url = url
        self.state = WAITING  # marks whether the task has finished
        self.executor = Executor()

    def start(self, itime=3):
        try:  # log any exception raised below
            self.event.clear()
            self.client.connect(self.url)
            reg = self.client.message(self.message.reg)
            logger.info(reg)
            while not self.event.wait(itime):
                server_message = self.client.message(self.message.heartbeat)
Example #11
from .storage import Storage
from utils import getlogger

logger = getlogger(__name__, f'/tmp/{__name__}.log')


class ConnectionManager:
    def __init__(self):
        self.store = Storage()

    def sendmsg(self, msg):  # RPC-facing entry point; handles data sent in by clients
        try:
            if msg['type'] in {'register', 'heartbeat'}:
                self.store.reg_hb(**msg['payload'])
            elif msg['type'] == 'result':
                self.store.result(msg['payload'])
            logger.info(msg)
            return f"ACK: {msg}"
        except Exception as e:
            logger.error(e)
            return 'Bad Request'

    def add_task(self, msg: dict):
        return self.store.add_task(msg)

    def get_task(self, agent_id):
        return self.store.get_task(agent_id)

    def get_agents(self):
Example #12
from utils import getlogger
from .storage import Storage
from .state import *
import uuid

logger = getlogger(__name__, 'd:/mastercm.log')


class ConnectionManager:
    def __init__(self):
        self.store = Storage()

    def handle(self, msg):
        try:
            if msg['type'] in ('reg', 'heartbeat'):
                payload = msg['payload']
                info = {'hostname': payload['hostname'], 'ip': payload['ip']}
                self.store.reg_hb(payload['id'], info)
                return "ack {}".format(msg)

            elif msg['type'] == 'result':
                payload = msg['payload']
                agent_id = payload['agent_id']
                task_id = payload['id']
                state = SUCCEED if payload['output'] == 0 else FAILED
                output = payload['output']

                task = self.store.get_task_by_agentid(task_id)
                t = task.targets[agent_id]
                t.state = state
                t.output = output
Example #13
import zerorpc
from .msg import Message
import threading
from utils import getlogger
from .state import *
from .executor import Executor

logger = getlogger(__name__, 'd:/agentcm.log')


class ConnectionManager:
    def __init__(self, master_url, message: Message):
        self.master_url = master_url
        self.message = message  # an instance, so its methods can be called
        self.client = zerorpc.Client()
        self.event = threading.Event()
        self.state = WAITING  # the Agent's working state
        self.exe = Executor()  # script executor

    def start(self, timeout=5):
        try:
            self.event.clear()  # reset the event
            self.client.connect(self.master_url)  # connect to the master
            self._send(self.message.reg())  # send the registration message

            while not self.event.wait(timeout):
                self._send(self.message.heartbeat())
                if self.state == WAITING:  # an idle Agent pulls tasks from the Master
                    self._get_task(self.message.id)

        except Exception as e: