Example #1
import subprocess

try:
    import waitGPU
except ImportError:
    waitGPU = None

def run_command(command, noprint=True):
    # Block until a free GPU is available, then launch the command in the background.
    if waitGPU is not None:
        waitGPU.wait(nproc=0, interval=1, ngpu=1)
    command = " ".join(command.split())
    if noprint:
        command = "{} > /dev/null".format(command)
    print(command)
    subprocess.Popen(command, stderr=subprocess.STDOUT, stdout=None, shell=True)
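A hypothetical usage sketch for the function above; the script name, arguments and loop are placeholders, not taken from the original example:

# Queue several runs; each call blocks until a free GPU is found, then
# launches the job in the background with its output silenced.
for seed in range(3):
    run_command("python train.py --seed {}".format(seed))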
Example #2
import os
import subprocess
import time

try:
    import waitGPU
except ImportError:
    waitGPU = None

def run_command(command, on_gpu, noprint):
    if not on_gpu:
        # CPU job: wait for the load average to drop before launching.
        while True:
            time.sleep(1)
            if os.getloadavg()[0] < 4:
                break
    elif waitGPU is not None:
        try:
            waitGPU.wait(nproc=0, interval=1, ngpu=1, gpu_ids=[0, 1, 2, 3])
        except Exception:
            print("Failed to run `waitGPU.wait` --> no automatic scheduling on GPU")
    command = " ".join(command.split())
    if noprint:
        command = "{} > /dev/null".format(command)
    print(command)
    subprocess.Popen(command, stderr=subprocess.STDOUT, stdout=None, shell=True)
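A hypothetical dispatch loop for this variant, illustrating the on_gpu flag; the job list and script names are placeholders:

jobs = [("python preprocess.py", False),   # CPU job: gated on the load average
        ("python train.py", True)]         # GPU job: gated on waitGPU.wait
for cmd, on_gpu in jobs:
    run_command(cmd, on_gpu, noprint=False)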
Example #3
import subprocess
import time

import psutil

try:
    import waitGPU
except ImportError:
    waitGPU = None

def run_command(command, on_gpu, noprint):
    if not on_gpu:
        # CPU job: wait for the load average to drop before launching.
        while True:
            time.sleep(1)
            if psutil.getloadavg()[0] < 12:
                break
    elif waitGPU is not None:
        waitGPU.wait(nproc=0, interval=1, ngpu=1, gpu_ids=[0, 1, 2, 3])
    command = " ".join(command.split())
    if noprint:
        command = "{} > /dev/null".format(command)
    print(command)
    subprocess.Popen(command,
                     stderr=subprocess.STDOUT,
                     stdout=None,
                     shell=True)
Example #4
import waitGPU
waitGPU.wait(utilization=40, available_memory=8000, interval=20)

import torch
import argparse
import os

from generative.gan import GAN
from generative.acgan import ACGAN

def parse_args():
    """parsing and configuration"""
    parser = argparse.ArgumentParser(description="Generative Models for MNIST")

    # for training generative model
    parser.add_argument('--gan-type', type=str, default='ACGAN', help='The type of GAN',
                        choices=['GAN', 'ACGAN'])
    parser.add_argument('--dataset', type=str, default='MNIST', help='The name of dataset')
    parser.add_argument('--mode', type=str, default='evaluate', help='Which function to run',
                        choices=['train', 'evaluate', 'reconstruct'])
    
    parser.add_argument('--epoch', type=int, default=25, help='The number of epochs to run')
    parser.add_argument('--batch-size', type=int, default=64, help='The size of batch')
    parser.add_argument('--input-size', type=int, default=28, help='The size of input image')
    parser.add_argument('--channels', type=int, default=1, help='The number of rgb channels')
    parser.add_argument('--save-dir', type=str, default='generative/models',
                        help='Directory name to save the model')
    parser.add_argument('--result-dir', type=str, default='generative/imgs',
                        help='Directory name to save the generated images')

    parser.add_argument('--lrG', type=float, default=0.0002)
Example #5
import waitGPU
waitGPU.wait(utilization=20, available_memory=10000, interval=10)

from examples.trainer import *
import examples.problems as pblm
import setproctitle

if __name__ == '__main__':
    args = pblm.argparser(prefix='mnist',
                          method='task_spec_robust',
                          opt='adam',
                          starting_epsilon=0.05,
                          epsilon=0.2)
    kwargs = pblm.args2kwargs(args)
    setproctitle.setproctitle('python')

    # train-validation split
    _, _, test_loader = pblm.mnist_loaders(batch_size=args.batch_size,
                                           path='./data',
                                           ratio=args.ratio,
                                           seed=args.seed)

    model = pblm.mnist_model().cuda()
    num_classes = model[-1].out_features

    # specify the task and the corresponding class semantic
    folder_path = os.path.dirname(args.proctitle)
    if args.type == 'binary':
        input_mat = np.zeros((num_classes, num_classes), dtype=int)
        if args.category == 'single_seed':
            seed_clas = 9
Example #6
import waitGPU
waitGPU.wait(utilization=20, interval=60)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable

import torchvision.transforms as transforms
import torchvision.datasets as datasets

import setproctitle
import argparse

import problems as pblm
from trainer import *

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=20)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--epsilon", type=float, default=0.1)
    parser.add_argument("--starting_epsilon", type=float, default=None)
    parser.add_argument('--prefix')
    parser.add_argument('--baseline', action='store_true')
    parser.add_argument('--verbose', type=int, default=1)
    parser.add_argument('--alpha_grad', action='store_true')
    parser.add_argument('--scatter_grad', action='store_true')
    parser.add_argument('--l1_proj', type=int, default=None)
Example #7
import waitGPU

waitGPU.wait(available_memory=9000)
waitGPU.wait(utilization=50)
waitGPU.wait(memory_ratio=0.5)
waitGPU.wait(nproc=0)
waitGPU.wait(gpu_ids=[1, 2], utilization=50)
waitGPU.wait(gpu_ids=[1, 2], utilization=50, ngpu=2)
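For reference, a commented restatement of the keyword arguments that appear across these examples; the meanings in the comments are inferred from how the examples use them, not quoted from the library's documentation, so treat them as assumptions:

import waitGPU

# utilization       -- GPU utilization threshold, in percent
# memory_ratio      -- fraction of GPU memory allowed to be in use
# available_memory  -- required free GPU memory, in MB
# nproc             -- maximum number of processes on the GPU (0 = idle GPU)
# ngpu              -- how many GPUs must satisfy the conditions
# gpu_ids           -- restrict the search to these device indices
# interval          -- seconds between polls
waitGPU.wait(nproc=0, ngpu=1, interval=10)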
Example #8
import waitGPU
waitGPU.wait(gpu_ids=[7, 8, 9], nproc=0, interval=120)

import argparse
import json
import os
import numpy as np
import logging

import utilities

from robustness import classifiers
from attacks import attacks
from perturbation_learning import datasets

import torch
from torch import optim
import torch.nn as nn
import torch.nn.functional as F
from torchvision.utils import save_image
from torchvision.transforms import ToTensor
from matplotlib import cm

TRAIN_MODE = 'train'
VAL_MODE = 'val'
TEST_MODE = 'test'


def optimizers(config, parameters):
    if config.training.optimizer == "adam":
        return optim.Adam(parameters,
Example #9
import os
try:
    import waitGPU
    ngpu = int(os.environ['NGPU']) if 'NGPU' in os.environ else 1
    waitGPU.wait(nproc=0, interval=10, ngpu=ngpu, gpu_ids=[2, 3])
except ImportError:
    print('Failed to import waitGPU --> no automatic scheduling on GPU')
    pass
import torch  # import torch *after* waitGPU.wait()


def set_cuda(args):
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.cuda:
        torch.zeros(1).cuda()  # for quick initialization of process on device
Example #10
import os
try:
    import waitGPU
    ngpu = int(os.environ['NGPU']) if 'NGPU' in os.environ else 1
    # waitGPU.wait(nproc=0, interval=10, ngpu=ngpu, gpu_ids=[0, 1, 2, 3])
    waitGPU.wait(nproc=0,
                 interval=10,
                 ngpu=ngpu,
                 gpu_ids=[int(os.environ['CUDA_VISIBLE_DEVICES'])])
except ImportError:
    print('Failed to import waitGPU --> no automatic scheduling on GPU')
    pass
import torch  # import torch *after* waitGPU.wait()


def set_cuda(args):
    print('setting cuda...')
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.cuda:
        torch.zeros(1).cuda()  # for quick initialization of process on device
Example #11
# FILENAME for output
params_describe = "_".join([x + "-" + str(y)
                            for x, y in vars(args).items()]) + ".output"

# writing something there if it doesn't exist
# if exists, exiting
if os.path.isfile(params_describe):
    if open(params_describe, 'r').read() != 'Nothing[':
        print('Already exists')
        sys.exit(0)

# writing temporary data
open(params_describe, 'w').write('Nothing[')

# waiting for GPU
waitGPU.wait(nproc=8, interval=10, gpu_ids=[0, 1])

from baselines import *
from saferl import *
from sppo import *

sess = create_modest_session()

if args.agent == 'sppo':
    params = ['epsilon', 'lr_policy', 'lr_value', 'lr_failsafe', 'steps']
    agent = ConstrainedProximalPolicyOptimization
elif args.agent == 'cpo':
    params = ['delta']
    agent = ConstrainedPolicyOptimization
elif args.agent == 'random':
    params = []
Example #12
import os
try:
    import waitGPU
    ngpu = int(os.environ['NGPU']) if 'NGPU' in os.environ else 1
    waitGPU.wait(nproc=0, interval=10, ngpu=ngpu)
except ImportError:
    print('Failed to import waitGPU --> no automatic scheduling on GPU')
    pass
import torch  # import torch *after* waitGPU.wait()


def set_cuda(args):
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.cuda:
        torch.zeros(1).cuda()  # for quick initialization of process on device
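The guarded-import pattern of Examples #9, #10 and #12 can be factored into a small helper. A minimal sketch, assuming only the waitGPU.wait arguments used above; wait_for_gpu is a hypothetical name, not part of the library:

import os

def wait_for_gpu(ngpu=None, gpu_ids=None, interval=10):
    """Block until a free GPU is available; fall back to a warning if waitGPU is missing."""
    try:
        import waitGPU
    except ImportError:
        print('Failed to import waitGPU --> no automatic scheduling on GPU')
        return
    if ngpu is None:
        ngpu = int(os.environ['NGPU']) if 'NGPU' in os.environ else 1
    kwargs = {'nproc': 0, 'interval': interval, 'ngpu': ngpu}
    if gpu_ids is not None:
        kwargs['gpu_ids'] = gpu_ids
    waitGPU.wait(**kwargs)

wait_for_gpu()
import torch  # import torch *after* waitGPU.wait(), as in the examples above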