Example #1
def trim(test=False):
    """
    Implementation of a FIFO log window by trimming logs from X days ago from MongoDB
    by deleting old collections
    """

    # for every log type, delete the collection from a certain amount of days ago (in config)
    for log_type in get_log_types():

        try:
            oldest_date_to_keep = int(get_date_x_days_ago(int(get_config(log_type, 'window'))))
            db = connect_db(log_type, local=test, master=True)

            # grab every collection and if it's older than the log window, drop it
            for collection in db.collection_names():
                collection_date = collection[-8:]
                try:
                    if int(collection_date) < int(oldest_date_to_keep):
                        db.drop_collection(collection)
                except ValueError:
                    # collection suffix is not a YYYYMMDD date; skip it
                    continue

        except Exception as e:
            print str(e)
            continue
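A minimal sketch of the convention the loop above appears to rely on: collection names are assumed to end in an 8-digit YYYYMMDD suffix, and get_date_x_days_ago is assumed to return a value comparable to that suffix (the helper body and collection name below are illustrative assumptions, not the project's actual code).

import datetime

def get_date_x_days_ago(days):
    # assumed behaviour: YYYYMMDD string for `days` days before today
    return (datetime.date.today() - datetime.timedelta(days=int(days))).strftime('%Y%m%d')

collection = 'syslog_20240101'  # hypothetical collection name
window_days = 30
print(int(collection[-8:]) < int(get_date_x_days_ago(window_days)))  # True once the suffix date falls outside the window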
Example #2
def main():
    parser = argparse.ArgumentParser(description='Load Genotypes')
    parser.add_argument('-c', '--config')
    opts = parser.parse_args()
    config = opts.config
    # config
    config = utilities.get_config(config)
    print("config:")
    for item in config.items('config'):
        print("\t", str(item))
    parse_genofile(fetch_parameters(config))
Example #3
def drop(user, password):
    """
    Drop every database and re-add net user account (CAREFUL BEFORE RUNNING)
    """

    connection = Connection(get_config('database', 'master_host'), int(get_config('database', 'port')))
    db = database.Database(connection, 'admin')
    db.authenticate(user, password)

    for log_type in get_log_types():
        try:

            db = database.Database(connection, log_type)

            print "dropping " + log_type
            connection.drop_database(log_type)

            # re-add net user account
            db.add_user(get_config('database', 'user'), get_config('database', 'password'))

        except Exception as e:
            print str(e)
            continue
Example #4
def main(argv):
    # config
    config = utilities.get_config(argv[1])
    print "config:"
    for item in config.items('config'):
        print "\t%s" % (str(item))
    # var
    inbredsetid = config.get('config', 'inbredsetid')
    print "inbredsetid: %s" % inbredsetid
    species = datastructure.get_species(inbredsetid)
    speciesid = species[0]
    print "speciesid: %s" % speciesid
    # datafile
    datafile = open(config.get('config', 'datafile'), 'r')
    data = csv.reader(datafile, delimiter ="\t", quotechar='"')
    strainnames = data.next()
    print "strainnames: %s\n\t%s" % (len(strainnames), strainnames)
    strains = datastructure.get_strains_bynames(inbredsetid=inbredsetid, strainnames=strainnames, updatestrainxref="yes")
    print("strain: %s\n\t%s" % (len(strains), strains))
    datafile.close()
Example #5
def main(argv):
    # config
    config = utilities.get_config(argv[1])
    print "config:"
    for item in config.items('config'):
        print "\t%s" % (str(item))
    # var
    print "variable:"
    inbredsetid = config.get('config', 'inbredsetid')
    print "\tinbredsetid: %s" % inbredsetid
    # datafile
    datafile = open(config.get('config', 'datafile'), 'r')
    datafile = csv.reader(datafile, delimiter='\t', quotechar='"')
    datafile.next()
    delrowcount = 0
    for row in datafile:
        if len(row) == 0:
            continue
        genoname = row[0]
        delrowcount += genotypes.delete(genoname, inbredsetid)
    print "deleted %d genotypes" % (delrowcount)
Example #6
def main(argv):
    # config
    config = utilities.get_config(argv[1])
    print "config:"
    for item in config.items('config'):
        print "\t%s" % (str(item))
    # var
    print "variable:"
    inbredsetid = config.get('config', 'inbredsetid')
    print "\tinbredsetid: %s" % inbredsetid
    # datafile
    datafile = open(config.get('config', 'datafile'), 'r')
    datafile = csv.reader(datafile, delimiter='\t', quotechar='"')
    delrowcount = 0
    for row in datafile:
        if len(row) == 0:
            continue
        try:
            publishxrefid = int(row[0])
        except ValueError:
            # first column is not an integer PublishXRef id (e.g. a header row); skip it
            continue
        delrowcount += phenotypes.delete(publishxrefid=publishxrefid, inbredsetid=inbredsetid)
    print "deleted %d phenotypes" % (delrowcount)
Example #7
import logging
import time
import traceback
from utilities import get_config
from utilities import tweet
from utilities import get_random_tweet_quote
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler


IMAGE_PATH = '/Users/ryankanno/Projects/Makerfaire/let-it-rain-images/'
IMAGE_PROCESSED_PATH = '/Users/ryankanno/Projects/Makerfaire/let-it-rain-images-processed/'

LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'

CONFIG = get_config('letitrain.ini')


class ImageHandler(PatternMatchingEventHandler):

    patterns = ["*.jpg", "*.JPG"]

    def process(self, event):
        """
        event.event_type
            'modified' | 'created' | 'moved' | 'deleted'
        event.is_directory
            True | False
        event.src_path
            path/to/observed/file
        """
Example #8
    latest = options.get("latest", None)
    if earliest and earliest.isdigit():
        start = datetime.datetime.fromtimestamp(int(earliest))
        kwords['start'] = start.strftime("%Y-%m-%d")
    if latest and (latest.isdigit() or latest == 'now'):
        if latest == 'now':
            end = datetime.datetime.now()
        else:
            end = datetime.datetime.fromtimestamp(int(latest))
        kwords['end'] = end.strftime("%Y-%m-%d")
    kwords['query'] = query_value

    logger.info("Query target: %s" % query_value)
    logger.debug("Raw options: %s" % str(options))

    configuration = get_config("passivetotal", "api-setup")
    username = configuration.get('username', None)
    api_key = configuration.get('apikey', None)

    output_events = list()
    pdns = DnsRequest(username, api_key, headers=build_headers()).get_passive_dns(**kwords)
    if 'error' in pdns:
        raise Exception("Whoa there, looks like you reached your quota for today! Please come back tomorrow to resume your investigation or contact support for details on enterprise plans.")
    for result in pdns.get("results", []):
        result = remove_keys(result, ['value', 'recordHash', 'collected'])
        result['count'] = pdns.get('totalRecords', 0)
        output_events.append(result)
    splunk.Intersplunk.outputResults(output_events)

except Exception as e:
    stack = traceback.format_exc()
Example #9
from utilities import get_config
from utilities import tweet
from utilities import get_random_tweet_quote
from images2gif import writeGif
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from watchdog.events import LoggingEventHandler
import argparse


IMAGE_PATH = '/Users/ryankanno/Projects/Makerfaire/i-like-to-move-it-images/'
IMAGE_PROCESSED_PATH = '/Users/ryankanno/Projects/Makerfaire/i-like-to-move-it-images-processed/'

LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'

CONFIG = get_config('iliketomoveit.ini')


class ImageHandler(PatternMatchingEventHandler):

    patterns=["*DONE*"]

    def process(self, event):
        """
        event.event_type
            'modified' | 'created' | 'moved' | 'deleted'
        event.is_directory
            True | False
        event.src_path
            path/to/observed/file
        """
Example #10
def main(argv):
    config = utilities.get_config(argv[1])
    print("config file:")
    for item in config.items('config'):
        print("\t", str(item))
    parse_genofile(config, fetch_parameters(config))
Example #11
def main(argv):
    # config
    config = utilities.get_config(argv[1])
    print "config:"
    for item in config.items('config'):
        print "\t%s" % (str(item))
    # var
    inbredsetid = config.get('config', 'inbredsetid')
    print "inbredsetid: %s" % inbredsetid
    species = datastructure.get_species(inbredsetid)
    speciesid = species[0]
    print "speciesid: %s" % speciesid
    dataid = datastructure.get_nextdataid_phenotype()
    print "next data id: %s" % dataid
    cursor, con = utilities.get_cursor()
    # datafile
    datafile = open(config.get('config', 'datafile'), 'r')
    phenotypedata = csv.reader(datafile, delimiter='\t', quotechar='"')
    phenotypedata_head = phenotypedata.next()
    print("phenotypedata head [%d]:\n\t%s" % (len(phenotypedata_head), phenotypedata_head))
    strainnames = phenotypedata_head[1:]
    strains = datastructure.get_strains_bynames(inbredsetid=inbredsetid, strainnames=strainnames, updatestrainxref="yes")
    # metafile
    metafile = open(config.get('config', 'metafile'), 'r')
    phenotypemeta = csv.reader(metafile, delimiter='\t', quotechar='"')
    phenotypemeta_head = phenotypemeta.next()
    print("phenotypemeta head [%d]:\n\t%s" % (len(phenotypemeta_head), phenotypemeta_head))
    print
    # load
    for metarow in phenotypemeta:
        #
        datarow_value = phenotypedata.next()
        datarow_se = phenotypedata.next()
        datarow_n = phenotypedata.next()
        # Phenotype
        sql = """
            INSERT INTO Phenotype
            SET
            Phenotype.`Pre_publication_description`=%s,
            Phenotype.`Post_publication_description`=%s,
            Phenotype.`Original_description`=%s,
            Phenotype.`Pre_publication_abbreviation`=%s,
            Phenotype.`Post_publication_abbreviation`=%s,
            Phenotype.`Lab_code`=%s,
            Phenotype.`Submitter`=%s,
            Phenotype.`Owner`=%s,
            Phenotype.`Authorized_Users`=%s,
            Phenotype.`Units`=%s
            """
        cursor.execute(sql, (
            utilities.to_db_string(metarow[1], None),
            utilities.to_db_string(metarow[2], None),
            utilities.to_db_string(metarow[3], None),
            utilities.to_db_string(metarow[4], None),
            utilities.to_db_string(metarow[5], None),
            utilities.to_db_string(metarow[6], None),
            utilities.to_db_string(metarow[7], None),
            utilities.to_db_string(metarow[8], None),
            utilities.to_db_string(metarow[9], ""),
            utilities.to_db_string(metarow[18], ""),
            ))
        rowcount = cursor.rowcount
        phenotypeid = con.insert_id()
        print "INSERT INTO Phenotype: %d record: %d" % (rowcount, phenotypeid)
        # Publication
        publicationid = None # reset
        pubmed_id = utilities.to_db_string(metarow[0], None)
        if pubmed_id:
            sql = """
                SELECT Publication.`Id`
                FROM Publication
                WHERE Publication.`PubMed_ID`=%s
                """
            cursor.execute(sql, (pubmed_id,))
            re = cursor.fetchone()
            if re:
                publicationid = re[0]
                print "get Publication record: %d" % publicationid
        if not publicationid:
            sql = """
                INSERT INTO Publication
                SET
                Publication.`PubMed_ID`=%s,
                Publication.`Abstract`=%s,
                Publication.`Authors`=%s,
                Publication.`Title`=%s,
                Publication.`Journal`=%s,
                Publication.`Volume`=%s,
                Publication.`Pages`=%s,
                Publication.`Month`=%s,
                Publication.`Year`=%s
                """
            cursor.execute(sql, (
                utilities.to_db_string(metarow[0], None),
                utilities.to_db_string(metarow[12], None),
                utilities.to_db_string(metarow[10], ""),
                utilities.to_db_string(metarow[11], None),
                utilities.to_db_string(metarow[13], None),
                utilities.to_db_string(metarow[14], None),
                utilities.to_db_string(metarow[15], None),
                utilities.to_db_string(metarow[16], None),
                utilities.to_db_string(metarow[17], ""),
                ))
            rowcount = cursor.rowcount
            publicationid = con.insert_id()
            print "INSERT INTO Publication: %d record: %d" % (rowcount, publicationid)
        # data
        for index, strain in enumerate(strains):
            #
            strainid = strain[0]
            value   = utilities.to_db_float(datarow_value[index+1], None)
            se      = utilities.to_db_float(datarow_se[index+1], None)
            n       = utilities.to_db_int(datarow_n[index+1], None)
            #
            if value is not None:
                sql = """
                    INSERT INTO PublishData
                    SET
                    PublishData.`Id`=%s,
                    PublishData.`StrainId`=%s,
                    PublishData.`value`=%s
                    """
                cursor.execute(sql, (dataid, strainid, value))
            if se is not None:
                sql = """
                    INSERT INTO PublishSE
                    SET
                    PublishSE.`DataId`=%s,
                    PublishSE.`StrainId`=%s,
                    PublishSE.`error`=%s
                    """
                cursor.execute(sql, (dataid, strainid, se))
            if n is not None:
                sql = """
                    INSERT INTO NStrain
                    SET
                    NStrain.`DataId`=%s,
                    NStrain.`StrainId`=%s,
                    NStrain.`count`=%s
                    """
                cursor.execute(sql, (dataid, strainid, n))
        # PublishXRef
        sql = """
            INSERT INTO PublishXRef
            SET
            PublishXRef.`InbredSetId`=%s,
            PublishXRef.`PhenotypeId`=%s,
            PublishXRef.`PublicationId`=%s,
            PublishXRef.`DataId`=%s,
            PublishXRef.`comments`=%s
            """
        cursor.execute(sql, (inbredsetid, phenotypeid, publicationid, dataid, ""))
        rowcount = cursor.rowcount
        publishxrefid = con.insert_id()
        print "INSERT INTO PublishXRef: %d record: %d" % (rowcount, publishxrefid)
        # for loop next
        dataid += 1
        print
    # release
    con.close()
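As far as can be inferred from the column indices used above (the layout is reconstructed from the code, not from documentation), the tab-separated metafile carries one phenotype per row, and the datafile pairs each metafile row with three consecutive data rows read via phenotypedata.next():

# metafile columns (0-based), as consumed by the INSERT statements above:
#   0  PubMed_ID                       10  Authors
#   1  Pre_publication_description     11  Title
#   2  Post_publication_description    12  Abstract
#   3  Original_description            13  Journal
#   4  Pre_publication_abbreviation    14  Volume
#   5  Post_publication_abbreviation   15  Pages
#   6  Lab_code                        16  Month
#   7  Submitter                       17  Year
#   8  Owner                           18  Units
#   9  Authorized_Users
#
# datafile: a header row whose columns after the first are strain names, then
# for each phenotype three rows (mean value, standard error, N) whose columns
# after the first follow the strain order of the header row.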
Example #12
                nat_dict[model.is_training] = True
                sess.run(train_step, feed_dict=nat_dict)
            end = timer()
            training_time += end - start


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Train script options',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c',
                        '--config',
                        type=str,
                        help='path to config file',
                        default='config.json',
                        required=False)
    args = parser.parse_args()

    config_dict = utilities.get_config(args.config)

    model_dir = config_dict['model']['output_dir']
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # keep the configuration file with the model for reproducibility
    with open(os.path.join(model_dir, 'config.json'), 'w') as f:
        json.dump(config_dict, f, sort_keys=True, indent=4)

    config = utilities.config_to_namedtuple(config_dict)
    train(config)
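utilities.config_to_namedtuple is used here (and in the later examples) but not shown; a minimal sketch of what such a helper might look like, assuming a recursive dict-to-namedtuple conversion so that nested keys become attribute accesses like config.model.output_dir (an illustration, not the project's actual implementation):

from collections import namedtuple

def config_to_namedtuple(obj):
    # recursively wrap dicts so their keys become attributes
    if isinstance(obj, dict):
        fields = {k: config_to_namedtuple(v) for k, v in obj.items()}
        return namedtuple('Config', sorted(fields))(**fields)
    if isinstance(obj, list):
        return [config_to_namedtuple(v) for v in obj]
    return obj

cfg = config_to_namedtuple({'model': {'output_dir': 'runs/exp1'}})
print(cfg.model.output_dir)  # runs/exp1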
Example #13
# Constants
DATA = 'CIFAR' # Choices: ['CIFAR', 'ImageNet']
BATCH_SIZE = 10
NUM_WORKERS = 8
NOISE_SCALE = 20
NUM_ACTIVATIONS = 3
K = 5
VIS_CORRECT = False


DATA_SHAPE = 32 if DATA == 'CIFAR' else 224 # Image size (fixed for dataset)
REPRESENTATION_SIZE = 2048 # Size of representation vector (fixed for model)
#CLASSES = CLASS_DICT[DATA] # Class names for dataset


config = utilities.config_to_namedtuple(utilities.get_config('config_traincifar.json'))
model_dir = config.model.output_dir
if not os.path.exists(model_dir):
  os.makedirs(model_dir)
device = torch.device('cuda')

# Setting up training parameters
max_num_training_steps = config.training.max_num_training_steps
step_size_schedule = config.training.step_size_schedule
weight_decay = config.training.weight_decay
momentum = config.training.momentum
batch_size = config.training.batch_size
eval_during_training = config.training.eval_during_training
num_clean_examples = config.training.num_examples
if eval_during_training:
    num_eval_steps = config.training.num_eval_steps
Example #14
    latest = options.get("latest", None)
    if earliest and earliest.isdigit():
        start = datetime.datetime.fromtimestamp(int(earliest))
        kwords['start'] = start.strftime("%Y-%m-%d")
    if latest and (latest.isdigit() or latest == 'now'):
        if latest == 'now':
            end = datetime.datetime.now()
        else:
            end = datetime.datetime.fromtimestamp(int(latest))
        kwords['end'] = end.strftime("%Y-%m-%d")
    kwords['query'] = query_value

    logger.info("Query target: %s" % query_value)
    logger.debug("Raw options: %s" % str(options))

    configuration = get_config("passivetotal", "api-setup")
    username = configuration.get('username', None)
    api_key = configuration.get('apikey', None)

    output_events = list()
    pdns = DnsRequest(username, api_key,
                      headers=build_headers()).get_passive_dns(**kwords)
    if 'error' in pdns:
        raise Exception(
            "Whoa there, looks like you reached your quota for today! Please come back tomorrow to resume your investigation or contact support for details on enterprise plans."
        )
    for result in pdns.get("results", []):
        result = remove_keys(result, ['value', 'recordHash', 'collected'])
        result['count'] = pdns.get('totalRecords', 0)
        output_events.append(result)
    splunk.Intersplunk.outputResults(output_events)
Example #15
from model import Model, MLP, DeepMLP
from pgd_attack import LinfPGDAttack
import utilities

parser = argparse.ArgumentParser(
    description='Train script options',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c',
                    '--config',
                    type=str,
                    help='path to config file',
                    default='config.json',
                    required=False)
args = parser.parse_args()

config = utilities.get_config(args.config)

# Setting up training parameters
tf.set_random_seed(config['random_seed'])

max_num_training_steps = config['max_num_training_steps']
num_output_steps = config['num_output_steps']
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']
should_compress_fc_first = config['compress_first_fc']
should_compress_fc_second = config['compress_second_fc']
batch_size = config['training_batch_size']
c_eps = config['c_eps']
nu = config['nu']
compression_k = np.log(1.0 / nu) / (np.square(c_eps))
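A quick numeric check of the compression_k formula above, with hypothetical values for nu and c_eps:

import numpy as np

nu, c_eps = 0.01, 0.1                      # hypothetical config values
k = np.log(1.0 / nu) / (np.square(c_eps))  # ln(100) / 0.01
print(round(float(k), 1))                  # 460.5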
Example #16
def train(config='configs/cifar10_config_stn.json',
          save_root_path='/cluster/work/math/fanyang-broglil/CoreRepo',
          worstofk=None,
          attack_style=None,
          attack_limits=None,
          lambda_core=None,
          num_grouped_ids=None,
          num_ids=None,
          group_size=None,
          use_core=None,
          seed=None,
          this_repo=None):

    config_dict = utilities.get_config(config)
    config_dict_copy = copy.deepcopy(config_dict)
    # model_dir = config_dict['model']['output_dir']
    # if not os.path.exists(model_dir):
    #   os.makedirs(model_dir)

    # # keep the configuration file with the model for reproducibility
    # with open(os.path.join(model_dir, 'config.json'), 'w') as f:
    #     json.dump(config_dict, f, sort_keys=True, indent=4)

    config = utilities.config_to_namedtuple(config_dict)

    # seeding randomness
    if seed is None:
        seed = config.training.tf_random_seed
    else:
        config_dict_copy['training']['tf_random_seed'] = seed
    tf.set_random_seed(seed)
    np.random.seed(seed)

    # Setting up training parameters
    max_num_epochs = config.training.max_num_epochs
    step_size_schedule = config.training.step_size_schedule
    weight_decay = config.training.weight_decay
    momentum = config.training.momentum

    if group_size is None:
        group_size = config.training.group_size
    else:
        config_dict_copy['training']['group_size'] = group_size
    if num_grouped_ids is None:
        num_grouped_ids = config.training.num_grouped_ids
    else:
        config_dict_copy['training']['num_grouped_ids'] = num_grouped_ids
    if num_ids is None:
        num_ids = config.training.num_ids  # number of IDs per minibatch
    else:
        config_dict_copy['training']['num_ids'] = num_ids
    if lambda_core is None:
        lambda_core = config.training.lambda_
    else:
        config_dict_copy['training']['lambda_'] = lambda_core
    if use_core is None:
        use_core = config.training.use_core
    else:
        config_dict_copy['training']['use_core'] = use_core

    adversarial_training = config.training.adversarial_training
    eval_during_training = config.training.eval_during_training
    if eval_during_training:
        num_eval_steps = config.training.num_eval_steps

    # Setting up output parameters
    num_summary_steps = config.training.num_summary_steps
    num_checkpoint_steps = config.training.num_checkpoint_steps
    num_easyeval_steps = config.training.num_easyeval_steps

    # mini batch size per iteration
    # ToDo: need to make this support variable number of num_grouped_ids
    batch_size = num_ids + num_grouped_ids

    # Setting up model and loss
    model_family = config.model.model_family
    with_transformer = config.model.transformer
    translation_model = config.model.translation_model
    if model_family == "resnet":
        if with_transformer:
            if translation_model == "fc":
                model = stn_resnet.Model(config.model)
                print("Using stn_resnet")
            elif translation_model == "conv":
                model = stn_resnet_conv.Model(config.model)
                print("Using stn_resnet_conv")
        else:
            model = resnet.Model(config.model)
    else:
        print("Model family does not exist")
        exit()
    if use_core:
        total_loss = model.mean_xent + weight_decay * model.weight_decay_loss + lambda_core * model.core_loss2
    else:
        total_loss = model.mean_xent + weight_decay * model.weight_decay_loss

    # Setting up the data and the model
    data_path = config.data.data_path
    
    if config.data.dataset_name == "cifar-10":
        raw_cifar = cifar10_input.CIFAR10Data(data_path)
    elif config.data.dataset_name == "cifar-100":
        raw_cifar = cifar100_input.CIFAR100Data(data_path)
    else:
        raise ValueError("Unknown dataset name.")


    # uncomment to get a list of trainable variables
    # model_vars = tf.trainable_variables()
    # slim.model_analyzer.analyze_vars(model_vars, print_info=True)

    # Setting up the optimizer
    boundaries = [int(sss[0]) for sss in step_size_schedule]
    boundaries = boundaries[1:]
    values = [sss[1] for sss in step_size_schedule]
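    # e.g. a (hypothetical) step_size_schedule of [[0, 0.1], [40000, 0.01], [60000, 0.001]]
    # yields boundaries [40000, 60000] and values [0.1, 0.01, 0.001]; the first
    # boundary is dropped because the first value applies from step 0.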

    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.piecewise_constant(
        tf.cast(global_step, tf.int32),
        boundaries,
        values)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    #optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
    #                                   name="Adam")
    train_step = optimizer.minimize(total_loss, global_step=global_step)

    # Set up adversary
    if worstofk is None:
        worstofk = config.attack.random_tries
    else:
        config_dict_copy['attack']['random_tries'] = worstofk
    # @ Luzius: incorporate being able to choose multiple transformations
    if attack_style is None:
        attack_style = 'rotate'

    # Training attack
    attack = SpatialAttack(model, config.attack, 'random', worstofk, attack_limits)
    # Different eval attacks
    # Same attack as worstofk
    # @ Luzius: the names here are not yet clear/consistent; this keeps the 'random' attack you originally used, but it should perhaps be worst-of-k instead?
    # attack_eval_adv = SpatialAttack(model, config.attack, 'random', worstofk, attack_limits)
    attack_eval_random = SpatialAttack(model, config.attack, 'random', 1, attack_limits)
    # Grid attack
    attack_eval_grid = SpatialAttack(model, config.attack, 'grid', None, attack_limits)

    # ------------------START EXPERIMENT -------------------------
    # Initialize the Repo
    print("==> Creating repo..")
    # Create repo object if it wasn't passed, comment out if repo has issues
    if this_repo is None:
        this_repo = exprepo.ExperimentRepo(root_dir=save_root_path)

    # Create new experiment
    if this_repo is not None:
        exp_id = this_repo.create_new_experiment('cifar-10',
                                                 model_family,
                                                 worstofk,
                                                 attack_style,
                                                 attack_limits,
                                                 lambda_core,
                                                 num_grouped_ids,
                                                 group_size,
                                                 config_dict_copy)

    # Setting up the Tensorboard and checkpoint outputs
    model_dir = '%s/logdir/%s' % (save_root_path, exp_id)
    os.makedirs(model_dir, exist_ok=True)
    # We add accuracy and xent twice so we can easily make three types of
    # comparisons in Tensorboard:
    # - train vs eval (for a single run)
    # - train of different runs
    # - eval of different runs

    saver = tf.train.Saver(max_to_keep=3)

    tf.summary.scalar('accuracy_nat_train', model.accuracy, collections=['nat'])
    tf.summary.scalar('accuracy_nat', model.accuracy, collections = ['nat'])
    tf.summary.scalar('xent_nat_train', model.xent / batch_size,
                                                        collections=['nat'])
    tf.summary.scalar('xent_nat', model.xent / batch_size, collections=['nat'])
    tf.summary.image('images_nat_train', model.x_image, collections=['nat'])
    tf.summary.scalar('learning_rate', learning_rate, collections=['nat'])
    tf.summary.scalar('regression_loss', model.reg_loss, collections=['nat'])
    nat_summaries = tf.summary.merge_all('nat')

    # data augmentation
    x_input_placeholder = tf.placeholder(tf.float32,
                                         shape=[None, 32, 32, 3])
    flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img),
                        x_input_placeholder)

    tot_samp = raw_cifar.train_data.n
    max_num_iterations = int(np.floor((tot_samp/num_ids)*max_num_epochs))
    print("Total # of samples is: %d; This exp. will run %d iterations" % (tot_samp, max_num_iterations))

    # Compute the (epoch) gaps between summary, worstof1eval, checkpoints should happen
    summary_gap = int(np.floor(max_num_epochs/num_summary_steps))
    easyeval_gap = int(np.floor(max_num_epochs/num_easyeval_steps))
    checkpoint_gap = int(np.floor(max_num_epochs/num_checkpoint_steps))

    with tf.Session() as sess:


        # initialize data augmentation
        if config.training.data_augmentation:
            if config.data.dataset_name == "cifar-10":
                cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess)
            elif config.data.dataset_name == "cifar-100":
                cifar = cifar100_input.AugmentedCIFAR100Data(raw_cifar, sess)
            else:
                raise ValueError("Unknown dataset name.")
        else:
            cifar = raw_cifar

            
        cifar_eval_dict = {model.x_input: cifar.eval_data.xs,
                           model.y_input: cifar.eval_data.ys,
                           model.group:  np.arange(0, batch_size, 1, dtype="int32"),
                           model.transform: np.zeros([cifar.eval_data.n, 3]),
                           model.is_training: False}


        # Initialize the summary writer, global variables, and our time counter.
        summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
        #if eval_during_training:
        eval_dir = os.path.join(model_dir, 'eval')
        os.makedirs(eval_dir, exist_ok=True)
        eval_summary_writer = tf.summary.FileWriter(eval_dir)

        sess.run(tf.global_variables_initializer())
        
        training_time = 0.0

        ####################################
        # Main training loop
        ####################################
        # Initialize cache variables
        start_time = time.time()
        start_epoch = timer()
        it_count = 0
        epoch_count = 0
        acc_sum = 0
        it_summary = 0
        it_easyeval = 0
        it_ckpt = 0
        adv_time = 0
        train_time = 0

        for ii in range(max_num_iterations+1):
            x_batch, y_batch, epoch_done = cifar.train_data.get_next_batch(num_ids, multiple_passes=True)


            noop_trans = np.zeros([len(x_batch), 3])
            x_batch_nat = x_batch
            y_batch_nat = y_batch
            id_batch = np.arange(0, num_ids, 1, dtype="int32")
            if use_core:
                # Create rotated examples
                start = timer()
                ids = np.arange(0,num_grouped_ids,1,dtype="int32")

                for i in range(config.training.group_size):
                     
                   if config.training.data_augmentation_core:
                       x_batch_core = sess.run(flipped,feed_dict={x_input_placeholder: x_batch[0:num_grouped_ids,:,:,:]})
                   else:
                       x_batch_core = x_batch[0:num_grouped_ids,:,:,:]

                   x_batch_group, trans_group = attack.perturb(x_batch_core, y_batch[0:num_grouped_ids], sess)

                   #construct new batches including rotated examples
                   x_batch_nat = np.concatenate((x_batch_nat, x_batch_group), axis=0)
                   y_batch_nat = np.concatenate((y_batch_nat, y_batch), axis=0)
                   noop_trans = np.concatenate((noop_trans, trans_group), axis=0)
                   id_batch = np.concatenate((id_batch, ids), axis=0)

                end = timer()
                training_time += end - start
                adv_time +=  end - start

            else:

                if adversarial_training:
                    start = timer()
                    x_batch_nat, noop_trans = attack.perturb(x_batch, y_batch, sess)
                    end = timer()
                    adv_time +=  end - start

                else:
                    x_batch_nat, noop_trans = x_batch, noop_trans
                    
            nat_dict = {model.x_input: x_batch_nat,
                        model.y_input: y_batch_nat,
                        model.group: id_batch,
                        model.transform: noop_trans,
                        model.is_training: False}

            ################# Outputting/saving weights and evaluations ###############

            nat_acc = -1.0
            acc_grid = -1.0
            avg_xent_grid = -1.0
            saved_weights = 0
            chkpt_id = None  # reset so the checkpoint branch below never reads an undefined value

            # Compute training accuracy on this minibatch
            train_nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
            # Output to stdout
            if epoch_done:
                epoch_time = timer() - start_epoch
                # Average 
                av_acc = acc_sum/it_count

                # ToDo: Log this to file as well 
                
                # Training accuracy over epoch
                print('Epoch {}:    ({})'.format(epoch_count, datetime.now()))
                print('    training natural accuracy {:.4}%'.format(av_acc * 100))
                print('    {:.4} seconds per epoch'.format(epoch_time))

                # Accuracy on entire test set
                test_nat_acc = sess.run(model.accuracy, feed_dict=cifar_eval_dict)

                print('    test set natural accuracy {:.4}%'.format(test_nat_acc * 100))
                # print('    {:.4} seconds for test evaluation'.format(test_time))



                print("example TIME")
                print(adv_time)
                print("train TIME")
                print(train_time)

                ########### Things to do every xxx epochs #############
                # Check if worstof1 eval should be run
                if it_summary == summary_gap - 1 or epoch_count == max_num_epochs - 1:
                    summary = sess.run(nat_summaries, feed_dict=nat_dict)
                    summary_writer.add_summary(summary, global_step.eval(sess))
                    it_summary = 0
                else:
                    it_summary += 1

                if it_easyeval == easyeval_gap - 1 or epoch_count == max_num_epochs - 1:
                    # Evaluation on adv and natural
                    [acc_nat, acc_adv, avg_xent_nat, avg_xent_adv] =  evaluate(model, attack_eval_random, sess, config, "random", data_path, None)
                    # Save in checkpoint
                    chkpt_id = this_repo.create_training_checkpoint(
                        exp_id, training_step=ii, 
                        epoch=epoch_count, 
                        train_acc_nat=nat_acc,
                        test_acc_adv=acc_adv, test_acc_nat=acc_nat,
                        test_loss_adv=avg_xent_adv, 
                        test_loss_nat=avg_xent_nat)

                    it_easyeval = 0
                else:
                    it_easyeval += 1
                    
                startt = timer()
                if it_ckpt == checkpoint_gap - 1 or epoch_count == max_num_epochs - 1:
                    # Create checkpoint id if non-existent
                    if not chkpt_id:
                        chkpt_id = this_repo.create_training_checkpoint(
                            exp_id, training_step=ii, 
                            epoch=epoch_count, 
                            train_acc_nat=train_nat_acc,
                            test_acc_nat=test_nat_acc)

                    # Save checkpoint data (weights)
                    saver.save(sess,
                               os.path.join(model_dir, '{}_checkpoint'.format(chkpt_id)))
                    print(' chkpt saving took {:.4}s '.format(timer()-startt))
                    it_ckpt = 0
                else:
                    it_ckpt += 1
                
                # Set loss sum, it count back to zero
                acc_sum = train_nat_acc
                epoch_done = 0
                epoch_count += 1 
                start_epoch = timer()
                it_count = 1

            else:
                it_count += 1
                acc_sum += train_nat_acc


            # Actual training step
            start = timer()        
            nat_dict[model.is_training] = True
            sess.run(train_step, feed_dict=nat_dict)
            training_time += timer() - start
            train_time += timer() - start


        runtime = time.time() - start_time

        # Do all evaluations in the last step - on grid
        [_, acc_grid, _, avg_xent_grid] = evaluate(model, attack_eval_grid, sess, config, "grid", data_path, eval_summary_writer)

        
        this_repo.mark_experiment_as_completed(
            exp_id, train_acc_nat=nat_acc,
            test_acc_adv=acc_adv, test_acc_nat=acc_nat, 
            test_acc_grid=acc_grid, runtime=runtime)

    return 0