Example #1
0
File: knox.py Project: wwlytton/AP
axondelay, narrowdiam, widediam = 0, 5, 10

it2l = ['it2WT', 'it2C456S', 'it2R788C', 'it2',
        'itrecustom']  # it2 is RE, it is TC channel
# RERE    RETCa  RETCb  TCRE  PYPY  PYIN  INPYa    INPYb  PYRE  PYTC   TCPY  TCIN
synparams = OD({
    'synaptic weights J neurophys':
    (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.1500, 0.03, 1.2, 0.01, 1.2, 0.4),
    '75% IN->PY weight (0.1125)':
    (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.1125, 0.03, 1.2, 0.01, 1.2, 0.4),
    '50% IN->PY weight (0.075)':
    (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.0750, 0.03, 1.2, 0.01, 1.2, 0.4),
    '40% IN->PY weight (0.06)':
    (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.0600, 0.03, 1.2, 0.01, 1.2, 0.4),
    '25% IN->PY weight (0.0375)': (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.0375,
                                   0.03, 1.2, 0.01, 1.2, 0.4),
    '10% IN->PY weight (0.015)': (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.0150,
                                  0.03, 1.2, 0.01, 1.2, 0.4),
    '0% IN->PY A weight': (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.0000, 0.03, 1.2,
                           0.01, 1.2, 0.4),
    'orig IN->PY A weight': (0.20, 0.02, 0.04, 0.2, 0.6, 0.2, 0.1500, 0.03,
                             1.2, 0.01, 1.2, 0.4),
    '10% IN->PY A weight better RERE for new synapses':
    (0.12, 0.02, 0.04, 0.2, 0.6, 0.2, 0.0000, 0.03, 1.2, 0.01, 1.2, 0.4),
    '0% RERE and RETC': (0.00, 0.00, 0.04, 0.2, 0.6, 0.2, 0.1500, 0.03, 1.2,
                         0.01, 1.2, 0.4)
})
# gababapercent, gababpercent were both == 1
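# Hedged sketch (editorial, not part of knox.py): the column labels in the
# comment above can be zipped with any row of synparams to get a labelled,
# ordered view of the weights.
syn_labels = ('RERE', 'RETCa', 'RETCb', 'TCRE', 'PYPY', 'PYIN',
              'INPYa', 'INPYb', 'PYRE', 'PYTC', 'TCPY', 'TCIN')
labelled_weights = OD(zip(syn_labels, synparams['synaptic weights J neurophys']))
# labelled_weights['INPYa'] -> 0.15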


def barname(mech='it'):
    '''return the name of a gbar (max conductance) for a given mechanism name'''
Example #2
0
def get_menu(dev):
    return OD([('Control', control_cb)])
Example #3
0
from collections import OrderedDict as OD

# file generated at 2019-06-10 20:17:15 with the following command:
# create_dictionary.py -m python/samples/metaDict_2017_hh_sync.py -p /hdfs/local/karl/sync_ntuples/2017/nanoAODproduction/2019Jun10 -N samples_2017 -E 2017 -o python/samples -g hhAnalyzeSamples_2017_nanoAOD_sync.py -M

samples_2017 = OD()
samples_2017[
    "/GluGluToRadionToHHTo2B2VTo2L2Nu_M-750_narrow_13TeV-madgraph_correctedcfg/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM"] = OD(
        [
            ("type", "mc"),
            ("sample_category", "signal"),
            ("process_name_specific", "signal_ggf_spin0_750_hh_2b2v"),
            ("nof_files", 1),
            ("nof_db_files", 11),
            ("nof_events", {}),
            ("nof_tree_events", 52000),
            ("nof_db_events", 200000),
            ("fsize_local", 127111653),  # 127.11MB, avg file size 127.11MB
            ("fsize_db", 11931037531),  # 11.93GB, avg file size 1.08GB
            ("use_it", True),
            ("xsection", 0.026422),
            ("genWeight", True),
            ("triggers", [
                '1e', '1mu', '2e', '2mu', '1e1mu', '3e', '3mu', '2e1mu',
                '1e2mu', '1e1tau', '1mu1tau', '2tau'
            ]),
            ("has_LHE", True),
            ("LHE_set",
             "LHA IDs 306000 - 306102 -> NNPDF31_nnlo_hessian_pdfas PDF set, expecting 103 weights (counted 103 weights)"
             ),
            ("nof_reweighting", 0),
Example #4
0
        locale_folder = path.abspath(locale_folder)
        xml = ET.parse('{0}/lang_{1}.xml'.format(locale_folder, lang))

        # Looping and gathering texts from the xml file
        for elem in xml.getroot().iter():
            dico_texts[elem.tag] = elem.text

        # end of function
        return dico_texts


###############################################################################
###### Stand alone program ########
###################################

if __name__ == '__main__':
    """ standalone execution """
    # ordered dictionary to store texts
    blabla = OD()

    # get the app object
    app = TextsManager()

    # get texts
    app.load_texts(dico_texts=blabla,
                   lang='EN',
                   locale_folder=r'../../data/locale')

    # print the collected texts
    print(blabla)
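
    # Hedged illustration (assumed XML layout, not taken from the project's
    # locale files): load_texts above simply maps each element tag to its text,
    # so a file shaped like the string below would fill `blabla` the same way.
    import xml.etree.ElementTree as ET
    demo_root = ET.fromstring('<texts><btn_ok>OK</btn_ok><btn_quit>Quit</btn_quit></texts>')
    demo_texts = OD((elem.tag, elem.text) for elem in demo_root.iter())
    print(demo_texts)  # OrderedDict([('texts', None), ('btn_ok', 'OK'), ('btn_quit', 'Quit')])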
Example #5
0
def get_menu(dev):
    return OD([('Monitor', monitor_cb)])
Example #6
0
def main():
    dir_photos = "./data/flickr8k/Flicker8k_photos/"
    file_annot = "./data/flickr8k/Flickr8k_text/Flickr8k.token.txt"

    jpg_files = ds.images_info(dir_photos)
    ann_dframe = ds.annots_info(file_annot, df=True)
    print(
        "Dataset overview\n-------------------------------------------------------------------------------------------------------------\n"
    )
    print(ann_dframe)
    print(
        "\n-------------------------------------------------------------------------------------------------------------\n"
    )

    ## Prepare captions
    print("Preparing caption data for images")
    word_count = ds.word_freq(ann_dframe)
    # print(word_count)

    ## Clean text
    print("Cleaning text ... ", end="")
    for i, cpt in enumerate(ann_dframe.caption.values):
        ann_dframe["caption"].iloc[i] = ds.clean_text(cpt)
    print("done.")
    print(ann_dframe)
    word_count = ds.word_freq(ann_dframe)
    # print(word_count)
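
    # Hedged sketch of a typical caption-cleaning step (ds.clean_text is project
    # code and may differ): lowercase, drop punctuation, keep alphabetic words
    # longer than one character.
    import string
    def clean_text_sketch(text):
        text = text.lower().translate(str.maketrans('', '', string.punctuation))
        return ' '.join(w for w in text.split() if len(w) > 1 and w.isalpha())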

    ## Add start and end sequence token
    ann_dframe_orig = copy(ann_dframe)
    ann_dfrm = ds.add_start_end_tokens(ann_dframe)
    print(ann_dfrm)

    vgg_net = vis.models.vgg16(pretrained=True, progress=True)  # torchvision's pretrained flag is boolean
    for p in vgg_net.parameters():
        p.requires_grad = False
    ## Load model parameters from path
    # vgg_net.load_state_dict(torch.load('./models/vgg16-397923af.pth'))
    ## Features in the last layer
    num_ftrs = vgg_net.classifier[-1].in_features
    print(num_ftrs)
    print(vgg_net)
    ## Remove the last classifier layer: Softmax, ReLU, Dropout
    vgg_net.classifier = vgg_net.classifier[:-1]
    # ## Net architecture
    # summary(vgg_net, input_size=(3, 224, 224))
    print(vgg_net)
    # ## Features in the last layer
    # num_ftrs = vgg_net.classifier[-1].in_features
    # print(num_ftrs)
    #
    ## Read images with specified transforms
    print("Reading images ... ", end='')
    images = ds.read_image(jpg_files,
                           dir_photos,
                           normalize=True,
                           resize=224,
                           tensor=True)
    print("done.")
    # print(images.keys())
    ## Get feature map for image tensor through VGG-16
    img_featrs = OD()
    print("Gathering images' features from last conv layer ... ", end='')
    for i, jpg_name in enumerate(images.keys()):
        with torch.no_grad():
            print(i, jpg_name)
            img_featrs[jpg_name] = vgg_net(images[jpg_name].unsqueeze(0))
    print("done.")
    # print(img_featrs, img_featrs[jpg_name].size(), sep='\n')
    print(img_featrs.keys())

    # Get features for images in our dataset from pretrained VGG-16
    features = mdl.get_features(dir_photos, read=True, download=False)
    print(features)

    ## Prep image tensor
    print("Prepping image tensor ... ", end="")
    fnames = []
    img_tns_list = []
    cap_list = []
    for i, jpg_name in enumerate(ann_dfrm.filename.values):
        if (i % 5) == 0:
            if jpg_name in img_featrs.keys():
                fnames.append(jpg_name)
                img_tns_list.append(img_featrs[jpg_name])
                cap_list.append(ann_dfrm.iloc[i]["caption"])
    print("done.")
    print(len(img_tns_list), len(cap_list))
    img_tns = torch.cat(img_tns_list)
    print(img_tns.shape)
    print(
        "Saving filenames list, image tensor list, captions tensor list ... ",
        end="")
    torch.save(fnames, 'fnames.pkl')
    torch.save(img_tns_list, 'image_tns_list.pkl')
    torch.save(cap_list, 'captions_list.pkl')
    print("done.")

    print("Loading fnames, image tensor list and captions tensor list ... ",
          end="")
    fnames = torch.load('fnames.pkl')
    img_tns_list = torch.load('image_tns_list.pkl')
    img_tns = torch.cat(img_tns_list)
    cap_list = torch.load('captions_list.pkl')
    # print(len(fnames), cap_list)
    print("done.")

    cap_seq, vocab_size, cap_max_len, tokens = ds.tokenizer(cap_list)
    n_cap = len(cap_seq)
    vald_prop, test_prop = 0.2, 0.2
    n_vald = int(n_cap * vald_prop)
    n_test = int(n_cap * test_prop)

    train_cap, valid_cap, evaln_cap = ds.split_dset(cap_seq, n_vald, n_test)
    train_ims, valid_ims, evaln_ims = ds.split_dset(img_tns, n_vald, n_test)
    # train_fnm, valid_fnm, evaln_fnm = ds.split_dset(fnames, n_vald, n_test)

    print(len(train_cap), len(valid_cap), len(evaln_cap))
    print(len(train_ims), len(valid_ims), len(evaln_ims))
    # print(len(train_fnm), len(valid_fnm), len(evaln_fnm))

    images_train, captions_train, target_caps_train = ds.prep_data(
        train_ims, train_cap, vocab_size, cap_max_len)
    images_valid, captions_valid, target_caps_valid = ds.prep_data(
        valid_ims, valid_cap, vocab_size, cap_max_len)

    ## Dataloader
    bs = 64
    trainset = ds.Flickr8k(images_train, captions_train, target_caps_train)
    validset = ds.Flickr8k(images_valid, captions_valid, target_caps_valid)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=bs,
                                              shuffle=True)
    validloader = torch.utils.data.DataLoader(validset, batch_size=bs)

    #
    # ## Device: CPU or GPU?
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    print("Using " + device)

    ## Model
    model = mdl.CapNet(vocab_size, cap_max_len).to(device)
    criterion = nn.CrossEntropyLoss()

    ## Optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
    max_n_epochs = 5

    # ## Training
    print("Starting training ... ")

    epoch_train_loss, epoch_valid_loss = [], []
    min_val_loss = 100
    for epoch in range(1, max_n_epochs + 1):
        print("-------------------- Epoch: [%d / %d] ----------------------" %
              (epoch, max_n_epochs))
        training_loss, validation_loss = 0.0, 0.0
        ## Batch training
        for i, data in enumerate(trainloader):
            images, captions, target_caps = data[0].to(device), data[1].to(
                device), data[2].to(device)
            optimizer.zero_grad()
            out = model(images, captions.t())
            loss = criterion(out, target_caps)
            loss.backward()
            optimizer.step()
            training_loss += loss.item()
        epoch_train_loss.append(training_loss / len(trainloader))
        print("Training loss: %f" % (epoch_train_loss[-1]), end=" ")
        for i, data in enumerate(validloader):
            with torch.set_grad_enabled(False):
                images, captions, target_caps = data[0].to(device), data[1].to(
                    device), data[2].to(device)
                out = model(images, captions.t())
                loss = criterion(out, target_caps)
                validation_loss += loss.item()
        epoch_valid_loss.append(validation_loss / len(validloader))
        print("Validation loss: %f" % (epoch_valid_loss[-1]))
        scheduler.step()

        if epoch_valid_loss[-1] < min_val_loss:
            min_val_loss = epoch_valid_loss[-1]  # update the running best so later epochs compare against it
            print("Found best model.")
            best_model = deepcopy(model)

    plt.plot(list(range(max_n_epochs)),
             epoch_train_loss,
             label="Training loss")
    plt.plot(list(range(max_n_epochs)),
             epoch_valid_loss,
             label="Validation loss")
    plt.xlabel("Number of epochs")
    plt.ylabel("Loss")
    plt.title("Number of epochs vs loss")
    plt.legend()
    plt.show()

    ###########
    # Save model
    print("Saving best model ... ")
    torch.save(best_model, 'best_model.pkl')
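    # Editorial note (hedged): torch.save on a whole nn.Module pickles the class
    # definition as well; saving parameters only is the more portable pattern,
    # e.g. torch.save(best_model.state_dict(), 'best_model_state.pkl').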
Example #7
0
from collections import OrderedDict as OD

samples_2016 = OD()

# file generated with command:
# check_broken -p /hdfs/local/karl/ttHNtupleProduction/2016/2017Jan16/ntuples/2lss_1tau/ -o -z -i -P -v

samples_2016[
    "/DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/RunIISpring16MiniAODv2-PUSpring16_80X_mcRun2_asymptotic_2016_miniAODv2_v0-v1/MINIAODSIM"] = OD(
        [
            ("type", "mc"),
            ("sample_category", "EWK"),
            ("process_name_specific", "DYJetsToLL_M-10to50"),
            ("nof_files", 369),
            ("nof_events", 21614782),
            ("use_it", True),
            ("xsection", 18610.000000),
            ("genWeight", True),
            ("triggers", [
                "1e", "1mu", "2e", "1e1mu", "2mu", "3e", "2e1mu", "1e2mu",
                "3mu", "1e1tau", "1mu1tau", "2tau"
            ]),
            ("reHLT", False),
            ("local_paths", [
                OD([
                    ("path",
                     "/hdfs/local/karl/ttHNtupleProduction/2016/2017Jan16/ntuples/2lss_1tau/DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/VHBB_HEPPY_V24bis_DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-amcatnloFXFX-Py8__spr16MAv2-puspr16_80r2as_2016_MAv2_v0-v1/160911_223234"
                     ),
                    ("selection", "*"),
                    ("blacklist", []),
                ]),
Example #8
0
def __getFilters(obj):
    d = OD()
    for o in obj.filters:
        d[__color('<Filter {} >'.format(o), 'yellow')] = {}
    return d
Example #9
0
import networkx as nx
import random
#import seaborn as sns
import scipy.cluster.hierarchy as sch
import heapq

# Reading the file
file = open("data.txt", "r")
data = file.read()
data = data.replace('\n', "")
x_data = data.split('>')

# Special case
del x_data[0]

points = OD()  # stores all points: key is chr_***, value is the entire string
name_to_id = {}  # Hashing from name to id
id_to_name = {}  # Hashing from id to name


def distance(cluster_1, cluster_2):
    len1 = len(cluster_1)
    len2 = len(cluster_2)
    dist = 0
    for i in cluster_1:
        for j in cluster_2:
            dist += proximity_matrix[i, j]

    dist /= (len1 * len2)
    return dist
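

# Hedged usage sketch (toy numbers, not from the original script): `distance`
# is average linkage, i.e. the mean pairwise proximity between members of the
# two clusters.
import numpy as np

proximity_matrix = np.array([[0., 1., 4.],
                             [1., 0., 2.],
                             [4., 2., 0.]])
print(distance([0], [1, 2]))  # (1.0 + 4.0) / 2 = 2.5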
Example #10
0
all_functions = OD([
    ('fetch source', Function(
        OD([('command', 'git.get_project'),
            ('params', OD([
                ('directory', 'mongoc'),
            ]))]),
        shell_mongoc(r'''
        # TODO: CDRIVER-3573 do not hardcode the version.
        if [ "${project}" != "mongo-c-driver" ]; then
           # This is an older branch, like r1.17
           VERSION_CURRENT="1.17.0-pre"
           echo $VERSION_CURRENT > "VERSION_CURRENT"
           VERSION=$VERSION_CURRENT-${version_id}
        elif [ "${is_patch}" = "true" ]; then
           VERSION_CURRENT="1.17.0-pre"
           echo $VERSION_CURRENT > "VERSION_CURRENT"
           VERSION=$VERSION_CURRENT-patch-${version_id}
        else
           VERSION=latest
        fi
        echo "CURRENT_VERSION: $VERSION" > expansion.yml
        ''', test=False),
        OD([('command', 'expansions.update'),
            ('params', OD([
                ('file', 'mongoc/expansion.yml'),
            ]))]),
        shell_exec(r'''
        rm -f *.tar.gz
        curl --retry 5 https://s3.amazonaws.com/mciuploads/${project}/${branch_name}/mongo-c-driver-${CURRENT_VERSION}.tar.gz --output mongoc.tar.gz -sS --max-time 120
        ''', test=False, continue_on_err=True),
    )),
    ('upload release', Function(
        shell_exec(
            r'[ -f mongoc/cmake_build/mongo*gz ] && mv mongoc/cmake_build/mongo*gz mongoc.tar.gz',
            errexit=False, test=False),
        s3_put(
            '${project}/${branch_name}/mongo-c-driver-${CURRENT_VERSION}.tar.gz',
            project_path=False, aws_key='${aws_key}',
            aws_secret='${aws_secret}', local_file='mongoc.tar.gz',
            bucket='mciuploads', permissions='public-read',
            content_type='${content_type|application/x-gzip}'),
    )),
    ('upload build', Function(
        targz_pack('${build_id}.tar.gz', 'mongoc', './**'),
        s3_put('${build_variant}/${revision}/${task_name}/${build_id}.tar.gz',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='${build_id}.tar.gz', bucket='mciuploads',
               permissions='public-read',
               content_type='${content_type|application/x-gzip}'),
    )),
    ('release archive', Function(
        shell_mongoc(r'''
        # Need modern Sphinx for :caption: in literal includes.
        python -m virtualenv venv
        cd venv
        . bin/activate
        ./bin/pip install sphinx
        cd ..
        
        set -o xtrace
        export MONGOC_TEST_FUTURE_TIMEOUT_MS=30000
        export MONGOC_TEST_SKIP_LIVE=on
        export MONGOC_TEST_SKIP_SLOW=on
        sh .evergreen/check-release-archive.sh
        ''', xtrace=False),
    )),
    ('install ssl', Function(
        shell_mongoc(r'SSL=${SSL} sh .evergreen/install-ssl.sh', test=False),
    )),
    ('fetch build', Function(
        shell_exec(r'rm -rf mongoc', test=False, continue_on_err=True),
        OD([('command', 's3.get'),
            ('params', OD([
                ('aws_key', '${aws_key}'),
                ('aws_secret', '${aws_secret}'),
                ('remote_file',
                 '${project}/${build_variant}/${revision}/${BUILD_NAME}/${build_id}.tar.gz'),
                ('bucket', 'mciuploads'),
                ('local_file', 'build.tar.gz'),
            ]))]),
        shell_exec(r'''
        mkdir mongoc
        
        if command -v gtar 2>/dev/null; then
           TAR=gtar
        else
           TAR=tar
        fi
        
        $TAR xf build.tar.gz -C mongoc/
        ''', test=False, continue_on_err=True),
    )),
    ('upload docs', Function(
        shell_exec(r'''
        export AWS_ACCESS_KEY_ID=${aws_key}
        export AWS_SECRET_ACCESS_KEY=${aws_secret}
        aws s3 cp doc/html s3://mciuploads/${project}/docs/libbson/${CURRENT_VERSION} --recursive --acl public-read --region us-east-1
        ''', test=False, silent=True, working_dir='mongoc/cmake_build/src/libbson',
                   xtrace=False),
        s3_put('docs/libbson/${CURRENT_VERSION}/index.html',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='mongoc/cmake_build/src/libbson/doc/html/index.html',
               bucket='mciuploads', permissions='public-read',
               content_type='text/html', display_name='libbson docs'),
        shell_exec(r'''
        export AWS_ACCESS_KEY_ID=${aws_key}
        export AWS_SECRET_ACCESS_KEY=${aws_secret}
        aws s3 cp doc/html s3://mciuploads/${project}/docs/libmongoc/${CURRENT_VERSION} --recursive --acl public-read --region us-east-1
        ''', test=False, silent=True, working_dir='mongoc/cmake_build/src/libmongoc',
                   xtrace=False),
        s3_put('docs/libmongoc/${CURRENT_VERSION}/index.html',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='mongoc/cmake_build/src/libmongoc/doc/html/index.html',
               bucket='mciuploads', permissions='public-read',
               content_type='text/html', display_name='libmongoc docs'),
    )),
    ('upload man pages', Function(
        shell_mongoc(r'''
        # Get "aha", the ANSI HTML Adapter.
        git clone --depth 1 https://github.com/theZiz/aha.git aha-repo
        cd aha-repo
        make
        cd ..
        mv aha-repo/aha .
        
        sh .evergreen/man-pages-to-html.sh libbson cmake_build/src/libbson/doc/man > bson-man-pages.html
        sh .evergreen/man-pages-to-html.sh libmongoc cmake_build/src/libmongoc/doc/man > mongoc-man-pages.html
        ''', test=False, silent=True, xtrace=False),
        s3_put('man-pages/libbson/${CURRENT_VERSION}/index.html',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='mongoc/bson-man-pages.html', bucket='mciuploads',
               permissions='public-read', content_type='text/html',
               display_name='libbson man pages'),
        s3_put('man-pages/libmongoc/${CURRENT_VERSION}/index.html',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='mongoc/mongoc-man-pages.html', bucket='mciuploads',
               permissions='public-read', content_type='text/html',
               display_name='libmongoc man pages'),
    )),
    ('upload coverage', Function(
        shell_mongoc(r'''
        export AWS_ACCESS_KEY_ID=${aws_key}
        export AWS_SECRET_ACCESS_KEY=${aws_secret}
        aws s3 cp coverage s3://mciuploads/${project}/%s/coverage/ --recursive --acl public-read --region us-east-1
        ''' % (build_path,), test=False, silent=True, xtrace=False),
        s3_put(build_path + '/coverage/index.html', aws_key='${aws_key}',
               aws_secret='${aws_secret}',
               local_file='mongoc/coverage/index.html', bucket='mciuploads',
               permissions='public-read', content_type='text/html',
               display_name='Coverage Report'),
    )),
    ('abi report', Function(
        shell_mongoc(r'''
        sh .evergreen/abi-compliance-check.sh
        ''', test=False, xtrace=True),
        shell_mongoc(r'''
        export AWS_ACCESS_KEY_ID=${aws_key}
        export AWS_SECRET_ACCESS_KEY=${aws_secret}
        aws s3 cp abi-compliance/compat_reports s3://mciuploads/${project}/%s/abi-compliance/compat_reports --recursive --acl public-read --region us-east-1
        
        if [ -e ./abi-compliance/abi-error.txt ]; then
          exit 1
        else
          exit 0
        fi
        ''' % (build_path,), silent=True, test=False, xtrace=False),
        s3_put(build_path + '/abi-compliance/compat_report.html',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_files_include_filter='mongoc/abi-compliance/compat_reports/**/*.html',
               bucket='mciuploads', permissions='public-read',
               content_type='text/html', display_name='ABI Report:'),
    )),
    ('upload scan artifacts', Function(
        shell_mongoc(r'''
        if find scan -name \*.html | grep -q html; then
          (cd scan && find . -name index.html -exec echo "<li><a href='{}'>{}</a></li>" \;) >> scan.html
        else
          echo "No issues found" > scan.html
        fi
        '''),
        shell_mongoc(r'''
        export AWS_ACCESS_KEY_ID=${aws_key}
        export AWS_SECRET_ACCESS_KEY=${aws_secret}
        aws s3 cp scan s3://mciuploads/${project}/%s/scan/ --recursive --acl public-read --region us-east-1
        ''' % (build_path,), test=False, silent=True, xtrace=False),
        s3_put(build_path + '/scan/index.html', aws_key='${aws_key}',
               aws_secret='${aws_secret}', local_file='mongoc/scan.html',
               bucket='mciuploads', permissions='public-read',
               content_type='text/html', display_name='Scan Build Report'),
    )),
    ('upload mo artifacts', Function(
        shell_mongoc(r'''
        DIR=MO
        [ -d "/cygdrive/c/data/mo" ] && DIR="/cygdrive/c/data/mo"
        [ -d $DIR ] && find $DIR -name \*.log | xargs tar czf mongodb-logs.tar.gz
        ''', test=False),
        s3_put(build_path + '/logs/${task_id}-${execution}-mongodb-logs.tar.gz',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='mongoc/mongodb-logs.tar.gz', bucket='mciuploads',
               permissions='public-read',
               content_type='${content_type|application/x-gzip}',
               display_name='mongodb-logs.tar.gz'),
        s3_put(build_path + '/logs/${task_id}-${execution}-orchestration.log',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='mongoc/MO/server.log', bucket='mciuploads',
               permissions='public-read',
               content_type='${content_type|text/plain}',
               display_name='orchestration.log'),
        shell_mongoc(r'''
        # Find all core files from mongodb in orchestration and move to mongoc
        DIR=MO
        MDMP_DIR=$DIR
        [ -d "/cygdrive/c/data/mo" ] && DIR="/cygdrive/c/data/mo"
        [ -d "/cygdrive/c/mongodb" ] && MDMP_DIR="/cygdrive/c/mongodb"
        core_files=$(/usr/bin/find -H $MO $MDMP_DIR \( -name "*.core" -o -name "*.mdmp" \) 2> /dev/null)
        for core_file in $core_files
        do
          base_name=$(echo $core_file | sed "s/.*\///")
          # Move file if it does not already exist
          if [ ! -f $base_name ]; then
            mv $core_file .
          fi
        done
        ''', test=False),
        targz_pack('mongo-coredumps.tgz', 'mongoc', './**.core', './**.mdmp'),
        s3_put(build_path + '/coredumps/${task_id}-${execution}-coredumps.log',
               aws_key='${aws_key}', aws_secret='${aws_secret}',
               local_file='mongo-coredumps.tgz', bucket='mciuploads',
               permissions='public-read',
               content_type='${content_type|application/x-gzip}',
               display_name='Core Dumps - Execution ${execution}',
               optional='True'),
    )),
    ('backtrace', Function(
        shell_mongoc(r'./.evergreen/debug-core-evergreen.sh', test=False),
    )),
    ('upload working dir', Function(
        targz_pack('working-dir.tar.gz', 'mongoc', './**'),
        s3_put(
            build_path +
            '/artifacts/${task_id}-${execution}-working-dir.tar.gz',
            aws_key='${aws_key}', aws_secret='${aws_secret}',
            local_file='working-dir.tar.gz', bucket='mciuploads',
            permissions='public-read',
            content_type='${content_type|application/x-gzip}',
            display_name='working-dir.tar.gz'),
    )),
    ('upload test results', Function(
        OD([('command', 'attach.results'),
            ('params', OD([
                ('file_location', 'mongoc/test-results.json'),
            ]))]),
    )),
    ('bootstrap mongo-orchestration', Function(
        shell_mongoc(r'''
        export MONGODB_VERSION=${VERSION}
        export TOPOLOGY=${TOPOLOGY}
        export IPV4_ONLY=${IPV4_ONLY}
        export AUTH=${AUTH}
        export AUTHSOURCE=${AUTHSOURCE}
        export SSL=${SSL}
        export ORCHESTRATION_FILE=${ORCHESTRATION_FILE}
        export OCSP=${OCSP}
        sh .evergreen/integration-tests.sh
        ''', test=False),
    )),
    ('run tests', Function(
        shell_mongoc(r'''
        export COMPRESSORS='${COMPRESSORS}'
        export CC='${CC}'
        export AUTH=${AUTH}
        export SSL=${SSL}
        export URI=${URI}
        export IPV4_ONLY=${IPV4_ONLY}
        export VALGRIND=${VALGRIND}
        export MONGOC_TEST_URI=${URI}
        export DNS=${DNS}
        export ASAN=${ASAN}
        export CLIENT_SIDE_ENCRYPTION=${CLIENT_SIDE_ENCRYPTION}
        # Hide credentials.
        set +o xtrace
        set +o errexit
        # Only set creds if testing with Client Side Encryption.
        # libmongoc may build with CSE enabled (if the host has libmongocrypt installed)
        # and will try to run those tests (which fail on ASAN unless spawning is bypassed).
        if [ -n "$CLIENT_SIDE_ENCRYPTION" ]; then
          echo "testing with CSE"
          export MONGOC_TEST_AWS_SECRET_ACCESS_KEY="${client_side_encryption_aws_secret_access_key}"
          export MONGOC_TEST_AWS_ACCESS_KEY_ID="${client_side_encryption_aws_access_key_id}"
        fi
        set -o errexit
        set -o xtrace
        sh .evergreen/run-tests.sh
        '''),
    )),
    ('run tests bson', Function(
        shell_mongoc(r'CC="${CC}" sh .evergreen/run-tests-bson.sh'),
    )),
    # Use "silent=True" to hide output since errors may contain credentials.
    ('run auth tests', Function(
        shell_mongoc(r'''
        export AUTH_HOST='${auth_host}'
        export AUTH_PLAIN='${auth_plain}'
        export AUTH_MONGODBCR='${auth_mongodbcr}'
        export AUTH_GSSAPI='${auth_gssapi}'
        export AUTH_CROSSREALM='${auth_crossrealm}'
        export AUTH_GSSAPI_UTF8='${auth_gssapi_utf8}'
        export ATLAS_FREE='${atlas_free}'
        export ATLAS_FREE_SRV='${atlas_free_srv}'
        export ATLAS_REPLSET='${atlas_replset}'
        export ATLAS_REPLSET_SRV='${atlas_replset_srv}'
        export ATLAS_SHARD='${atlas_shard}'
        export ATLAS_SHARD_SRV='${atlas_shard_srv}'
        export ATLAS_TLS11='${atlas_tls11}'
        export ATLAS_TLS11_SRV='${atlas_tls11_srv}'
        export ATLAS_TLS12='${atlas_tls12}'
        export ATLAS_TLS12_SRV='${atlas_tls12_srv}'
        export REQUIRE_TLS12='${require_tls12}'
        export OBSOLETE_TLS='${obsolete_tls}'
        export VALGRIND='${valgrind}'
        sh .evergreen/run-auth-tests.sh
        ''', silent=True, xtrace=False),
    )),
    ('run mock server tests', Function(
        shell_mongoc(
            r'CC="${CC}" VALGRIND=${VALGRIND} sh .evergreen/run-mock-server-tests.sh'),
    )),
    ('cleanup', Function(
        shell_mongoc(r'''
        cd MO
        mongo-orchestration stop
        ''', test=False),
    )),
    ('windows fix', Function(
        shell_mongoc(r'''
        for i in $(find .evergreen -name \*.sh); do
          cat $i | tr -d '\r' > $i.new
          mv $i.new $i
        done
        ''', test=False, xtrace=False),
    )),
    ('make files executable', Function(
        shell_mongoc(r'''
        for i in $(find .evergreen -name \*.sh); do
          chmod +x $i
        done
        ''', test=False, xtrace=False),
    )),
    ('prepare kerberos', Function(
        shell_mongoc(r'''
        if test "${keytab|}"; then
           echo "${keytab}" > /tmp/drivers.keytab.base64
           base64 --decode /tmp/drivers.keytab.base64 > /tmp/drivers.keytab
           cat .evergreen/kerberos.realm | $SUDO tee -a /etc/krb5.conf
        fi
        ''', test=False, silent=True, xtrace=False),
    )),
    ('link sample program', Function(
        shell_mongoc(r'''
        # Compile a program that links dynamically or statically to libmongoc,
        # using variables from pkg-config or CMake's find_package command.
        export BUILD_SAMPLE_WITH_CMAKE=${BUILD_SAMPLE_WITH_CMAKE}
        export BUILD_SAMPLE_WITH_CMAKE_DEPRECATED=${BUILD_SAMPLE_WITH_CMAKE_DEPRECATED}
        export ENABLE_SSL=${ENABLE_SSL}
        export ENABLE_SNAPPY=${ENABLE_SNAPPY}
        LINK_STATIC=  sh .evergreen/link-sample-program.sh
        LINK_STATIC=1 sh .evergreen/link-sample-program.sh
        '''),
    )),
    ('link sample program bson', Function(
        shell_mongoc(r'''
        # Compile a program that links dynamically or statically to libbson,
        # using variables from pkg-config or from CMake's find_package command.
        BUILD_SAMPLE_WITH_CMAKE=  BUILD_SAMPLE_WITH_CMAKE_DEPRECATED=  LINK_STATIC=  sh .evergreen/link-sample-program-bson.sh
        BUILD_SAMPLE_WITH_CMAKE=  BUILD_SAMPLE_WITH_CMAKE_DEPRECATED=  LINK_STATIC=1 sh .evergreen/link-sample-program-bson.sh
        BUILD_SAMPLE_WITH_CMAKE=1 BUILD_SAMPLE_WITH_CMAKE_DEPRECATED=  LINK_STATIC=  sh .evergreen/link-sample-program-bson.sh
        BUILD_SAMPLE_WITH_CMAKE=1 BUILD_SAMPLE_WITH_CMAKE_DEPRECATED=  LINK_STATIC=1 sh .evergreen/link-sample-program-bson.sh
        BUILD_SAMPLE_WITH_CMAKE=1 BUILD_SAMPLE_WITH_CMAKE_DEPRECATED=1 LINK_STATIC=  sh .evergreen/link-sample-program-bson.sh
        BUILD_SAMPLE_WITH_CMAKE=1 BUILD_SAMPLE_WITH_CMAKE_DEPRECATED=1 LINK_STATIC=1 sh .evergreen/link-sample-program-bson.sh
        '''),
    )),
    ('link sample program MSVC', Function(
        shell_mongoc(r'''
        # Build libmongoc with CMake and compile a program that links
        # dynamically or statically to it, using variables from CMake's
        # find_package command.
        export ENABLE_SSL=${ENABLE_SSL}
        export ENABLE_SNAPPY=${ENABLE_SNAPPY}
        LINK_STATIC=  cmd.exe /c .\\.evergreen\\link-sample-program-msvc.cmd
        LINK_STATIC=1 cmd.exe /c .\\.evergreen\\link-sample-program-msvc.cmd
        '''),
    )),
    ('link sample program mingw', Function(
        shell_mongoc(r'''
        # Build libmongoc with CMake and compile a program that links
        # dynamically to it, using variables from pkg-config.exe.
        cmd.exe /c .\\.evergreen\\link-sample-program-mingw.cmd
        '''),
    )),
    ('link sample program MSVC bson', Function(
        shell_mongoc(r'''
        # Build libmongoc with CMake and compile a program that links
        # dynamically or statically to it, using variables from CMake's
        # find_package command.
        export ENABLE_SSL=${ENABLE_SSL}
        export ENABLE_SNAPPY=${ENABLE_SNAPPY}
        LINK_STATIC=  cmd.exe /c .\\.evergreen\\link-sample-program-msvc-bson.cmd
        LINK_STATIC=1 cmd.exe /c .\\.evergreen\\link-sample-program-msvc-bson.cmd
        '''),
    )),
    ('link sample program mingw bson', Function(
        shell_mongoc(r'''
        # Build libmongoc with CMake and compile a program that links
        # dynamically to it, using variables from pkg-config.exe.
        cmd.exe /c .\\.evergreen\\link-sample-program-mingw-bson.cmd
        '''),
    )),
    ('update codecov.io', Function(
        shell_mongoc(r'''
        export CODECOV_TOKEN=${codecov_token}
        curl -s https://codecov.io/bash | bash
        ''', test=False, xtrace=False),
    )),
    ('debug-compile-coverage-notest-nosasl-nossl', Function(
        shell_mongoc(r'''
        export EXTRA_CONFIGURE_FLAGS="-DENABLE_COVERAGE=ON -DENABLE_EXAMPLES=OFF"
        DEBUG=ON CC='${CC}' MARCH='${MARCH}' SASL=OFF SSL=OFF SKIP_MOCK_TESTS=ON sh .evergreen/compile.sh
        '''),
    )),
    ('debug-compile-coverage-notest-nosasl-openssl', Function(
        shell_mongoc(r'''
        export EXTRA_CONFIGURE_FLAGS="-DENABLE_COVERAGE=ON -DENABLE_EXAMPLES=OFF"
        DEBUG=ON CC='${CC}' MARCH='${MARCH}' SASL=OFF SSL=OPENSSL SKIP_MOCK_TESTS=ON sh .evergreen/compile.sh
        '''),
    )),
    ('run aws tests', Function(
        shell_mongoc(r'''
        # Add AWS variables to a file.
        # Clone in one directory above so it does not get uploaded in working directory.
        git clone --depth=1 git://github.com/mongodb-labs/drivers-evergreen-tools.git ../drivers-evergreen-tools
        cat <<EOF > ../drivers-evergreen-tools/.evergreen/auth_aws/aws_e2e_setup.json
        {
            "iam_auth_ecs_account" : "${iam_auth_ecs_account}",
            "iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}",
            "iam_auth_ecs_account_arn": "arn:aws:iam::557821124784:user/authtest_fargate_user",
            "iam_auth_ecs_cluster": "${iam_auth_ecs_cluster}",
            "iam_auth_ecs_task_definition": "${iam_auth_ecs_task_definition}",
            "iam_auth_ecs_subnet_a": "${iam_auth_ecs_subnet_a}",
            "iam_auth_ecs_subnet_b": "${iam_auth_ecs_subnet_b}",
            "iam_auth_ecs_security_group": "${iam_auth_ecs_security_group}",
            "iam_auth_assume_aws_account" : "${iam_auth_assume_aws_account}",
            "iam_auth_assume_aws_secret_access_key" : "${iam_auth_assume_aws_secret_access_key}",
            "iam_auth_assume_role_name" : "${iam_auth_assume_role_name}",
            "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}",
            "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}",
            "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}"
        }
        EOF
        ''', silent=True),
        shell_mongoc(r'''
        set -o errexit
        # Export the variables we need to construct URIs
        set +o xtrace
        export IAM_AUTH_ECS_ACCOUNT=${iam_auth_ecs_account}
        export IAM_AUTH_ECS_SECRET_ACCESS_KEY=${iam_auth_ecs_secret_access_key}
        set -o xtrace
        sh ./.evergreen/run-aws-tests.sh ${TESTCASE}
        ''', xtrace=False)
    ))
])
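
# Hedged, generic sketch (not the project's actual generator code): an
# OrderedDict-based config like the one above is typically serialized to
# Evergreen YAML with a custom representer so keys keep insertion order.
import yaml
from collections import OrderedDict as OD

yaml.add_representer(OD, lambda dumper, data: dumper.represent_dict(data.items()))
print(yaml.dump(OD([('command', 'expansions.update'),
                    ('params', OD([('file', 'mongoc/expansion.yml')]))]),
                default_flow_style=False))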
Example #11
0
from collections import OrderedDict as OD

# file generated at 2018-07-27 12:54:50 with the following command:
# find_samples.py -i ../NanoAOD/test/datasets_private_2017.txt -m python/samples/metaDict_2017_private.py -c /hdfs/cms/store/user/kaehatah/NanoProduction_v2_2018May23 -V

meta_dictionary = OD()

meta_dictionary["/THQ_4f_Hincl_13TeV_madgraph_pythia8_Fall17/private/USER"] = OD([
    ("crab_string",
     "NanoProduction_v2_2018May23_THQ_4f_Hincl_13TeV_madgraph_pythia8_Fall17__private"
     ),
    ("sample_category", "tHq"),
    ("process_name_specific", "THQ"),
    ("nof_db_events", 124500),
    ("nof_db_files", 9),
    ("fsize_db", 9518371556),
    ("xsection", 0.07096),
    ("use_it", True),
    ("genWeight", True),
    ("comment",
     "status: VALID; size: 9.52GB; nevents: 124.50k; release: 9_4_0; last modified: 2018-05-23 16:15:49"
     ),
])

meta_dictionary["/THW_5f_Hincl_13TeV_madgraph_pythia8_Fall17/private/USER"] = OD([
    ("crab_string",
     "NanoProduction_v2_2018May23_THW_5f_Hincl_13TeV_madgraph_pythia8_Fall17__private"
     ),
    ("sample_category", "tHW"),
    ("process_name_specific", "THW"),
    ("nof_db_events", 50000),
Example #12
0
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print('arguments:')
    print(argstr)

    argfile = osp.join(args.expdir, 'evaluate_args.yaml')

    args.cuda = not args.no_cuda

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    collate_fn = dict(
        collate_fn=list_collate) if args.input_crop == 'rect' else {}

    transforms = get_transforms(input_size=args.input_size,
                                crop=(args.input_crop == 'square'),
                                need=('val', ),
                                backbone=args.backbone)

    if args.dataset.startswith('imagenet'):
        dataset = IdDataset(
            IN1K(args.imagenet_path,
                 args.dataset[len('imagenet-'):],
                 transform=transforms['val']))
        mode = "classification"
    else:
        raise NotImplementedError(
            "Retrieval evaluations not implemented yet, check datasets/retrieval.py to implement the evaluations."
        )

    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        num_workers=args.workers,
                        shuffle=args.shuffle,
                        pin_memory=True,
                        **collate_fn)
    model = get_multigrain(args.backbone,
                           include_sampling=False,
                           pretrained_backbone=args.pretrained_backbone)

    p = model.pool.p

    checkpoints = utils.CheckpointHandler(args.expdir)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        epoch = checkpoints.resume(model,
                                   resume_epoch=args.resume_epoch,
                                   resume_from=args.resume_from,
                                   return_extra=False)
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    if args.pooling_exponent is not None:  # overwrite stored pooling exponent
        p.data.fill_(args.pooling_exponent)

    print("Multigrain model with {} backbone and p={} pooling:".format(
        args.backbone, p.item()))
    # print(model)

    if args.cuda:
        model = utils.cuda(model)

    model.eval()  # freeze batch normalization
    print("Evaluating", args.dataset)

    metrics_history = OD()
    metrics = defaultdict(utils.HistoryMeter)

    embeddings = []
    index = None
    tic()
    for i, batch in enumerate(loader):
        with torch.no_grad():
            if args.cuda:
                batch = utils.cuda(batch)
            metrics["data_time"].update(1000 * toc())
            tic()
            output_dict = model(batch['input'])
        if mode == "classification":
            target = batch['classifier_target']
            top1, top5 = utils.accuracy(output_dict['classifier_output'],
                                        target,
                                        topk=(1, 5))
            metrics["val_top1"].update(top1, n=len(batch['input']))
            metrics["val_top5"].update(top5, n=len(batch['input']))
        elif mode == "retrieval":
            if index is None:
                index = faiss.IndexFlatL2(descriptors.size(1))
            descriptors = output_dict['normalized_embedding']
            for e in descriptors.cpu():
                index.append(e)
        metrics["batch_time"].update(1000 * toc())
        tic()
        print(
            logging.str_metrics(metrics,
                                iter=i,
                                num_iters=len(loader),
                                epoch=epoch,
                                num_epochs=epoch))
    print(logging.str_metrics(metrics, epoch=epoch, num_epochs=1))
    for k in metrics:
        metrics[k] = metrics[k].avg
    toc()

    metrics_history[epoch] = metrics
    checkpoints.save_metrics(metrics_history)
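
# Hedged sketch (utils.accuracy is project code; this is the usual top-k
# recipe it is assumed to follow, returning percentages):
import torch

def topk_accuracy_sketch(output, target, topk=(1, 5)):
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1)        # (N, maxk) predicted class ids
    hits = pred.eq(target.view(-1, 1))        # (N, maxk) boolean hit matrix
    return [hits[:, :k].any(dim=1).float().mean().item() * 100.0 for k in topk]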
Example #13
0
from collections import OrderedDict as OD

# file generated at 2019-09-28 20:27:22 with the following command:
# find_samples.py -V -m python/samples/metaDict_2016_data_DeepTauIDv2p1.py -d ../NanoAOD/test/datasets/txt/datasets_data_2016_17Jul18.txt -p SingleElectron SingleMuon DoubleEG DoubleMuon MuonEG Tau -g ../NanoAOD/data/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt -v 9_4_9 -r 2016 -C +17Jul2018 xDoubleEG/Run2016B-17Jul2018_ver1-v1 -c python/samples/sampleLocations_2016_nanoAOD_data_DeepTauIDv2p1.txt

meta_dictionary = OD()

meta_dictionary["/SingleElectron/Run2016B-17Jul2018_ver2-v1/MINIAOD"] = OD([
    ("crab_string",
     "2016v3_2019Aug29_SingleElectron__Run2016B-17Jul2018_ver2-v1"),
    ("sample_category", "data_obs"),
    ("process_name_specific", "SingleElectron_Run2016B_17Jul2018_ver2_v1"),
    ("nof_db_events", 246440440),
    ("nof_db_files", 1560),
    ("fsize_db", 5824699722422),
    ("xsection", None),
    ("use_it", True),
    ("genWeight", False),
    ("run_range", [273150, 275376]),
    ("golden_run_range", [273158, 275376]),
    ("comment",
     "100.0%; status: VALID; size: 5.82TB; nevents: 246.44M; release: 9_4_9; last modified: 2018-08-02 14:45:46"
     ),
])

meta_dictionary["/SingleElectron/Run2016C-17Jul2018-v1/MINIAOD"] = OD([
    ("crab_string", "2016v3_2019Aug29_SingleElectron__Run2016C-17Jul2018-v1"),
    ("sample_category", "data_obs"),
    ("process_name_specific", "SingleElectron_Run2016C_17Jul2018_v1"),
    ("nof_db_events", 97259854),
    ("nof_db_files", 674),
Example #14
0
def main():

    options = getOptions()

    # The submit command needs special treatment.
    if options.crabCmd == 'submit':

        #--------------------------------------------------------
        # This is the base config:
        #--------------------------------------------------------
        from CRABClient.UserUtilities import config
        config = config()
        
        config.General.requestName = None
        config.General.workArea = "crab_20200706_remainedJobs"
        
        config.General.transferOutputs = True
        config.General.transferLogs = False
        
        config.JobType.pluginName = 'Analysis'
        config.JobType.psetName = 'PSet.py'
        config.JobType.scriptExe = 'crab_script.sh'
        config.JobType.inputFiles = ['example_postproc_2017_MC.py','../scripts/haddnano.py'] # haddnano.py will not be needed once nano tools are in CMSSW
        config.JobType.sendPythonFolder = True
        config.JobType.allowUndistributedCMSSW = True
        
        config.Data.inputDataset = None
        config.Data.inputDBS = 'global'
        config.Data.splitting = 'FileBased' # 'Automatic' #'LumiBased'
        #config.Data.splitting = 'EventAwareLumiBased'
        config.Data.unitsPerJob = 2
        config.Data.totalUnits = 10
        
        config.Data.outLFNDirBase = '/store/user/ssawant/NanoAODPostProc_%d' % era 
        config.Data.publication = False
        config.Data.outputDatasetTag = None ####
        #config.Data.useParent = True
        
        config.Site.storageSite = 'T2_IN_TIFR' # Choose your site. 
        #--------------------------------------------------------
        
        # Will submit one task for each of these input datasets.
        inputDatasets = []
        inputDatasetsInfo = OD()
        
        # Read input NanoAOD dataset from samples.py file
        for sample_name, sample_info in samples.items():
            #print "\n{}: {}".format(sample_name, sample_info)
            if sample_name == 'sum_events'  or  sample_info["type"] == "data":
                continue
            
            if sample_info["use_it"] == False:
                continue
            
            
            if 'NANOAODSIM' in sample_info['NanoAOD']:
                #if not sample_info["process_name_specific"] in ["signal_ggf_spin0_400_hh_4t", "signal_ggf_spin0_400_hh_2v2t", "signal_ggf_spin0_400_hh_4v"]:
                if not sample_info["process_name_specific"] in remained_jobs:
                    continue
                inputDatasets.append(sample_info['NanoAOD'])
                inputDatasetsInfo[sample_info["process_name_specific"]] = sample_info['NanoAOD']
        
        
        
        
        #for inDS in inputDatasets:
        for process_name_specific, inDS in inputDatasetsInfo.items():
            print "\n\n{}, {}".format(process_name_specific, inDS)
            #config.General.requestName = inDS.split('/')[1] # sample name
            config.General.requestName = process_name_specific
            #config.General.workArea = config.General.requestName
            config.Data.inputDataset = inDS
            config.Data.outputDatasetTag = '%s' % (inDS.split('/')[2]) #  Campaign GT details
            
            
            # Submit.
            try:
                print "Submitting for input dataset %s" % (inDS)
                crabCommand(options.crabCmd, config = config, *options.crabCmdOpts.split())
            except HTTPException as hte:
                print "Submission for input dataset %s failed: %s" % (inDS, hte.headers)
            except ClientException as cle:
                print "Submission for input dataset %s failed: %s" % (inDS, cle)
        
    # resubmit FAILED jobs
    elif options.crabCmd == 'resubmit' and options.workArea:
        
        for dir in os.listdir(options.workArea):
            projDir = os.path.join(options.workArea, dir)
            if not os.path.isdir(projDir):
                continue
            # Execute the crab status.
            msg = "Executing (the equivalent of): crab status --dir %s %s" % (projDir, options.crabCmdOpts)
            print "-"*len(msg)
            print msg
            print "-"*len(msg)
         
            result = crabCommand("status", dir = projDir, *options.crabCmdOpts.split())
            print "\n\nresult of the crab resubmit: ",result
            print "result['status']: ",result['status']
            print "result['dagStatus']: ",result['dagStatus']
            if not result['status'] in ['FAILED', 'SUBMITFAILED']:
                continue
            print "crab_resubmit: %s  *******" % projDir
            
            # resubmit
            try:
                crabCommand("resubmit", dir = projDir, *options.crabCmdOpts.split())
            except HTTPException as hte:
                print "Failed executing command %s for task %s: %s" % (options.crabCmd, projDir, hte.headers)
            except ClientException as cle:
                print "Failed executing command %s for task %s: %s" % (options.crabCmd, projDir, cle)
            
            
    # All other commands can be simply executed.
    elif options.workArea:
        
        for dir in os.listdir(options.workArea):
            projDir = os.path.join(options.workArea, dir)
            if not os.path.isdir(projDir):
                continue
            # Execute the crab command.
            msg = "Executing (the equivalent of): crab %s --dir %s %s" % (options.crabCmd, projDir, options.crabCmdOpts)
            print "-"*len(msg)
            print msg
            print "-"*len(msg)
            try:
                crabCommand(options.crabCmd, dir = projDir, *options.crabCmdOpts.split())
            except HTTPException as hte:
                print "Failed executing command %s for task %s: %s" % (options.crabCmd, projDir, hte.headers)
            except ClientException as cle:
                print "Failed executing command %s for task %s: %s" % (options.crabCmd, projDir, cle)
Example #15
0
def makeplots(ccddf,master,truthplus,fitsname,expnums,mjdtrigger,ml_score_cut=0.,skip=False):

    season = os.environ.get('SEASON')
    season = str(season)

    rootdir = os.environ.get('ROOTDIR')
    rootdir = os.path.join(rootdir,'exp')

### get data
    if os.path.isfile(master):
        mlist = Table.read(master)
        masdf = mlist.to_pandas()
    else:
        skip = True
        print "No master list found with filename",master+'.'
        print "Plots requiring a master list (SNR, RA/DEC hex maps) will not be created."

    df1 = truthplus

    outdir = os.path.join(os.environ.get('ROOTDIR2'),'plots')
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    rtable = Table.read(fitsname)
    rdf1 = rtable.to_pandas()
    
### cut out rejects ###
    df = df1.loc[df1['REJECT'] == 0]
    rdf = rdf1.loc[rdf1['PHOTFLAG'].isin([4096,12288])]

### TEMPLATE FAILURES (temporary section?) ###
    ccdhexes = masdf['fullhex'].loc[masdf['epoch']==4].values
    ccdexp = masdf.loc[(masdf['epoch']==4) & (masdf['fullhex'].isin(ccdhexes))]

    tempfails,rafail,decfail = [],[],[]

    explist = list(ccdexp['expnum'].values)

    for t in sorted(explist):
        if (ccddf.loc[str(t)]==2).any():
            tempfails.append(t)
            rafail.append(masdf['RA'].loc[masdf['expnum']==t].values[0])
            decfail.append(masdf['DEC'].loc[masdf['expnum']==t].values[0])
    
    print len(tempfails)
    print tempfails

    np.savetxt('event3_ccds.txt',np.c_[tempfails,rafail,decfail],fmt=['%d','%.6f','%.6f'],delimiter='\t',header='EXP\tRA\t\tDEC',comments='')

    notemp = masdf.loc[masdf['expnum'].isin(tempfails)]
    yestemp = masdf.loc[(masdf['expnum'].isin(explist)) & (~masdf['expnum'].isin(tempfails))]

    notemp.ix[notemp['RA'] > 180, 'RA'] = notemp.ix[notemp['RA'] > 180, 'RA'] - 360.
    yestemp.ix[yestemp['RA'] > 180, 'RA'] = yestemp.ix[yestemp['RA'] > 180, 'RA'] - 360.

    xmax = max([max(notemp['RA']),max(yestemp['RA'])])+2
    ymax = max([max(notemp['DEC']),max(yestemp['DEC'])])+2
    xmin = min([min(notemp['RA']),min(yestemp['RA'])])-2
    ymin = min([min(notemp['DEC']),min(yestemp['DEC'])])-2

    plt.xlim(xmin,xmax)
    plt.ylim(ymin,ymax)

    plt.scatter(notemp['RA'],notemp['DEC'],marker='H',c='r',s=200,label='failed')
    plt.scatter(yestemp['RA'],yestemp['DEC'],marker='H',c='b',s=200,label='succeeded')
    plt.title('Template failures - GW170104')
    plt.xlabel('RA')
    plt.ylabel('DEC')
    plt.legend()
    #plt.show()
    plt.savefig('templatefailures.png',dpi=200)
    
    plt.clf()
    sys.exit()
    
### EFFICIENCY ###

    bins = np.arange(17,25,1)

    fhist, bin_edges = np.histogram(df['MAG'], bins=bins)
    thist, bin_edges = np.histogram(df1['TRUEMAG'], bins=bins)
    

    plt.xlim(17,25)
    plt.ylim(0,100)
    plt.plot(bins[:-1], fhist*100.0/thist, lw=4)
    plt.scatter(bins[:-1], fhist*100.0/thist, lw=4)
    plt.title('Efficiency')
    plt.xlabel('Mag')
    plt.ylabel('Percent Found')
    plt.savefig(os.path.join(outdir,'efftest_'+season+'.png'))
    plt.clf()

### ML_SCORE HISTOGRAM - FAKES ###

    plt.hist(df1['ML_SCORE'],bins=np.linspace(0.3,1,100))
    plt.title('ML_SCORE OF FAKES')
    plt.xlabel('ml_score')
    plt.ylabel('# of fakes')
    plt.savefig(os.path.join(outdir,'fakemltest_'+season+'.png'))
    plt.clf()

### PULL --> (MAG-TRUEMAG)/MAG_ERR -- FOR FAKES ### 

    magdiff = df['MAG']-df['TRUEMAG']
    pull = magdiff/df['MAGERR']
    
    #bins = np.linspace(int(min(pull)),int(max(pull)+1),100)
    bins = np.linspace(-3,3,100)
    plt.hist(pull,bins=bins)
    plt.xlabel("Magnitude Pull (MAG-TRUEMAG)/MAGERR")
    plt.ylabel("Number of Objects")
    plt.title("Magnitude Pull for Fakes")
    plt.xlim(bins.min(),bins.max())

    plt.savefig(os.path.join(outdir,'pulltest_'+season+'.png'))
    plt.clf()

### NUMBER OF REAL CANDIDATES PER CCD ###

    bins = np.arange(1,64,1)
    
    for e in expnums:
        ccdcand = rdf['CCDNUM'].loc[rdf['EXPNUM'] == e]
        ccdhist, bin_edges = np.histogram(ccdcand, bins=bins)
        
        plt.xlim(0,63)
        plt.ylim(0,max(ccdhist)+1)
        plt.scatter(bins[:-1], ccdhist)
        plt.plot(bins[:-1], ccdhist, label=str(e))
        plt.grid(True)
    
    plt.xlabel('CCD')
    plt.ylabel('# of candidates')
    plt.title('# of Candidates per CCD')
    plt.legend(fontsize='small')
    plt.savefig(os.path.join(outdir,'ccdtest'+season+'.png'))
    plt.clf()

### SNR VS. HEX -- FAKES ###
    if not skip:
        masdf_ord = masdf
        masdf_ord.ix[masdf_ord['RA'] > 180, 'RA'] = masdf_ord.ix[masdf_ord['RA'] > 180, 'RA'] - 360.
        masdf_ord = masdf_ord.sort_values(by='RA')
        SNR = OD()
        maxepoch = max(masdf_ord['epoch'])
        for hx in masdf_ord['hex'].unique():
            SNR[hx]=np.array([-5.]*maxepoch)
            epexpdf = masdf_ord[['hex','epoch','expnum']].loc[masdf_ord['hex']==hx]
            for ep in epexpdf['epoch'].unique():
                snrexp = epexpdf['expnum'].loc[epexpdf['epoch']==ep].values[0]
                snrs = df['SNR'].loc[df['EXPNUM']==snrexp].values
                if len(snrs)==0:
                    SNR[hx][ep-1] = 0
                else:
                    meansnr = np.mean(snrs)
                    SNR[hx][ep-1] = meansnr

        SNRdf = pd.DataFrame.from_dict(SNR,orient='index')
        SNRdf = SNRdf.reset_index()
        cols = ['hex']

        epochs = np.array(range(maxepoch))+1
        for i in epochs:
            cols.append(str(i))
        SNRdf.columns = cols

        inds = np.array(SNRdf.index.values)+1

        plt.figure(figsize=(16,9))
        
        ax = plt.axes()
        ax.yaxis.grid(True)
        ax.xaxis.grid(True)

        major = MultipleLocator(5)
        majForm = FormatStrFormatter('%d')
        minor = MultipleLocator(1)
        ax.xaxis.set_major_locator(major)
        ax.xaxis.set_major_formatter(majForm)
        ax.xaxis.set_minor_locator(minor)

        for e in epochs:
            plt.plot(inds, SNRdf[str(e)], '-o', markeredgewidth=0, label='epoch '+str(e), antialiased=True)
        
        plt.legend(loc='upper left',fontsize='small')
        plt.axis([0, max(inds)+1, -10, max(SNRdf.max(numeric_only=True))+5])
        plt.title("Average SNR of MAG=20 fakes by hex index and epoch - GW170104")

        plt.xlabel('Hex Index')
        plt.ylabel('Mean SNR')

        savefile = 'SNR_test.png'
        plt.savefig(os.path.join(outdir,savefile),dpi=400)

        plt.clf()

    sys.exit()

### RA/DEC MAPS ###
    
    radecdf = rdf
    if abs(max(radecdf['RA'])-min(radecdf['RA']))>180:
        for ira in range(len(radecdf['RA'])):
            if radecdf['RA'][ira]>180:
                radecdf['RA'][ira] = radecdf['RA'][ira]-360

    radecdf = radecdf.drop_duplicates('cand_ID')

    radecdf = radecdf.loc[radecdf['PHOTPROB'] > ml_score_cut]

    #plt.hist2d(radecdf['RA'],radecdf['DEC'],50)
    
    mapdir = os.path.join(outdir,'maps')
    if not os.path.isdir(mapdir):
        os.mkdir(mapdir)    

    hexex = []

    ### this loop gets the full set of first epoch exposures of each hex.
    ### if there are two (or more), it chooses the one with the best t_eff.
    for h in masdf['fullhex'].unique():
        exepteff = masdf[['expnum','epoch','t_eff']].loc[masdf['fullhex'] == h]
        cut = exepteff[['expnum','epoch','t_eff']].loc[exepteff['epoch']==1]
        if len(cut)>1:
            cut = cut.loc[cut['t_eff'] == cut['t_eff'].ix[cut['t_eff'].idxmax()]]
        hexex.append(cut['expnum'].values[0])

    radecdf = radecdf.loc[radecdf['EXPNUM'].isin(hexex)]

    ### overall map
    plt.scatter(radecdf['RA'],radecdf['DEC'],c=radecdf['PHOTPROB'],edgecolor='',s=5)
    plt.xlim(min(radecdf['RA'])-0.2,max(radecdf['RA'])+0.2)
    plt.ylim(min(radecdf['DEC'])-0.2,max(radecdf['DEC'])+0.2)
    plt.clim(0,1)
    plt.colorbar().set_label('ml_score')
    plt.title('Candidate Sky Map')
    plt.xlabel('RA')
    plt.ylabel('DEC')
    plt.savefig(os.path.join(outdir,'fullmap_'+season+'.png'))
    plt.clf()

    ### individual hex maps
    if not skip:
        for e in hexex:
            print e
            out = ''
            out = os.path.join(rootdir,str(masdf['nite'].loc[masdf['expnum']==e].values[0]))
            out = os.path.join(out,str(e))
            out = os.path.join(out,str(e)+'.out')

            odf = pd.read_table(out,delim_whitespace=True,header=None,names=['expnum','band','ccd','ra1','dec1','ra2','dec2','ra3','dec3','ra4','dec4'])

            odf = odf.drop_duplicates()
            odf = odf.reset_index(drop=True)

            odf.loc[odf.ra1 > 270., 'ra1'] -= 360.
            odf.loc[odf.ra2 > 270., 'ra2'] -= 360.
            odf.loc[odf.ra3 > 270., 'ra3'] -= 360.
            odf.loc[odf.ra4 > 270., 'ra4'] -= 360.

            ras = np.concatenate((odf['ra1'].tolist(),odf['ra2'].tolist(),odf['ra3'].tolist(),odf['ra4'].tolist()),axis=0)

            decs = np.concatenate((odf['dec1'].tolist(),odf['dec2'].tolist(),odf['dec3'].tolist(),odf['dec4'].tolist()),axis=0)

            for i in range(len(odf)):
                ra = odf.loc[i, ['ra1','ra2','ra3','ra4']]
                dec = odf.loc[i, ['dec1','dec2','dec3','dec4']]
                chip = str(odf.loc[i, 'ccd'])
                midra = (max(ra)+min(ra))/2.
                middec = (max(dec)+min(dec))/2.
                middle = (midra, middec)
                # order the four CCD corners around their centroid so the
                # plotted outline is a closed, non-self-intersecting box
                cs = list(zip(ra, dec))
                cent = (sum(c[0] for c in cs)/len(cs), sum(c[1] for c in cs)/len(cs))
                cs.sort(key=lambda c: math.atan2(c[1]-cent[1], c[0]-cent[0]))
                cs.append(cs[0])
                plt.plot([c[0] for c in cs], [c[1] for c in cs], ls=':', lw=0.5, c='k')
                plt.annotate(chip, xy=middle, ha='center', va='center', family='sans-serif', fontsize=12, alpha=0.3)

            plt.xlim(min(ras)-0.2,max(ras)+0.2)
            plt.ylim(min(decs)-0.2,max(decs)+0.2)

            pltdf = radecdf.loc[radecdf['EXPNUM'] == e]
            plt.scatter(pltdf['RA'],pltdf['DEC'],c=pltdf['PHOTPROB'],edgecolor='none')
            plt.clim(0,1)
            plt.colorbar().set_label('ml_score')
            hexname = masdf['hex'].loc[masdf['expnum'] == e].values[0]
            plt.title('Candidate Sky Map: Hex '+str(hexname)+' (Exposure '+str(e)+')')
            plt.xlabel('RA')
            plt.ylabel('DEC')
            plt.savefig(os.path.join(mapdir,'map_'+str(hexname)+'_'+str(e)+'.png'),dpi=200)
            plt.clf()

    lcdir = os.path.join(outdir,'lightcurves')

    band = 'i'

    numsnid = len(rdf['SNID'].unique())
    ctsnid = 0
    noct,yesct = 0,0

    rdf['cutflag'] = np.zeros(len(rdf))

    for sn in rdf['SNID'].unique():
        #ctsnid += 1
        
        new = rdf[['MJD','FLUXCAL','FLUXCALERR','PHOTPROB']].loc[rdf['SNID']==sn]
        
        if all(i < ml_score_cut for i in new['PHOTPROB']):
            noct+=1
            continue
        
        yesct+=1
        
        if (noct+yesct) % 100 == 0:
            print(str(noct+yesct)+'/'+str(numsnid))

        rdf.loc[rdf['SNID']==sn,'cutflag'] = 1
        
        # NOTE: this `continue` is always reached, so the light-curve plotting
        # below never executes; remove it (and create lcdir first) to regenerate
        # per-SNID light curves.
        continue

        mjd = np.array(new['MJD'].tolist())
        flux = np.array(new['FLUXCAL'].tolist())
        fluxerr = np.array(new['FLUXCALERR'].tolist())
        ml_score = np.array(new['PHOTPROB'].tolist())

        plt.errorbar(mjd-mjdtrigger,flux,yerr=fluxerr,fmt='none',ecolor='k',zorder=0)
        plt.scatter(mjd-mjdtrigger,flux,c=ml_score,edgecolor='none',s=40,zorder=1)
        plt.clim(0,1)
        plt.title('SNID '+str(sn)+' ('+band+')')
        plt.colorbar().set_label('ml_score')
        plt.xlabel('MJD - MJD(TRIGGER)')
        plt.ylabel('FLUX')
        plt.savefig(os.path.join(lcdir,'SNID'+str(sn)+'.png'))
        plt.clf()
    return rdf
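A pandas groupby can express the same "best t_eff first-epoch exposure per hex" selection as the loop above; a minimal sketch, assuming only the 'fullhex', 'epoch', 'expnum' and 't_eff' columns used there (masdf_demo is illustrative data, not the real master table):

import pandas as pd

# Hypothetical stand-in for masdf: one row per exposure.
masdf_demo = pd.DataFrame({
    'fullhex': ['A', 'A', 'B', 'B', 'B'],
    'epoch':   [1,   1,   1,   2,   1],
    'expnum':  [101, 102, 201, 202, 203],
    't_eff':   [0.6, 0.9, 0.7, 0.8, 0.5],
})

# Keep first-epoch rows, then the row with the largest t_eff within each hex.
first_epoch = masdf_demo.loc[masdf_demo['epoch'] == 1]
best = first_epoch.loc[first_epoch.groupby('fullhex')['t_eff'].idxmax()]
print(best['expnum'].tolist())  # [102, 201]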
Example #16
0

# botlog.setLevel = logging.DEBUG
logging.basicConfig(filename="/var/log/errbot/icinga2bot.log")
botlog = logging.getLogger('icinga2bot')
botlog.setLevel(logging.INFO)

## Errbot Plugin Config

# One INI file for API host and authentication, user privs.
# There will be no defaults for users specified here, but the default
# 'root', 'icinga2' user and password for the icinga2 api are included
# in the INI file.

default_server = OD((
    ('host','localhost'), ('port', '5665'), ('api','v1'),
    ('ca','/etc/icinga2/pki/ca.crt')
    ))
# ConfigParser normally converts all options to lower case, but the icinga2
# API is case-sensitive, so optionxform is overridden below (a short
# demonstration of the difference follows this snippet).
default_events = OD((
    ('CheckResult', 'off'), ('StateChange', 'on'), ('Notification', 'on'),
    ('AcknowledgementSet', 'on'), ('AcknowledgementCleared', 'on'),
    ('CommentAdded', 'on'), ('CommentRemoved', 'on'),
    ('DowntimeAdded', 'on'), ('DowntimeRemoved', 'on'), ('DowntimeTriggered', 'on')
    ))
default_config = OD(( ('server',default_server), ('events',default_events) ))

cfg = configparser.ConfigParser()
cfg.optionxform = str  #make options case-sensitive
cfg.read_dict(default_config)
configfile = path.join(path.dirname(path.realpath(__file__)), 'icinga2bot.ini')
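As noted in the comments above, ConfigParser lower-cases option names unless optionxform is overridden, which would mangle the case-sensitive Icinga2 event names; a minimal sketch of the difference (the INI text below is illustrative, not the shipped icinga2bot.ini):

import configparser

ini_text = "[events]\nStateChange = on\nCheckResult = off\n"

default_cp = configparser.ConfigParser()
default_cp.read_string(ini_text)
print(list(default_cp['events']))   # ['statechange', 'checkresult'] -- lower-cased

case_cp = configparser.ConfigParser()
case_cp.optionxform = str           # keep option names exactly as written
case_cp.read_string(ini_text)
print(list(case_cp['events']))      # ['StateChange', 'CheckResult']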
Example #17
0
def generate_master_conf(conf_str, master = True):
    conf = json.loads(conf_str)

    with open(thisdir + '/grants.json') as source:
        grants_conf = json.load(source)

    if 'host' not in conf:
        host = 'localhost'
    else:
        host = conf['host']

    user = conf['user']

    if 'readuser' in conf:
        readuser = conf['readuser']
    else:
        readuser = None
   
    if 'passwd' in conf:
        passwd = conf['passwd']
    else:
        passwd = grants_conf[user]['passwd']

    master_conf = OD([
        ('module', 'mysqlmaster:MySQLMasterServer'),
        ('config', OD())
    ])

    master_conf['config']['db_params'] = OD([
        ('host', host),
        ('db', 'dynamoserver'),
        ('user', user),
        ('passwd', passwd),
        ('scratch_db', 'dynamo_tmp')
    ])

    if master:
        master_conf['config']['applock'] = OD()
        master_conf['config']['applock']['db_params'] = OD([
            ('host', host),
            ('db', 'dynamoregister'),
            ('user', user),
            ('passwd', passwd),
            ('scratch_db', 'dynamo_tmp')
        ])

    if readuser is not None:
        if 'readpasswd' in conf:
            readpasswd = conf['readpasswd']
        else:
            readpasswd = grants_conf[readuser]['passwd']

        master_conf['readonly_config'] = OD()
        master_conf['readonly_config']['db_params'] = OD([
            ('host', host),
            ('db', 'dynamoserver'),
            ('user', readuser),
            ('passwd', readpasswd),
            ('scratch_db', 'dynamo_tmp')
        ])

    return master_conf
Example #18
0
class Treeify(object):
    @args.property(short='c', flag=True, help='output in colour')
    def colour(self):
        return False

    @args.property(short='a', flag=True, help='ascii instead of boxes')
    def ascii(self):
        return False

    _oneof = OD([(x, 'input as %s' % x) for x in ['json', 'xml', 'yaml']])

    @args.property(oneof=_oneof,
                   short=True,
                   flag=True,
                   default=list(_oneof.keys())[0])
    def format(self):
        return

    def __init__(self, colour=False, ascii=False):
        if colour: self.colour = True
        if ascii: self.ascii = True
        self.fundamentals = [str, int, float, bool]  # scalar types rendered as leaf nodes
        self.collections = [list, dict, OD]
        self.colours = Colours(colour=self.colour)

    def treeFix(self, node):
        if not node:
            return dict()
        if type(node) in self.fundamentals:
            return {
                ''.join([self.colours.Purple,
                         str(node), self.colours.Off]): dict()
            }
        if type(node) is list:
            new = OD()
            for n in range(len(node)):
                key = ''.join(
                    ['[', self.colours.Teal,
                     str(n), self.colours.Off, ']'])
                new[key] = self.treeFix(node[n])
            return new
        if type(node) in [dict, OD]:
            for key in list(node.keys()):
                tipe = type(node[key])
                value = self.treeFix(node[key])
                del node[key]
                if len(key) and key[0] in ['@', '#']:
                    node[''.join([self.colours.Green, key,
                                  self.colours.Off])] = value
                else:
                    if tipe in self.fundamentals:
                        parts = [self.colours.Green]
                    else:
                        parts = [self.colours.Teal]
                    parts += [key, self.colours.Off]
                    if self.format == 'xml':
                        parts = ['<'] + parts + ['>']
                    node[''.join(parts)] = value
        return node

    def process(self, input, output=sys.stdout):
        if type(input) in self.collections:
            o = input
        elif isinstance(input, IOBase) or isinstance(input, StringIO):
            input = input.read()
        if type(input) in [str]:
            if self.format == 'xml':
                o = xmltodict.parse(input)
            elif self.format == 'yaml':
                o = yaml.safe_load(input)
            else:  # == 'json'
                o = json.loads(input)
        if self.ascii:
            tr = LeftAligned()
        else:
            tr = LeftAligned(
                draw=BoxStyle(label_space=0, gfx=BOX_LIGHT, horiz_len=1))

        output.write(tr(self.treeFix(o)))

    @args.operation
    @args.parameter(name='files', short='f', nargs='*', metavar='file')
    @args.parameter(name='output', short='o')
    def bark(self, files=[], output=None):
        _output = sys.stdout
        if output:
            _output = open(output(), 'w')
        if len(files) == 0:
            self.process(sys.stdin, _output)
        else:
            for file in files:
                with open(file) as _input:
                    self.process(_input, _output)
        if output:
            _output.close()
        return

    @args.operation
    def test(self):

        h = '\n' + '_' * 47

        j = {
            'one': {
                'one_one': {
                    'one_one': [{
                        '#text': '_1_1_1'
                    }, {
                        '#text': '_1_1_2'
                    }]
                },
                'one_two': {
                    '@attr': '_1_2',
                    '#text': '_1_2_1'
                }
            }
        }

        print(h)
        prettyPrintLn(j)
        print(h)

        f = '../test/treeify.json'
        with open(f, 'w') as output:
            json.dump(j, output)
        self.bark([f])
        print(h)

        #self.ascii = True
        self.colour = True
        self.process(StringIO(json.dumps(j)), sys.stdout)
        print(h)

        x = xmltodict.unparse(j)
        doParse(StringIO(str(x)), sys.stdout, colour=True)
        print(h)
        self.format = 'xml'
        self.process(StringIO(str(x)), sys.stdout)
        print(h)

        sio = StringIO()
        prettyPrintLn(j, output=sio, style=Style.YAML, colour=False)
        y = sio.getvalue()
        sio.close()
        #print y
        y = y.replace('#text', '"#text"')
        y = y.replace('@attr', '"@attr"')
        #print y
        prettyPrintLn(j, output=sys.stdout, style=Style.YAML, colour=True)
        print(h)
        self.format = 'yaml'
        self.process(StringIO(y), sys.stdout)

        return
Example #19
0
            # plot results, making the Nemex lines stand out more, and write Nemex results to file
            if 'nemex' in system:
                line_width = 2.0
                line_style = 'solid'
                # compare a nemex output against the gold standard
                precision_by_extraction, corrects, incorrects, unknowns = compare(gold_index,
                                                                                  extraction_index, True)
                unknowns_and_weights, no_corr, no_incorr, no_either, new_corr, new_incorr = estimate_weights(
                    gold_corrects, gold_incorrects, unknowns)

                write_new_vocab(new_corr, True, output_folder, data_set_name, system)
                write_new_vocab(new_incorr, False, output_folder, data_set_name, system)

                if do_learn:
                    all_cleaned = clean(new_corr, unknowns)
                    learned_corrects = OD()
                    for i, c_extrs in all_cleaned.items():
                        learned_corrects[i] = []
                        for c_extr in c_extrs:
                            if c_extr in corrects[i].keys():
                                learned_corrects[i].append(c_extr)
                    print('for the system: ' + system + ' on data set ' + data_set_name)
                    print('# of correct learned extractions:', sum([len(extrs) for extrs in learned_corrects.values()]))

                num_unknowns = sum([len(e) for e in unknowns.values()])
                with open(output_folder + data_set_name + '/' + system + '_estimation.txt', 'w+') as est_results:
                    est_results.write('total unknown extractions: ' + str(num_unknowns) + '\n')
                    est_results.write('no correct doc or query:(labeled as 0) ' + str(no_corr) + ' / ' +
                                      str(num_unknowns) + ' = ' + str(no_corr / num_unknowns) + '\n')
                    est_results.write('no incorrect doc or query:(labeled as 1) ' + str(no_incorr) + ' / ' +
                                      str(num_unknowns) + ' = ' + str(no_incorr / num_unknowns) + '\n')
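compare() and estimate_weights() are project-specific, but the bookkeeping above (precision measured over extractions whose gold label is known, with unknowns counted separately) can be illustrated with a small hedged sketch; the per-document dict shapes here are assumptions, not the real data structures:

def precision_over_known(corrects, incorrects):
    """Precision counting only extractions with a known gold label.

    Both arguments map a document id to a list of extractions, loosely
    mirroring the per-document dicts used in the snippet above.
    """
    n_correct = sum(len(v) for v in corrects.values())
    n_incorrect = sum(len(v) for v in incorrects.values())
    known = n_correct + n_incorrect
    return n_correct / known if known else 0.0

print(precision_over_known({'d1': ['a', 'b']}, {'d1': ['c']}))  # ~0.667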
 def post(self):
     if not self.global_modes['allow_registration']:
         self.see_other('home', error='Registration is currently disabled.')
         return
     try:
         with AccountSaver(rqh=self) as saver:
             email = self.get_argument('email', None)
             saver['first_name'] = self.get_argument('first_name', None)
             saver['last_name'] = self.get_argument('last_name', None)
             university = self.get_argument('university', None)
             if not university:
                 university = self.get_argument('university_other', None)
             saver['university'] = university
             saver['department'] = self.get_argument('department', None)
             saver['pi'] = utils.to_bool(self.get_argument('pi', False))
             gender = self.get_argument('gender', None)
             if gender:
                 saver['gender'] = gender.lower()
             group_size = self.get_argument('group_size', None)
             if group_size:
                 saver['group_size'] = group_size
             try:
                 saver['subject'] = int(self.get_argument('subject'))
             except (tornado.web.MissingArgumentError, ValueError,
                     TypeError):
                 saver['subject'] = None
             saver['address'] = dict(
                 address=self.get_argument('address', None),
                 zip=self.get_argument('zip', None),
                 city=self.get_argument('city', None),
                 country=self.get_argument('country', None))
             saver['invoice_ref'] = self.get_argument('invoice_ref', None)
             saver['invoice_address'] = dict(
                 address=self.get_argument('invoice_address', None),
                 zip=self.get_argument('invoice_zip', None),
                 city=self.get_argument('invoice_city', None),
                 country=self.get_argument('invoice_country', None))
             saver['phone'] = self.get_argument('phone', None)
             if not email:
                 raise ValueError('Email is required.')
             saver.set_email(email)
             saver['owner'] = saver['email']
             saver['role'] = constants.USER
             saver['status'] = constants.PENDING
             saver.check_required()
             saver.erase_password()
     except ValueError as msg:
         kwargs = OD()
         for key in self.KEYS:
             kwargs[key] = saver.get(key) or ''
         for key in self.ADDRESS_KEYS:
             kwargs[key] = saver.get('address', {}).get(key) or ''
         for key in self.ADDRESS_KEYS:
             kwargs['invoice_' + key] = saver.get('invoice_address', {}).\
                 get(key) or ''
         self.see_other('register', error=str(msg), **kwargs)
         return
     try:
         template = settings['ACCOUNT_MESSAGES'][constants.PENDING]
     except KeyError:
         pass
     else:
         account = saver.doc
         with MessageSaver(rqh=self) as saver:
             saver.create(template,
                          account=account['email'],
                          url=self.absolute_reverse_url(
                              'account', account['email']))
             # Recipients are hardwired here.
             saver.send([a['email'] for a in self.get_admins()])
     self.see_other('registered')
Example #21
0
'''

from collections import OrderedDict as OD
import matplotlib.pyplot as plt
import pickle

from X_corr import *
from plot_path import path

save_to_disk = True
use_log = True

all_C = [1.0, 1.5, 1.75, 2.0]  # C values, assume pre-calculated models
n = 1000  # number of grid points for plotting

all_params = OD()
x = np.linspace(-10, 10, n)
fig, axes = plt.subplots(len(all_C), 1, sharex=True)
if use_log:
    fig.suptitle(
        'GMM approximate correction distribution log-pdfs with varying C\nn={}'
        .format(n))
else:
    fig.suptitle(
        'GMM approximate correction distribution pdfs with varying C\nn={}'.
        format(n))
for i, C in enumerate(all_C):
    filename = './X_corr/supplement_test_x_corr_params_C{}.pickle'.format(C)
    with open(filename, 'rb') as f:
        all_params[str(C)] = pickle.load(f)
    normal_sigma = np.sqrt(C)
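The pickled X_corr parameter files are project-specific, but the curves being drawn are just Gaussian-mixture pdfs (or log-pdfs, per the use_log switch) evaluated on the x grid; a minimal sketch of that evaluation, assuming each parameter set reduces to per-component weights, means and sigmas:

import numpy as np

def gmm_pdf(x, weights, means, sigmas):
    """Evaluate a 1-D Gaussian-mixture pdf on the grid x."""
    x = np.asarray(x)[:, None]            # shape (n, 1)
    means = np.asarray(means)[None, :]    # shape (1, k)
    sigmas = np.asarray(sigmas)[None, :]
    comps = np.exp(-0.5 * ((x - means) / sigmas) ** 2) / (sigmas * np.sqrt(2 * np.pi))
    return comps @ np.asarray(weights)    # weighted sum over components

x_demo = np.linspace(-10, 10, 1000)
pdf = gmm_pdf(x_demo, weights=[0.3, 0.7], means=[-2.0, 1.0], sigmas=[1.0, 2.0])
log_pdf = np.log(pdf)                     # what the use_log branch would plot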
Example #22
0
def get_menu(dev):
    menu = OD()
    menu['FIR LPF'] = lambda dev: process_cb('fircalc', dev)
    return menu
Example #23
0
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print(argstr)

    argfile = osp.join(args.expdir, 'evaluate_args.yaml')

    args.cuda = not args.no_cuda

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    collate_fn = dict(
        collate_fn=my_collate) if args.input_crop == 'rect' else {}

    transforms = get_transforms(input_size=args.input_size, \
        crop=(args.input_crop == 'square'), need=('val',),
        backbone=args.backbone)

    if args.dataset.startswith('imagenet'):
        dataset = IdDataset(
            IN1K(args.imagenet_path,
                 args.dataset[len('imagenet-'):],
                 transform=transforms['val']))
        mode = "classification"
    else:
        dataset = IdDataset(
            meizi_dataset(root=args.meizi_path, transform=transforms['val']))
        mode = "retrieval"

    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        num_workers=args.workers,
                        shuffle=args.shuffle,
                        pin_memory=True,
                        **collate_fn)

    import torchvision.datasets as datasets
    val_loader = torch.utils.data.DataLoader(FidDataset(
        datasets.ImageFolder(args.meizi_path, transforms['val'])),
                                             batch_size=args.batch_size,
                                             shuffle=args.shuffle,
                                             num_workers=args.workers,
                                             pin_memory=True,
                                             **collate_fn)

    # ipdb.set_trace()

    model = get_multigrain(args.backbone,
                           include_sampling=False,
                           pretrained_backbone=args.pretrained_backbone)

    p = model.pool.p
    checkpoints = utils.CheckpointHandler(args.expdir)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        epoch = checkpoints.resume(model,
                                   resume_epoch=args.resume_epoch,
                                   resume_from=args.resume_from,
                                   return_extra=False)
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    if args.pooling_exponent is not None:  # overwrite stored pooling exponent
        p.data.fill_(args.pooling_exponent)

    model = utils.cuda(model)

    print("Multigrain model with {} backbone and p={} pooling:".format(
        args.backbone, p.item()))
    #print(model)

    #temp = torch.stack(temp, dim=0)

    if True:
        #
        gpuDeviceIds = [1]
        device = torch.device('cuda:{}'.format(gpuDeviceIds[0]) if torch.cuda.
                              is_available() else 'cpu')
        # device = torch.device('cuda:{}'.format('0,1,2,3') if torch.cuda.is_available() else 'cpu')
        model = torch.nn.DataParallel(model, device_ids=gpuDeviceIds)
        model.to(device)
        # model.eval()
    else:
        # only use one gpu
        model.cuda()
        model.eval()

    metrics_history = OD()
    metrics = defaultdict(utils.HistoryMeter)

    tic()

    start_id = 0

    imageCount = len(dataset)
    Dim = 2048

    xb = MmapVectorUtils.Open(args.embeddingFilePath,
                              True,
                              shape=(imageCount, Dim + 2))

    # dataloader_iter = iter(loader)
    # data, target, fid = next(dataloader_iter)

    multiGpu = True

    # dataloader_iter = iter(loader)

    # print("val_loader lens", len(loader))
    # length = len(loader)
    # for step in tqdm(range(length)):
    #     try:
    #         dict_temp = next(dataloader_iter)

    #         data = dict_temp['input']
    #         classifier_target = dict_temp['classifier_target']
    #         vid = dict_temp['vid']
    #         frame_id = dict_temp['frame_id']
    #         instance_target = dict_temp['instance_target']

    #         batch = utils.cuda(data)

    #         data = batch.to(device) if True else data.cuda()

    #         data = data.to(device) if multiGpu else data.cuda()

    #         output = model(data)
    #         output = output['normalized_embedding']
    #         cpuOutput = output.cpu().detach().numpy()

    #         if not np.all(np.isfinite(cpuOutput)):
    #             print("Return infinit results in step ", step)
    #             continue

    #         yield target, fid, cpuOutput
    #     except Exception as e:
    #         print("error when prossess dataloader_iter, step ", step)
    #         print('Error:', e)

    for i, batch in enumerate(loader):
        #try:
        batch_vid = batch['vid']
        batch_fid = batch['frame_id']

        with torch.no_grad():
            #if args.cuda:
            if True:
                batch = utils.cuda(batch)

            metrics["data_time"].update(1000 * toc())
            tic()
            batch_input_conv = torch.stack(batch['input'], dim=0)

            batch_input_conv = batch_input_conv.to(
                device) if True else batch_input_conv.cuda()

            output_dict = model(batch_input_conv)
    #             data = batch.to(device) if True else data.cuda()

    #             output = model(data)
    #             cpuOutput = output.cpu().detach().numpy()

    # data = data.to(self.device) if self.multiGpu else data.cuda()
    # output = model(data)
    # output = output['normalized_embedding']
    # cpuOutput = output.cpu().detach().numpy()

    # output_dict = model(batch['input'])
    # output_dict = output_dict.cpu().detach().numpy()

    # metrics["data_time"].update(1000 * toc());
    # tic()

        if mode == "classification":
            # target = batch['classifier_target']
            # top1, top5 = utils.accuracy(output_dict['classifier_output'], target, topk=(1, 5))
            # metrics["val_top1"].update(top1, n=len(batch['input']))
            # metrics["val_top5"].update(top5, n=len(batch['input']))
            raise ValueError('only focus on retrival')

        elif mode == "retrieval":
            descriptors = output_dict['normalized_embedding']

            n, dim = descriptors.shape
            end_id = n + start_id
            end_id = min(imageCount, end_id)
            n = end_id - start_id

            xb[start_id:end_id, 0:1] = np.array(batch_vid).reshape(n, 1)
            xb[start_id:end_id, 1:2] = np.array(batch_fid).reshape(n, 1)

            xb[start_id:end_id, 2:] = descriptors[0:n].cpu().numpy()

            start_id += n
        metrics["batch_time"].update(1000 * toc())
        tic()
        print(
            logging.str_metrics(metrics,
                                iter=i,
                                num_iters=len(loader),
                                epoch=epoch,
                                num_epochs=epoch))

    print(logging.str_metrics(metrics, epoch=epoch, num_epochs=1))
    for k in metrics:
        metrics[k] = metrics[k].avg

    toc()

    metrics_history[epoch] = metrics
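MmapVectorUtils.Open is internal to this project, but the layout requested above (one row per image: vid, frame_id, then a 2048-dim embedding) maps directly onto numpy's memmap; a minimal sketch under that assumption, with made-up sizes and data:

import numpy as np

image_count, dim = 1000, 2048
xb_demo = np.memmap('embeddings_demo.f32', dtype='float32', mode='w+',
                    shape=(image_count, dim + 2))

# Write one batch: ids in the first two columns, embeddings after them.
batch_vid = np.arange(8, dtype='float32')
batch_fid = np.arange(8, dtype='float32') * 10
descriptors = np.random.rand(8, dim).astype('float32')

start_id, end_id = 0, 8
xb_demo[start_id:end_id, 0] = batch_vid
xb_demo[start_id:end_id, 1] = batch_fid
xb_demo[start_id:end_id, 2:] = descriptors
xb_demo.flush()   # persist the written rows to disk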
public_key_u = loadValue["public_key"]
address_u = loadValue["address"]

# Load the blockchain from disk if a saved copy exists
if os.path.exists(fileName):
    f = open(fileName, 'rb')
    blockchain = pickle.load(f)
    f.close()
else:
    genesis = Block(0, 0, "", [])
    #genesis = Block(0, round(time.time()), "", [])
    blockchain = []

    # The sender is set to "0" to indicate that the miner mined a new coin
    add_transaction = OD([("sender", "0"), ("recipient", "my address"),
                          ("amount", 1), ("prev_TxID", ""), ("public_key", ""),
                          ("digital_sig", ""), ("TxID", "")])
    nonce = genesis.mining(add_transaction)  # mining step (a generic nonce-search sketch follows this snippet)
    genesis.nonce = nonce  # insert the obtained nonce into the block
    nonce_joined = genesis.current_hash + str(genesis.nonce)
    calced = hashlib.sha256(nonce_joined.encode("utf-8")).hexdigest()
    genesis.current_hash = calced
    blockchain.append(genesis)


# Flask route definitions start here
##### user-interface routes
@app.route('/')
def index():
    return render_template('./index.html')
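Block and its mining() method are defined elsewhere in this project; below is a self-contained sketch of the nonce search such a method typically performs (the JSON payload, field names and difficulty prefix are assumptions, not the original implementation):

import hashlib
import json

def mine(block_header, transactions, difficulty=4):
    """Search for a nonce whose SHA-256 digest starts with `difficulty` zero hex digits."""
    payload = json.dumps({'header': block_header, 'tx': transactions}, sort_keys=True)
    nonce = 0
    while True:
        digest = hashlib.sha256((payload + str(nonce)).encode('utf-8')).hexdigest()
        if digest.startswith('0' * difficulty):
            return nonce, digest
        nonce += 1

nonce, digest = mine('genesis', [{'sender': '0', 'recipient': 'my address', 'amount': 1}])
print(nonce, digest[:16])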
Example #25
0
from collections import namedtuple as NT
import cProfile
import pstats
import io


# ESieve = [1, 7, 11, 13, 17, 19, 23, 29]
# XJumps = bytes([6, 4, 2, 4, 2, 4, 6, 2])
# Primes = [2, 3, 5]
# XJumps = bytes([4, 2])
# Primes = [2, 3]
XJumps = bytes([2])
Primes = [2]

Strikes = set()
XFrame = OD()
ZDatas = OD()
curPrime = 1
# thisPool = MP.pool.Pool()
# MAXPROCS = 5
MAX2SHOW = 24
MAXPRIME = 29  # where should we stop - lists/sets grow very fast

Xtuple = NT('Xtuple',
            ['prime', 'step', 'start', 'finit', 'frame', 'strikes', 'jumpers'])


def profile(fnc):
    """
    decorator for profiling
    """
Example #26
0
 def clear(self, k):
     i = self.find_by_name(k)
     old = self[i]
     self[i] = OD()
     self[i].name = old.name
     self.select(i)
Example #27
0
def get_ctrl(dev):
    ctrl_buttons = OD([('Reset', reset_cb), ('Read', read_cb),
                       ('Write', write_cb)])
    data = Data(buttons=ctrl_buttons, io_cb=dev_io_cb)
    data.add_page('Mode', send=True)
    data.add('COMMON',
             label='Mode',
             wdgt='combo',
             state='readonly',
             value=['INDEPENDENT', 'PARALLEL', 'SERIES', 'DOUBLE'],
             fmt_cb=common_fmt_cb)
    data.add_page('Independent', send=True)
    data.add('SCH1V',
             label='Channel1, V',
             wdgt='spin',
             value=Data.spn(0, 30, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SCH1C',
             label='Channel1, A',
             wdgt='spin',
             value=Data.spn(0, 3, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SW1',
             label='Output1 enable',
             wdgt='combo',
             state='readonly',
             value=OD([('ON', '0'), ('OFF', '1')]))
    data.add('SCH2V',
             label='Channel2, V',
             wdgt='spin',
             value=Data.spn(0, 30, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SCH2C',
             label='Channel2, A',
             wdgt='spin',
             value=Data.spn(0, 3, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SW2',
             label='Output2 enable',
             wdgt='combo',
             state='readonly',
             value=OD([('ON', '0'), ('OFF', '1')]))
    data.add_page('Parallel', send=True)
    data.add('SPARAV',
             label='Output, V',
             wdgt='spin',
             value=Data.spn(0, 30, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SPARAC',
             label='Output, A',
             wdgt='spin',
             value=Data.spn(0, 6, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SW1',
             label='Output enable',
             wdgt='combo',
             state='readonly',
             value=OD([('ON', '0'), ('OFF', '1')]))
    data.add_page('Series', send=True)
    data.add('SSERIV',
             label='Output, V',
             wdgt='spin',
             value=Data.spn(0, 60, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SSERIC',
             label='Output, A',
             wdgt='spin',
             value=Data.spn(0, 3, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SW1',
             label='Output enable',
             wdgt='combo',
             state='readonly',
             value=OD([('ON', '0'), ('OFF', '1')]))
    data.add_page('PlusMinus', send=True)
    data.add('SDUAL1V',
             label='Output1, V',
             wdgt='spin',
             value=Data.spn(0, 30, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SDUAL1C',
             label='Output1, A',
             wdgt='spin',
             value=Data.spn(0, 3, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SDUAL2V',
             label='Output2, V',
             wdgt='spin',
             value=Data.spn(0, 30, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SDUAL2C',
             label='Output2, A',
             wdgt='spin',
             value=Data.spn(0, 3, .01),
             fmt_cb=odp_fmt_cb)
    data.add('SW1',
             label='Output enable',
             wdgt='combo',
             state='readonly',
             value=OD([('ON', '0'), ('OFF', '1')]))
    return data
Example #28
0
def checkoutputs(expdf,logfile,ccdfile,goodchecked,steplist):
    expnums = expdf['expnum'].tolist()
    nites = expdf['nite'].tolist()
    bands = expdf['filter'].tolist()

    season = os.environ.get('SEASON')
    outdir = os.path.join(os.environ.get('ROOTDIR2'),'checkoutputs')
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    steplist = os.path.join(os.environ.get('INDIR'),steplist)
    f = open(steplist,'r')  # read the step list from the full path built above
    stepnames = f.readlines()
    f.close()

    stepnames = [x.strip() for x in stepnames]

    goodchecked = os.path.join(outdir,goodchecked)
    if os.path.isfile(goodchecked):
        f = open(goodchecked,'r')
        good = f.readlines()
        f.close()
        good = [int(x.strip()) for x in good]
    else:
        good = []

    expdir = os.environ.get('EXPDIR')
    logname = os.path.join(outdir,logfile)
    lf = open(logname,'w+')
    lf.write('EXPOSURES PROVIDED: '),lf.write(','.join(map(str,sorted(expnums))))
    lf.write('\n\n')
    d = OD()
    d['expnum'] = []
    chips = list(range(1,63))
    steps = range(1,29)
    # CCDs 2, 31 and 61 are excluded, leaving the 59 CCDs checked per exposure
    for badccd in (2, 31, 61):
        chips.remove(badccd)
    for ch in chips:
        ch = '%02d' % ch
        d[ch] = []
    for e in expnums:
        nite = nites[expnums.index(e)]
        nite = str(nite)
        band = bands[expnums.index(e)]
        e = str(e)
        end = nite+'/'+e+'/'+'dp'+season
        p = os.path.join(expdir,end)
        if int(e) not in good:
            if os.path.isdir(p):
                print(str(expnums.index(int(e)))+'/'+str(len(expnums))+' - '+p)
                d['expnum'].append(e)
            else:
                print(str(expnums.index(int(e)))+'/'+str(len(expnums))+' - '+p, 'does not exist. Check diffimg outputs.')
                continue
            for c in chips:
                c = '%02d' % c
                p2 = os.path.join(p,band+'_'+c)
                if os.path.isdir(p2):
                    for r in steps:
                        fail = stepnames[r-1]+'.FAIL'
                        gpfail = os.path.join(p2,fail)
### The current assumption is that .FAIL files are cleared out when a CCD is reprocessed.
### If this is not true, uncomment the 3 lines below and indent the append and break lines.
### In that event, one must also consider how to deal with a RUN28 failure.
### (The for/else bookkeeping used here is sketched just after this function.)
                        if os.path.isfile(gpfail):
                            #log = stepnames[r]+'.LOG'
                            #plog = os.path.join(gp,log)
                            #if os.path.isfile(plog):
                            d[c].append(int(r))
                            break
                    else:
                        d[c].append(0)
        else:
            print(str(expnums.index(int(e)))+'/'+str(len(expnums))+' - '+p)
            d['expnum'].append(e)
            for c in chips:
                c = '%02d' % c
                d[c].append(0)

    lf.write('EXPOSURES CHECKED: '),lf.write(','.join(map(str,sorted(d['expnum']))))
    lf.write('\n\n')

    nonex,yesex = [],[]
    for x in sorted(expnums):
        x=str(x)
        if x not in d['expnum']:
            nonex.append(x)
        else:
            yesex.append(x)
    lf.write('EXPOSURES NOT FOUND: ')
    if len(nonex)==0:
        lf.write('none')
    else:
        lf.write(','.join(map(str,nonex)))
    lf.write('\n\n')
    
    df1 = pd.DataFrame(d)
    df = df1.set_index('expnum')

    ccddf = df.copy()

    listgood = df.loc[df.sum(axis=1) == 0].index
    listgood = listgood.tolist()
    listgood = [int(x) for x in listgood]
    np.savetxt(goodchecked,sorted(listgood),fmt='%d')

    df['successes']=(df==0).astype(int).sum(axis=1)
    df['fraction'] = ""
    for exp in list(df.index.values):
        frac = float(df.at[exp,'successes'])/59.  # 59 CCDs checked per exposure
        frac = round(frac,3)
        df.at[exp,'fraction'] = frac
    
    lf.write('# OF CCDS PROCESSED: ')
    ccdsum = sum(df['successes'])
    lf.write(str(ccdsum)),lf.write('\n')
    
    lf.write('TOTAL CCDS CHECKED: ')
    ccdtot = 59*len(df['successes'])
    lf.write(str(ccdtot)),lf.write('\n')
    
    lf.write('CCD SUCCESS RATE: ')
    div = float(ccdsum)/float(ccdtot)
    lf.write(str(round(div,3)))
    lf.close()
    
    df.sort_index(inplace=True)
    df.to_csv(os.path.join(outdir,ccdfile))

    return yesex,nonex,ccddf
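checkoutputs() leans on Python's for/else: the else branch appends 0 only when the loop over steps completes without hitting a break, i.e. when no *.FAIL file was found. A minimal sketch of that pattern, with a hypothetical list of step outcomes standing in for the filesystem checks:

def record_first_failure(step_failed, results):
    """Append the 1-based index of the first failed step, or 0 if all passed."""
    for r, failed in enumerate(step_failed, start=1):
        if failed:            # analogous to os.path.isfile(<stepname>.FAIL)
            results.append(r)
            break
    else:                     # no break: every step succeeded
        results.append(0)

d_demo = []
record_first_failure([False, False, True, False], d_demo)
record_first_failure([False, False], d_demo)
print(d_demo)   # [3, 0]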
    lines = fileIn.read().split('\n')
# Retrieve name, run and subrun info from string
print("Retrieving name, run and subrun info from string")
name = []
run = []
subrun = []
for line in lines:
    if line:
        name.append(line)
        run.append(line.split('-')[2])
        subrun.append(line.split('-')[3].split('_')[0])
# Create dataframe with information
print("Creating dataframe with information")
data = OD([
    ('run', run),
    ('subrun', subrun),
    ('name', name),
])

df = pd.DataFrame(data)
# Sort dataframe
odf = df.sort_values(['run', 'subrun'])

# Find filenames with unique subrun per run (one reading of this step is sketched after this snippet)
name = []
run = []
subrun = []
uniqueRun = 0
# which_run = 0
files = 0
for i, row in odf.iterrows():
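The loop that starts above is cut off in this listing; one reading of "filenames with unique subrun per run" is keeping a single filename per (run, subrun) pair, which the sorted dataframe already supports directly. A hedged sketch of that interpretation (odf_demo is illustrative data, and the filter itself is an assumption about the truncated loop):

import pandas as pd

# Hypothetical stand-in for odf, already sorted by run and subrun.
odf_demo = pd.DataFrame({
    'run':    ['001', '001', '001', '002'],
    'subrun': ['01',  '01',  '02',  '01'],
    'name':   ['a-x-001-01_0.root', 'a-x-001-01_1.root',
               'a-x-001-02_0.root', 'a-x-002-01_0.root'],
})

# Keep the first filename for each (run, subrun) pair; later duplicates are dropped.
unique_per_run = odf_demo.drop_duplicates(subset=['run', 'subrun'], keep='first')
print(unique_per_run['name'].tolist())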
Example #30
0
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print('arguments:')
    print(argstr)
    argfile = osp.join(args.expdir, 'train_args.yaml')

    if osp.isfile(argfile):
        with open(argfile) as f:
            oldargs = yaml.safe_load(f)
        if oldargs is not None and oldargs != args.__dict__:
            print(
                'WARNING: Changed configuration keys compared to stored experiment'
            )
            utils.arguments.compare_dicts(oldargs, args.__dict__, verbose=True)

    args.cuda = not args.no_cuda
    args.validate_first = not args.no_validate_first

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    transforms = get_transforms(IN1K, args.input_size, args.augmentation)
    datas = {}
    for split in ('train', 'val'):
        imload = preloader(args.imagenet_path, args.preload_dir_imagenet
                           ) if args.preload_dir_imagenet else default_loader
        datas[split] = IdDataset(
            IN1K(args.imagenet_path,
                 split,
                 transform=transforms[split],
                 loader=imload))
    loaders = {}
    loaders['train'] = DataLoader(datas['train'],
                                  batch_sampler=RASampler(
                                      len(datas['train']),
                                      args.batch_size,
                                      args.repeated_augmentations,
                                      args.epoch_len_factor,
                                      shuffle=True,
                                      drop_last=False),
                                  num_workers=args.workers,
                                  pin_memory=True)
    loaders['val'] = DataLoader(datas['val'],
                                batch_size=args.batch_size,
                                num_workers=args.workers,
                                pin_memory=True)

    model = get_multigrain(args.backbone,
                           p=args.pooling_exponent,
                           include_sampling=not args.global_sampling,
                           pretrained_backbone=args.pretrained_backbone)
    print("Multigrain model with {} backbone and p={} pooling:".format(
        args.backbone, args.pooling_exponent))
    print(model)

    cross_entropy = torch.nn.CrossEntropyLoss()
    cross_entropy_criterion = (cross_entropy, ('classifier_output',
                                               'classifier_target'),
                               args.classif_weight)
    if args.global_sampling:
        margin = SampledMarginLoss(margin_args=dict(beta_init=args.beta_init))
        beta = margin.margin.beta
        margin_criterion = (margin, ('normalized_embedding',
                                     'instance_target'),
                            1.0 - args.classif_weight)
    else:
        margin = MarginLoss(args.beta_init)
        beta = margin.beta
        margin_criterion = (margin,
                            ('anchor_embeddings', 'negative_embeddings',
                             'positive_embeddings'), 1.0 - args.classif_weight)

    extra = {'beta': beta}

    criterion = MultiCriterion(dict(cross_entropy=cross_entropy_criterion,
                                    margin=margin_criterion),
                               skip_zeros=(args.repeated_augmentations == 1))

    if args.cuda:
        criterion = utils.cuda(criterion)
        model = utils.cuda(model)

    optimizers = OD()
    optimizers['backbone'] = SGD(model.parameters(),
                                 lr=args.learning_rate,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay)
    optimizers['margin_beta'] = SGD([beta],
                                    lr=args.learning_rate * args.beta_lr,
                                    momentum=args.momentum)
    optimizers = MultiOptim(optimizers)
    optimizers.set_base_lr()

    if args.cuda:
        model = nn.DataParallel(model)

    def set_learning_rate(epoch):
        factor = 1.0
        for (drop, gamma) in zip(args.lr_drops_epochs, args.lr_drops_factors):
            if epoch > drop:
                factor *= gamma
        optimizers.lr_multiply(factor)

    batches_accumulated = 0

    def training_step(batch):
        nonlocal batches_accumulated
        if batches_accumulated == 0:
            optimizers.zero_grad()

        output_dict = model(batch['input'], batch['instance_target'])
        output_dict['classifier_target'] = batch['classifier_target']
        loss_dict = criterion(output_dict)
        top1, top5 = utils.accuracy(output_dict['classifier_output'].data,
                                    output_dict['classifier_target'].data,
                                    topk=(1, 5))

        loss_dict['loss'].backward()
        batches_accumulated += 1

        if batches_accumulated == args.gradient_accum:
            mag = {}
            for (name, p) in model.named_parameters():
                mag[name] = p.grad.norm().item()
            optimizers.step()
            batches_accumulated = 0

        return_dict = OD()
        for key in ['cross_entropy', 'margin', 'loss']:
            if key in loss_dict:
                return_dict[key] = loss_dict[key].item()
        return_dict['beta'] = beta.item()
        return_dict['top1'] = top1
        return_dict['top5'] = top5

        return return_dict

    def validation_step(batch):
        with torch.no_grad():
            output_dict = model(batch['input'])
            target = batch['classifier_target']
            xloss = cross_entropy(output_dict['classifier_output'], target)
            top1, top5 = utils.accuracy(output_dict['classifier_output'],
                                        target,
                                        topk=(1, 5))

        return OD([
            ('cross_entropy', xloss.item()),
            ('top1', top1),
            ('top5', top5),
        ])

    metrics_history = OD()

    checkpoints = utils.CheckpointHandler(args.expdir, args.save_every)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        begin_epoch, loaded_extra = checkpoints.resume(model, optimizers,
                                                       metrics_history,
                                                       args.resume_epoch,
                                                       args.resume_from)
        if 'beta' in loaded_extra:
            beta.data.copy_(loaded_extra['beta'])
        else:
            print('(re)initialized beta to {}'.format(beta.item()))
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    def loop(loader, step, epoch, prefix=''):  # Training or validation loop
        metrics = defaultdict(utils.AverageMeter)
        tic()
        for i, batch in enumerate(loader):
            if args.cuda:
                batch = utils.cuda(batch)
            data_time = 1000 * toc()
            tic()
            step_metrics = step(batch)
            step_metrics['data_time'] = data_time
            step_metrics['batch_time'] = 1000 * toc()
            tic()
            for (k, v) in step_metrics.items():
                metrics[prefix + k].update(v, len(batch['input']))
            print(
                logging.str_metrics(metrics,
                                    iter=i,
                                    num_iters=len(loader),
                                    epoch=epoch,
                                    num_epochs=args.epochs))
        print(logging.str_metrics(metrics, epoch=epoch,
                                  num_epochs=args.epochs))
        toc()
        return OD((k, v.avg) for (k, v) in metrics.items())

    if args.validate_first and begin_epoch == 0 and 0 not in metrics_history:
        model.eval()
        metrics_history[0] = loop(loaders['val'], validation_step, begin_epoch,
                                  'val_')
        checkpoints.save_metrics(metrics_history)

    for epoch in range(begin_epoch, args.epochs):
        set_learning_rate(epoch)

        batches_accumulated = 0
        model.train()
        metrics = loop(loaders['train'], training_step, epoch, 'train_')

        model.eval()
        metrics.update(loop(loaders['val'], validation_step, epoch, 'val_'))

        metrics_history[epoch + 1] = metrics

        if not args.dry:
            utils.make_plots(metrics_history, args.expdir)
            checkpoints.save(model, epoch + 1, optimizers, metrics_history,
                             extra)