Example 1
# Imports assumed by this snippet (cy and common are project modules):
import argparse

import common
import cy


def func(argv):
    print("hello")
    print(cy.Define.strName)
    print(cy.Define.nAge)
    print("pocketgit")
    common.test()
    # common.my_abs('free')
    print(common.my_abs(3.36))

    print(common.power(4, 5))
    print(common.fact(5))

    p = argparse.ArgumentParser("help123")
    p.add_argument('-c', nargs='?', default='cy')
    p.add_argument('-y', nargs='?', default='wy')

    args = p.parse_args(argv[1:])
    if args.c == 'cy':
        print("-cy")

    # clean
    print("Clean my computor.")

    # Mac
    print("Mac下快速搜索或启动:Command+Space")

    #game
    print("game save git")
    
    #time
    print("E=mc^2")
Example 2
import traceback

# Imports assumed by this snippet (cm is presumably the project's common module):
import common as cm
import data
import learn
import tool


def run():
    """
    :param app_name: HPC application
    :param perf_coln: performance name to be optimized
    :param num_core: number of CPU cores
    :param num_node: number of computing nodes
    :param rand_seed: random seed
    :param num_smpl: number of samples
    :param pool_size: pool size
    :param num_iter: number of iterations
    :param prec_rand: percentage of random samples
    """
    try:
        cm.init()
        app_name = cm.app_name
        perf_coln = cm.perf_coln
        num_smpl = cm.num_smpl
        pool_size = cm.pool_size
        num_iter = cm.num_iter
        prec_rand = cm.prec_rand
    
        if (app_name == "lv"):
            conf_colns = data.lv_conf_colns 
        elif (app_name == "hs"):
            conf_colns = data.hs_conf_colns

        num_rand = int(num_smpl * prec_rand)  # number of initial random samples
        nspi = int((num_smpl - num_rand) / num_iter)  # new samples per iteration
        # pool_df = data.gen_smpl(app_name, pool_size)
        # conf_df = pool_df.head(num_rand)
        conf_df = data.gen_smpl(app_name, num_rand)
        train_df = cm.measure_perf(conf_df)

        for iter_idx in range(num_iter):
            num_curr = num_smpl - nspi * (num_iter - 1 - iter_idx)
 
            pool_df = data.gen_smpl(app_name, pool_size)
            pred_top_smpl = learn.whl_pred_top_eval(train_df, pool_df, conf_colns, perf_coln, num_smpl, 0)
            pred_top_smpl = pred_top_smpl.sort_values([perf_coln]).reset_index(drop=True)
            new_conf_df = pred_top_smpl[conf_colns].head(nspi)
            conf_df = tool.df_union(conf_df, new_conf_df) 
    
            # top up with additional predicted-top rows until conf_df reaches the
            # per-iteration target (tool.df_union may drop duplicate rows)
            last = nspi
            while (conf_df.shape[0] < num_curr):
                last = last + 1
                new_conf_df = pred_top_smpl[conf_colns].head(last)
                conf_df = tool.df_union(conf_df, new_conf_df)
    
            new_train_df = cm.measure_perf(new_conf_df)
            train_df = tool.df_union(train_df, new_train_df)
    
        data.df2csv(train_df, app_name + "_train.csv")
        mdl_chk, mdl = learn.train_mdl_chk(train_df, conf_colns, perf_coln)
        top_df = cm.find_top('ALe', (mdl_chk, mdl, ), conf_colns, perf_coln, train_df)
    
        cm.test(train_df, conf_colns, perf_coln)
        cm.finish(train_df, top_df)
    except Exception:
        traceback.print_exc()
Example 3
                                    args['max_sentence_length'],
                                    pdtb_category=args['pdtb_category'])

# load model, optimizer and loss
model, optimizer, criterion = common.get_model(
    model=args['model'],
    model_path=results_path,
    lr=args['learning_rate'],
    weight_decay=args['weight_decay'],
    pdtb_category=args['pdtb_category'])
print(optimizer)
print(criterion)

if args['cuda']:
    torch.backends.cudnn.enabled = True
    cudnn.benchmark = True
    model.cuda()
    criterion = criterion.cuda()

if args['model'] == 'grn16':
    common.test_grn16(val_loader, model, criterion, args['cuda'],
                      args['print_freq'])
elif args['model'] == 'keann':
    common.test_keann(val_loader, model, criterion, args['cuda'],
                      args['print_freq'])
elif args['model'] == 'keann_kg':
    common.test_keann_kg(val_loader, model, criterion, args['cuda'],
                         args['print_freq'])
else:
    common.test(val_loader, model, criterion, args['cuda'], args['print_freq'])
Example 4
def main():
    print(common.outNum(100))
    common.test()
Example 5
# Imports assumed by this snippet (older standalone Keras API, given optimizers.Adam(lr=...)):
from keras import optimizers
from keras.layers import Dense, Dropout, LSTM
from keras.models import Sequential
from sklearn import preprocessing

import common


def getModel():
    model = Sequential()
    model.add(
        LSTM(1000,
             activation='tanh',
             input_shape=(4, 1),
             return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(500, activation='tanh', return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(200, activation='tanh', return_sequences=True))
    model.add(Dense(1, activation='linear'))
    adam = optimizers.Adam(lr=0.0001)
    model.compile(loss="mean_squared_error", optimizer=adam)
    return model


if __name__ == '__main__':
    file = common.readData()
    scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
    data = common.preprocessData(file, scaler)
    train_data, test_data = common.splitData(data, 0.8)
    common.plotTestAndTrain(train_data, test_data)
    x_train, y_train, x_test, y_test = common.prepareForTraining(
        train_data, test_data)
    model = getModel()
    common.train(x_train, y_train, model, 16, 10, "model01.h5")
    common.test(model, x_train, y_train, x_test, y_test)
    common.predict(model, x_test, y_test, scaler)
Example 6
"""
http://rosalind.info/problems/revc/

Given: A DNA string s of length at most 1000 bp.

Return: The reverse complement sc of s.
"""

import common

sample_data = "AAAACCCGGT"
sample_output = "ACCGGGTTTT"

comp = dict(A="T", T="A", C="G", G="C")


def complement(dna):
    # reverse the strand and map each base to its Watson-Crick complement
    return "".join(comp[x] for x in dna[::-1])


common.test(complement, sample_data, sample_output)

common.runit(complement)
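The Rosalind snippets on this page call two helpers from `common` that are not shown here: `common.test(func, sample_data, sample_output)` and `common.runit(func)`. A minimal sketch of what such helpers might look like, assumed from how they are called rather than taken from the actual module:

# Hypothetical sketch of the common helpers used by the Rosalind examples
# (an assumption, not the project's real code): test() checks a function
# against the sample case; runit() runs it on a local dataset file and prints
# the answer. The file name below is a placeholder.
def test(func, sample_data, sample_output):
    result = func(sample_data)
    assert result == sample_output, "%r != %r" % (result, sample_output)
    print("sample OK")


def runit(func, path="rosalind_input.txt"):
    with open(path) as fh:
        print(func(fh.read().strip()))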
Example 7
"""
http://rosalind.info/problems/subs/

Given: Two DNA strings s and t (each of length at most 1 kbp).

Return: All locations of t as a substring of s.
"""

import common
import re

sample_data = """GATATATGCATATACTT
ATAT"""

sample_output = "2 4 10"


def motif(inp):
    s = inp.splitlines()[0]
    t = inp.splitlines()[1]
    # lookahead keeps overlapping matches; start() + 1 converts to 1-based positions
    p = re.compile(r'(?=(%s))' % t)
    results = []
    for m in p.finditer(s):
        results.append(str(m.start() + 1))
    return ' '.join(results)


common.test(motif, sample_data, sample_output)

common.runit(motif)
Example 8
"""
http://rosalind.info/problems/rna/

Given: A DNA string t having length at most 1000 nt.

Return: The transcribed RNA string of t. (T => U)
"""
import common

alphabet = ['A', 'C', 'G', 'U']
sample_data = "GATGGAACTTGACTACGTAAATT"
sample_output = "GAUGGAACUUGACUACGUAAAUU"

def scribe(dna):
    out = dna.replace("T", "U")
    return out


common.test(scribe, sample_data, sample_output)

common.runit(scribe)
Example 9
    parser.set_defaults(test=False)

    args = parser.parse_args()

    input_shape = (FB_HEIGHT, WIDTH, COLOR_DEPTH)

    if args.test:
        model = load_model(modelFileName)

        input_shape = (FB_HEIGHT, WIDTH, COLOR_DEPTH)
        label_binarizer, clazzes = common.build_label_binarizer()

        test_labels, test_features, test_metadata = common.load_data(
            label_binarizer, foldsFolder, 'test', [1], input_shape)

        common.test(test_labels, test_features, test_metadata, model, clazzes)
    else:
        accuracies = []
        numFolds = len(
            glob(os.path.join(foldsFolder, "train_metadata.fold*.npy")))
        generator = common.train_generator(numFolds,
                                           foldsFolder,
                                           input_shape,
                                           max_iterations=1)

        first = True
        for (train_labels, train_features, test_labels, test_features,
             test_metadata, clazzes) in generator:

            # TODO reset tensorflow
Example 10
    for line in inp.splitlines():
        print line
        if line.startswith(">"):
            if current_dna:
                current_dna.get_gc()
                dnas.append(current_dna)
            current_dna = Dna(ID=line.replace(">", "").strip())
        else:
            # dna stretches over multiple lines
            current_dna.dna_str += line.strip()
    dnas.append(current_dna)

    # from pprint import pprint
    # pprint(dnas)

    max_dna = dnas[0]
    for d in dnas:
        if d.gc > max_dna.gc:
            max_dna = d
    print ""
    print "DNA with largest GC content:"
    print max_dna.ID
    print max_dna.gc

    return "%s\n%s" % (max_dna.ID, str(max_dna.gc))


common.test(gc, sample_data, sample_output)

common.runit(gc)
Example 11
"""
http://rosalind.info/problems/hamm/

Given: Two DNA strings s and t of equal length (not exceeding 1 kbp).

Return: The Hamming distance dH(s,t).

"""

import common
from itertools import izip

sample_data = """GAGCCTACTAACGGGAT
CATCGTAATGACGGCCT"""

sample_output = "7"


def hamm(inp):
    s = inp.splitlines()[0]
    t = inp.splitlines()[1]
    return str(sum([i != j for i, j in izip(s, t)]))


common.test(hamm, sample_data, sample_output)

common.runit(hamm)
Example 12
    "AGC": "S",
    "GGC": "G",
    "UGA": "Stop",
    "CGA": "R",
    "AGA": "R",
    "GGA": "G",
    "UGG": "W",
    "CGG": "R",
    "AGG": "R",
    "GGG": "G"
}


def code(rna):
    # check it starts correctly
    if rna[0:3] != "AUG":
        return None

    # split into 3-letter words
    words = [rna[i:i + 3] for i in range(0, len(rna), 3)]

    # change to proteins
    protein = [codon_table[w] for w in words]
    protein = protein[:protein.index("Stop")]
    return ''.join(protein)


common.test(code, sample_data, sample_output)

common.runit(code)
Example 13
import traceback

# Imports assumed by this snippet (cm is presumably the project's common module):
import common as cm
import data
import learn
import tool


def run():
    """
    :param app_name: HPC application
    :param perf_coln: performance name to be optimized
    :param num_core: number of CPU cores
    :param num_node: number of computing nodes
    :param rand_seed: random seed
    :param num_smpl: number of samples
    :param pool_size: pool size
    :param num_iter: number of iterations
    :param prec_rand: percentage of random samples
    :param prec_init: percentage of initial samples replaced by equivalent samples
    """
    try:
        cm.init()
        app_name = cm.app_name
        perf_coln = cm.perf_coln
        num_smpl = cm.num_smpl
        pool_size = cm.pool_size
        num_iter = cm.num_iter
        prec_rand = cm.prec_rand
        prec_init = cm.prec_init

        if (app_name == "lv"):
            conf_colns = data.lv_conf_colns
            conf1_colns = data.lmp_conf_colns
            conf2_colns = data.vr_conf_colns
        elif (app_name == "hs"):
            conf_colns = data.hs_conf_colns
            conf1_colns = data.ht_conf_colns
            conf2_colns = data.sw_conf_colns

        num_rand = int(num_smpl * prec_rand)
        # pool_df = data.gen_smpl(app_name, pool_size)
        # conf_df = pool_df.head(num_rand)
        conf_df = data.gen_smpl(app_name, num_rand)
        train_df = cm.measure_perf(conf_df)

        num_init = int(num_smpl * prec_init)
        pool1_df = data.gen_smpl(cm.app1_name(app_name), num_init * 100)
        conf1_df = pool1_df.head(num_init)
        train1_df = cm.measure_perf(conf1_df)
        pool2_df = data.gen_smpl(cm.app2_name(app_name), num_init * 100)
        conf2_df = pool2_df.head(num_init)
        train2_df = cm.measure_perf(conf2_df)

        avg_mach_time = data.sa_mach_time(train_df) / num_rand
        avg_sprt_mach_time = (data.app_mach_time(train1_df) + \
                              data.app_mach_time(train2_df)) / num_init
        factor = max(1, avg_mach_time / avg_sprt_mach_time)
        if (factor > 1):
            num_sprt = int(num_init * factor)
            new_conf1_df = pool1_df.head(num_sprt).tail(num_sprt - num_init)
            new_train1_df = cm.measure_perf(new_conf1_df)
            train1_df = tool.df_union(train1_df, new_train1_df)
            new_conf2_df = pool2_df.head(num_sprt).tail(num_sprt - num_init)
            new_train2_df = cm.measure_perf(new_conf2_df)
            train2_df = tool.df_union(train2_df, new_train2_df)

        pool_df = data.gen_smpl(app_name, pool_size)
        pred_top_smpl = learn.sprt_pred_top_eval(train1_df, train2_df, pool_df,
                                                 conf1_colns, conf2_colns,
                                                 conf_colns, perf_coln,
                                                 num_smpl, 0)

        nspi = int((num_smpl - num_init - num_rand) / num_iter)

        for iter_idx in range(num_iter):
            num_curr = num_smpl - num_init - nspi * (num_iter - 1 - iter_idx)

            pred_top_smpl = pred_top_smpl.sort_values(
                [perf_coln]).reset_index(drop=True)
            new_conf_df = pred_top_smpl[conf_colns].head(nspi)
            conf_df = tool.df_union(conf_df, new_conf_df)

            last = nspi
            while (conf_df.shape[0] < num_curr):
                last = last + 1
                new_conf_df = pred_top_smpl[conf_colns].head(last)
                conf_df = tool.df_union(conf_df, new_conf_df)

            new_train_df = cm.measure_perf(new_conf_df)
            train_df = tool.df_union(train_df, new_train_df)
            if (iter_idx < num_iter - 1):
                pool_df = data.gen_smpl(app_name, pool_size)
                pred_top_smpl = learn.whl_pred_top_eval(
                    train_df, pool_df, conf_colns, perf_coln, num_smpl, 0)

        data.df2csv(train_df, app_name + "_train.csv")
        mdl_chk, mdl = learn.train_mdl_chk(train_df, conf_colns, perf_coln)
        top_df = cm.find_top('TaLeC', (mdl_chk, mdl, ), conf_colns, perf_coln, train_df)

        cm.test(train_df, conf_colns, perf_coln)
        cm.finish(train_df, top_df)
    except Exception:
        traceback.print_exc()
Example 14
m are heterozygous, and n are homozygous recessive.

Return: The probability that two randomly selected mating organisms will produce 
an individual possessing a dominant allele (and thus displaying the dominant 
phenotype). Assume that any two organisms can mate.
"""

import common
from math import factorial

sample_data = "2 2 2"
sample_output = "0.78333"


def prob(inp):
    # k homozygous dominant (YY)
    # m heterozygous (Yy)
    # n homozygous recessive (yy)
    k, m, n = [float(x) for x in inp.split(" ")]
    N = k + m + n
    Nm = N - 1.0
    p = (k / N)  # YY chosen first
    p += (m / N) * (
        (k / Nm) + (0.75 * (m - 1) / Nm) + (0.5 * (n / Nm)))  # Yy first
    p += (n / N) * ((k / Nm) + (0.5 * (m / Nm)))  #yy first
    return "%.5f" % p


common.test(prob, sample_data, sample_output)

common.runit(prob)
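A quick worked check of the formula above for the sample input "2 2 2" (k = m = n = 2, so N = 6 and Nm = 5):

# Worked arithmetic for prob("2 2 2"):
#   YY first:                     2/6                                = 0.33333
#   Yy first, dominant offspring: 2/6 * (2/5 + 0.75*1/5 + 0.5*2/5)   = 0.25
#   yy first, dominant offspring: 2/6 * (2/5 + 0.5*2/5)              = 0.20
#   total                                                            = 0.78333
assert prob("2 2 2") == "0.78333"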
Example 15
def counter_old(word):
    """brute-force way"""
    counter = dict(A=0, C=0, G=0, T=0)
    for w in word:
        if w in alphabet:  # error checking
            counter[w] += 1
    print counter["A"], counter["C"], counter["G"], counter["T"]
    return "%s %s %s %s" % (counter["A"], counter["C"], counter["G"],
                            counter["T"])


def counter_good(word):
    """nice way"""
    counter = dict((x, word.count(x)) for x in alphabet)
    print counter["A"], counter["C"], counter["G"], counter["T"]
    return "%s %s %s %s" % (counter["A"], counter["C"], counter["G"],
                            counter["T"])


def counter_fancy(word):
    """super nice way"""
    from collections import Counter
    counter = Counter(word)
    print counter["A"], counter["C"], counter["G"], counter["T"]
    return "%s %s %s %s" % (counter["A"], counter["C"], counter["G"],
                            counter["T"])


common.test(counter_old, sample_data, sample_output)

common.runit(counter_fancy)
Example 16
d_word_index, results_path = common.get_word_index(args.model, args.glove, args.embedding_size)

# create tester
print("===> creating dataloaders ...")
val_loader = common.get_data_loader(args.model, 'test', d_word_index, args.batch_size, args.max_sentence_length,
                                    pdtb_category=args.pdtb_category)

# load model, optimizer and loss
model, optimizer, criterion = common.get_model(model=args.model,
                                               model_path=results_path,
                                               lr=args.lr,
                                               weight_decay=args.weight_decay,
                                               pdtb_category=args.pdtb_category)
print(optimizer)
print(criterion)

if args.cuda:
    torch.backends.cudnn.enabled = True
    cudnn.benchmark = True
    model.cuda()
    criterion = criterion.cuda()

if args.model == 'grn16':
    common.test_grn16(val_loader, model, criterion, args.cuda, args.print_freq)
elif args.model == 'keann':
    common.test_keann(val_loader, model, criterion, args.cuda, args.print_freq)
elif args.model == 'keann_kg':
    common.test_keann_kg(val_loader, model, criterion, args.cuda, args.print_freq)
else:
    common.test(val_loader, model, criterion, args.cuda, args.print_freq)
Example 17
def mnist_classifier_tanh():
    # paths
    path = dict()
    path['project'] = os.path.dirname(os.path.abspath(__file__))
    path['state'] = os.path.join(path['project'], 'epoch')
    path['dataset'] = os.path.join(path['project'], 'dataset')
    path['graph'] = os.path.join(path['project'], 'graph')
    path['array'] = os.path.join(path['project'], 'array')
    for key, value in path.items():
        if not os.path.exists(path[key]):
            os.mkdir(path[key])

    # parameters
    batch_size = 1000
    number_of_epochs = 20
    learning_rate = 1e-3
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    mean = 0.1307
    std = 0.3081
    loss = nn.CrossEntropyLoss()
    train_info_per_batch = 6
    validation_info_per_batch = 3
    test_info_per_batch = 5
    validation_ratio = 0.1

    # transform
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=(mean, ), std=(std, ))
    ])

    # dataset
    train_dataset = torchvision.datasets.MNIST(root=path['dataset'],
                                               train=True,
                                               transform=transform,
                                               download=True)
    test_dataset = torchvision.datasets.MNIST(root=path['dataset'],
                                              train=False,
                                              transform=transform,
                                              download=True)

    # validation dataset
    validation_limit = int((1 - validation_ratio) * len(train_dataset))
    index_list = list(range(len(train_dataset)))
    train_indexes = index_list[:validation_limit]
    validation_indexes = index_list[validation_limit:]
    train_sampler = SubsetRandomSampler(train_indexes)
    # use SubsetRandomSampler so the loader draws the held-out validation indices
    # (SequentialSampler(validation_indexes) would yield positions 0..len-1, i.e.
    # the first training rows rather than the validation rows)
    validation_sampler = SubsetRandomSampler(validation_indexes)

    # dataset loaders
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    validation_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                    batch_size=batch_size,
                                                    sampler=validation_sampler)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size)

    # model
    model = MnistClassifierTanh().to(device)

    # optimizer
    optimizer = optim.SGD(params=model.parameters(), lr=learning_rate)

    epochs = np.arange(start=1, stop=(number_of_epochs + 1), step=1, dtype=int)

    print('Mnist Classifier Tanh')
    train_losses = []
    train_accuracies = []
    validation_losses = []
    validation_accuracies = []
    test_losses = []
    test_accuracies = []
    for epoch in epochs:
        info = 'Epoch {epoch_index}/{number_of_epochs}'
        print(info.format(epoch_index=epoch,
                          number_of_epochs=number_of_epochs))

        # train
        train_loss, train_accuracy = train(model=model,
                                           device=device,
                                           loader=train_loader,
                                           optimizer=optimizer,
                                           loss=loss,
                                           info_per_batch=train_info_per_batch)
        info = 'Train: Average Loss: {train_loss:.5f}, Accuracy: % {train_accuracy:.2f}'
        print(
            info.format(train_loss=train_loss,
                        train_accuracy=(100 * train_accuracy)))
        train_losses.append(train_loss)
        train_accuracies.append(train_accuracy)

        # validation
        validation_loss, validation_accuracy = test(
            model=model,
            loader=validation_loader,
            device=device,
            loss=loss,
            info_per_batch=validation_info_per_batch,
            info_name='Validation')
        info = 'Validation: Average Loss: {validation_loss:.5f}, Accuracy: % {validation_accuracy:.2f}'
        print(
            info.format(validation_loss=validation_loss,
                        validation_accuracy=(100 * validation_accuracy)))
        validation_losses.append(validation_loss)
        validation_accuracies.append(validation_accuracy)

        # test
        test_loss, test_accuracy = test(model=model,
                                        loader=test_loader,
                                        device=device,
                                        loss=loss,
                                        info_per_batch=test_info_per_batch,
                                        info_name='Test')
        info = 'Test: Average Loss: {test_loss:.5f}, Accuracy: % {test_accuracy:.2f}'
        print(
            info.format(test_loss=test_loss,
                        test_accuracy=(100 * test_accuracy)))
        test_losses.append(test_loss)
        test_accuracies.append(test_accuracy)

        # epoch state
        state_file_name = 'mnist_classifier_tanh_epoch_{epoch_index}.pkl'.format(
            epoch_index=epoch)
        save_state(model=model,
                   directory=path['state'],
                   file_name=state_file_name)

    # train loss
    save_data(array=train_losses,
              directory=path['array'],
              file_name='mnist_classifier_tanh_train_loss.npy')
    draw_line_graph(x=epochs,
                    y=train_losses,
                    x_label='Epoch',
                    y_label='Loss',
                    title='Mnist Classifier Tanh Train Loss',
                    directory=path['graph'],
                    file_name='mnist_classifier_tanh_train_loss.png')

    # train accuracy
    save_data(array=train_accuracies,
              directory=path['array'],
              file_name='mnist_classifier_tanh_train_accuracy.npy')
    draw_line_graph(x=epochs,
                    y=train_accuracies,
                    x_label='Epoch',
                    y_label='Accuracy',
                    title='Mnist Classifier Tanh Train Accuracy',
                    directory=path['graph'],
                    file_name='mnist_classifier_tanh_train_accuracy.png')

    # validation loss
    save_data(array=validation_losses,
              directory=path['array'],
              file_name='mnist_classifier_tanh_validation_loss.npy')
    draw_line_graph(x=epochs,
                    y=validation_losses,
                    x_label='Epoch',
                    y_label='Loss',
                    title='Mnist Classifier Tanh Validation Loss',
                    directory=path['graph'],
                    file_name='mnist_classifier_tanh_validation_loss.png')

    # validation accuracy
    save_data(array=validation_accuracies,
              directory=path['array'],
              file_name='mnist_classifier_tanh_validation_accuracy.npy')
    draw_line_graph(x=epochs,
                    y=validation_accuracies,
                    x_label='Epoch',
                    y_label='Accuracy',
                    title='Mnist Classifier Tanh Validation Accuracy',
                    directory=path['graph'],
                    file_name='mnist_classifier_tanh_validation_accuracy.png')

    # test loss
    save_data(array=test_losses,
              directory=path['array'],
              file_name='mnist_classifier_tanh_test_loss.npy')
    draw_line_graph(x=epochs,
                    y=test_losses,
                    x_label='Epoch',
                    y_label='Loss',
                    title='Mnist Classifier Tanh Test Loss',
                    directory=path['graph'],
                    file_name='mnist_classifier_tanh_test_loss.png')

    # test accuracy
    save_data(array=test_accuracies,
              directory=path['array'],
              file_name='mnist_classifier_tanh_test_accuracy.npy')
    draw_line_graph(x=epochs,
                    y=test_accuracies,
                    x_label='Epoch',
                    y_label='Accuracy',
                    title='Mnist Classifier Tanh Test Accuracy',
                    directory=path['graph'],
                    file_name='mnist_classifier_tanh_test_accuracy.png')

    # loss
    draw_multi_lines_graph(lines=[
        dict(label='Train', data=dict(x=epochs, y=train_losses)),
        dict(label='Validation', data=dict(x=epochs, y=validation_losses)),
        dict(label='Test', data=dict(x=epochs, y=test_losses))
    ],
                           x_label='Epoch',
                           y_label='Loss',
                           title='Mnist Classifier Tanh Loss',
                           directory=path['graph'],
                           file_name='mnist_classifier_tanh_loss.png')

    # accuracy
    draw_multi_lines_graph(lines=[
        dict(label='Train', data=dict(x=epochs, y=train_accuracies)),
        dict(label='Validation', data=dict(x=epochs, y=validation_accuracies)),
        dict(label='Test', data=dict(x=epochs, y=test_accuracies))
    ],
                           x_label='Epoch',
                           y_label='Accuracy',
                           title='Mnist Classifier Tanh Accuracy',
                           directory=path['graph'],
                           file_name='mnist_classifier_tanh_accuracy.png')
Example 18
# open the design YAML file and parse it into a dictionary for passing to raft
with open(fname_design) as file:
    design = yaml.load(file, Loader=yaml.FullLoader)
design['potModMaster'] = 1

# grab the depth (currently needs to be passed separately)
depth = float(design['mooring']['water_depth'])

# set up frequency range for computing response over
# (to be set by the modeling options yaml)
w = np.arange(0.05, 5, 0.05)

# Create and run the model
model = raft.Model(design, w=w, depth=depth)

model.setEnv(spectrum="unit")

model.calcSystemProps()

model.solveEigen()

model.calcMooringAndOffsets()

model.solveDynamics()

results = model.calcOutputs()
print('-----------------')
testPass = test(prob, results)

print('Test ' + ('FAILED' if not testPass else 'PASSED'))
Example 19
Return: The total number of rabbit pairs that will be present after n months if 
we begin with 1 pair and in each generation, every pair of reproduction-age 
rabbits produces a litter of k rabbit pairs (instead of only 1 pair).

"""

import common

sample_data = "5 3"
sample_output = "19"

# Formula: F_n = F_{n-1} + kF_{n-2}


def fib(inp):
    n = int(inp.split()[0])
    k = int(inp.split()[1])
    return str(term(n, k))


def term(i, k):
    if i == 1 or i == 2:
        return 1
    else:
        return term(i - 1, k) + (k * term(i - 2, k))


common.test(fib, sample_data, sample_output)

common.runit(fib)
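The recursive term() above recomputes the same subproblems many times; an equivalent iterative version, shown only as a sketch with the same semantics (not part of the original solution), keeps just the last two terms:

def term_iter(n, k):
    # a and b hold term(i - 1) and term(i); start from term(1) = term(2) = 1
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, b + k * a
    return b


# term_iter(5, 3) == 19, matching the sample output above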