# layers.append(softmax)

        for i, l in enumerate(layers):
            pr("{}: {}".format(i, l))
        return layers


def build_architecture(opts, sample):
    """Attach the fixed 6-layer CNN configuration to *opts* and build the net.

    Mutates ``opts.architecture`` in place, then constructs the network on
    ``sample['image']`` with no weight regularizer.
    """
    # One dict literal instead of key-by-key assignment; identical keys/values.
    opts.architecture = {
        'nlayers': 6,
        'kernel_sizes': [5, 5, 3, 3, 3, 3],
        'filters': [64, 128, 256, 256, 512, 1024],
        'paddings': ['same'] * 7,
        'activation': utils.activation(opts.activation_type),
        'embedding_size': 2048,
    }
    return network(sample['image'], opts, regularizer=None)


if __name__ == '__main__':
    # Debug entry point: configure the fixed 6-layer architecture and run
    # the network builder once on a float32 placeholder input.
    opts = options.get_opts()
    opts.architecture = {
        'nlayers': 6,
        'kernel_sizes': [5, 5, 3, 3, 3, 3],
        'filters': [64, 128, 256, 256, 512, 1024],
        'paddings': ['same'] * 7,
        'activation': utils.activation(opts.activation_type),
        'embedding_size': 2048,
    }
    # NHWC input with an unknown batch dimension.
    shape = [None, opts.net_img_size, opts.net_img_size, opts.nchannels]
    placeholder = tf.placeholder(tf.float32, shape)
    network(placeholder, opts, debug=True)
                age = 0
            elif age >= 24 and age < 35:
                age = 1
            elif age >= 35 and age < 45:
                age = 2
            elif age >= 45 and age < 55:
                age = 3
            else:
                age = 4
            f.write("%s\r" % " ".join([filename,str(age)]))
    call(["convert_imageset", "-resize_height", "256", "-resize_width", "256", "/", train_file, image_db_path])
    call(["compute_image_mean", image_db_path, proto_path])

if __name__ == "__main__":
    from sys import argv

    # Parse command-line arguments first: get_opts also populates
    # environment variables, so it must run before the imports below.
    from options import get_opts
    opts = get_opts(argv[1:])

    # Load profile.csv into a dictionary, then validate the result
    # (assert_return prints diagnostics on error).
    from read_profile import read_profile
    from assert_return import assert_return

    status, data = read_profile(opts)
    assert_return(opts, status, data)

    create_dataset(opts, data)
         words  = [word for word,count in bucket]
         try:
             super_set &= set(words)
         except:
             super_set = set(words)
         print val,bucket
         buckets.append((val,words))
     print super_set

     print "\n"
     for val,bucket in buckets:
         print val, set(bucket) - super_set

if __name__ == "__main__":
    from sys import argv

    # Parse command-line arguments first: get_opts also populates
    # environment variables, so it must run before the imports below.
    from options import get_opts
    opts = get_opts(argv[1:])

    # Load profile.csv into a dictionary, then validate the result
    # (assert_return prints diagnostics on error).
    from read_profile import read_profile
    from assert_return import assert_return

    status, data = read_profile(opts)
    assert_return(opts, status, data)

    get_word_count(opts, data)
# Exemplo n.º 4
import m5
from m5.objects import *
from caches import *
from options import get_opts, get_process_cmd

# gem5 configuration: single-core system in atomic memory mode, with split
# L1 caches and a shared fault injector attached to both caches.
(opts, args) = get_opts()

system = System()

# 1 GHz clock source with its own voltage domain.
system.clk_domain = SrcClockDomain()
system.clk_domain.clock = '1GHz'
system.clk_domain.voltage_domain = VoltageDomain()

system.mem_mode = 'atomic'  # Atomic accesses (matches AtomicSimpleCPU below)
system.mem_ranges = [AddrRange('512MB')]  # Single 512MB address range

system.cpu = AtomicSimpleCPU()

# System-wide crossbar between the CPU side and memory.
system.membus = SystemXBar()

# Split L1 instruction/data caches, configured from the parsed options.
system.cpu.dcache = L1DCache(opts)
system.cpu.icache = L1ICache(opts)

# One fault injector shared by both caches; fault descriptions are read
# from opts.input_path.
fault_injector = FaultInjector(input_path=opts.input_path)

system.cpu.dcache.fault_injector = fault_injector
system.cpu.icache.fault_injector = fault_injector

# Wire each cache to its CPU-side port.
system.cpu.dcache.connectCPU(system.cpu)
system.cpu.icache.connectCPU(system.cpu)