Example #1
    def save(self, savedir="../params"):
        mkdir(savedir)
        for name, net in self.networks.iteritems():
            net.save(os.path.join(savedir, name + ".pkl"))
        if self.descenter is not None:
            # Always save the momentum.
            self.descenter.save(savedir)
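Most of the examples on this page call a module-level mkdir (or helpers.mkdir / hp.mkdir) helper rather than os.mkdir directly. The helper itself is not shown in any snippet; a minimal sketch, assuming it simply creates the directory tree if it is missing and returns the path (so that calls like dstdir = hp.mkdir(...) keep working), could look like this:

import os

def mkdir(path):
    # Create the directory and any missing parents; do nothing if it already exists.
    os.makedirs(path, exist_ok=True)
    # Return the path so the helper can be used inline, e.g. dstdir = mkdir(dst).
    return path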
Example #2
def main():
    # Load the data
    df = helpers.load_data()
    df['body_wakati'] = df.body.apply(helpers.fetch_tokenize)

    # Build the input data and the ground-truth labels
    X = df.body_wakati.values
    le = LabelEncoder()
    y = le.fit_transform(df.category)

    # Build the pipeline and run the grid search
    pipe = make_pipeline(BoW(), PCA(n_components=100),
                         SVC(random_state=0, probability=True))
    param_range = [0.1, 1, 10, 100]
    param_grid = [{
        'C': param_range,
        'kernel': 'linear'
    }, {
        'C': param_range,
        'gamma': param_range,
        'kernel': 'rbf'
    }]
    best_score, best_model = evaluator.grid_search(estimator=pipe,
                                                   params=param_grid,
                                                   X=X,
                                                   y=y)

    # Save the score and the model
    save_dir = './models/bow'
    helpers.mkdir(save_dir)
    np.savetxt(save_dir + '/accuracy.txt', np.array(best_score).reshape(1, 1))
    joblib.dump(best_model, save_dir + '/model.pkl')
Example #3
    def summarize_directory_using_score_dump(self, dir_path):
        directory_result_path = "/home/lukasz/Projects/PracaMagisterska/experiments/tmpROUGE/Wide/rouge_summarization"
        system_path = os.path.join(directory_result_path, "system")
        mkdir(system_path)

        for root, dirs, files in os.walk(dir_path, topdown=False):
            sum_lengths = []
            sentences = []
            for i, name in enumerate(files):
                path = os.path.join(root, name)
                if "sentences" in path:
                    # print "[Trainer] -> file: %s" % name
                    sentences = {int(s.split("\t")[0]): s.split("\t")[1] for s in read_file_lines(path)}
                    continue

                summary = read_file_lines(path)
                sum_lengths.append(sum(1.0 for s in summary if s.startswith("2.0")))

            scores = []
            results = []
            res_sum = []
            if summary:
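                # Recover the feature names used by the summarizer, score every sentence
                # with the trained network, and keep the top sentences, taking as many as
                # the average summary length computed from the score dumps above.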
                feats = [str(f)[24:].split(" ")[0][:-7] for f in self.summarizer.get_features()]
                for line in [s.split("|")[1] for s in summary]:
                    scores.append([float(x.split(":")[1]) for x in line[:line.rfind("%")].strip().split(" ") if x.split(":")[0].strip() in feats])
                results = {self.summarizer.model.activate(s)[0]: i for i, s in enumerate(scores)}

                for key in sorted(results, reverse=True)[:int(round((sum(sum_lengths)/len(sum_lengths))))]:
                    res_sum.append(sentences[results[key]])

                file = codecs.open(os.path.join(system_path, name[name.index("_")+1:name.index(".")] + "_system1.txt"), "w", "utf-8")
                file.write("".join(res_sum))
                file.close()
            summary = None
Example #4
def test_deleting_directory_in_fixed(sub_open, mount):
    fdir = join(sub_open, 'new_test_file')
    fdir2 = join(sub_open, 'new_test_file2')
    fdir3 = join(sub_open, 'new_test_file3')

    # Make sure we cannot delete existing files in fixed mode
    assert not isdir(fdir)
    mkdir(fdir)
    assert isdir(fdir)

    mount(fixed=True)

    assert isdir(fdir)
    with pytest.raises(PermissionError):
        rmdir(fdir)
    assert isdir(fdir)

    del fdir

    assert not isdir(fdir2)
    mkdir(fdir2)
    assert isdir(fdir2)
    rename([fdir2], [fdir3])

    assert not isdir(fdir2)
    assert isdir(fdir3)

    mount(fixed=True)
    assert not isdir(fdir3)
Example #5
def main():
    # Load the data
    df = helpers.load_data()
    df['body_wakati'] = df.body.apply(helpers.fetch_tokenize)

    # Build the input data and the ground-truth labels
    X = df.body_wakati.values
    le = LabelEncoder()
    y = le.fit_transform(df.category)

    # Train doc2vec
    print('training doc2vec')
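    # Each tokenized document becomes a TaggedDocument whose tag is its index in X.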
    training_data = [TaggedDocument(words=tokenize_texts, tags=[idx]) for idx, tokenize_texts in enumerate(X)]
    doc2vec = Doc2Vec(training_data, vector_size=100, workers=4)
    print('finish training doc2vec')

    # Build the pipeline and run the grid search
    pipe = make_pipeline(Doc2Vec_feature(model=doc2vec), SVC(random_state=0, probability=True))
    param_range = [0.1, 1, 10, 100]
    param_grid = [
        {'C': param_range, 'kernel': 'linear'},
        {'C': param_range, 'gamma': param_range, 'kernel': 'rbf'}
    ]
    best_score, best_model = evaluator.grid_search(estimator=pipe, params=param_grid, X=X, y=y)
    print(best_score)
    print(best_model.get_params())

    # Save the score and the model
    save_dir = './models/doc2vec'
    helpers.mkdir(save_dir)
    np.savetxt(save_dir + '/accuracy.txt', np.array(best_score).reshape(1, 1))
    joblib.dump(best_model, save_dir + '/model.pkl')
Example #6
def resize_pad():

    srcdir0 = '/home/kevin/PycharmProjects/autograde/dataset/svm/data-0810'

    # srcdir0 = '/home/kevin/PycharmProjects/autograde/dataset/svm/data-0810-aug'
    dstdir0 = hp.mkdir(
        '/home/kevin/PycharmProjects/autograde/dataset/svm/data-0810-28')

    # for cls in range(22):
    for cls in [22]:

        srcdir = os.path.join(srcdir0, str(cls))
        dstdir = hp.mkdir(os.path.join(dstdir0, str(cls)))

        file_list = os.listdir(srcdir)
        print(len(file_list))

        for file in file_list:
            if file.endswith('.jpg') or file.endswith('.png'):

                src_full = os.path.join(srcdir, file)
                dst_full = os.path.join(dstdir, file[:-4] + '.jpg')
                print(dst_full)

                gray = cv2.imread(src_full, 0)
                gray = imu.strip_white_boder(gray)
                result = adjust_img(gray)
                cv2.imwrite(dst_full, result)
Example #7
def test_make_directories(mount_dir, sub_done, sub_open):
    with pytest.raises(FileExistsError):
        mkdir(sub_open, 'dir')
    assert isdir(sub_open, 'dir')

    with pytest.raises(PermissionError):
        rm_rf(sub_open, 'dir')

    assert isdir(sub_open, 'dir')
    assert isfile(sub_open, 'dir', 'single_file_work')
    assert isfile(sub_open, 'dir', 'single_file_work_copy')

    with pytest.raises(IsADirectoryError):
        rm(sub_done, 'dir')
    with pytest.raises(OSError):
        rmdir(sub_done, 'dir')
    assert isdir(sub_done, 'dir')

    rm_rf(sub_done, 'dir')

    assert 'dir' not in ls(sub_done)

    mkdir(sub_done, 'dir')
    assert isdir(sub_done, 'dir')

    rmdir(sub_done, 'dir')
    assert 'dir' not in ls(sub_done)
Example #8
def test_create_symlink(sub_done):
    with pytest.raises(PermissionError):
        symlink([sub_done, 'dir'], [sub_done, 'wowsers'])

    assert isdir(sub_done, 'dir')
    assert not isdir(sub_done, 'wowsers')

    mkdir(sub_done, 'wowsers')
    assert isdir(sub_done, 'wowsers')
Example #9
def download_dataset():
    helpers.mkdir('data')
    url = 'https://www.dropbox.com/s/c452t2dpaq8nm2y/hamsterhare.tar.gz?dl=1'
    path = 'data/hamsterhare.tar.gz'
    print('Downloading hamster hare dataset...')
    helpers.download(url, path)
    print('Dataset downloaded.')
    print('Extracting dataset...')
    helpers.extract_tar(path)
    print('Dataset extracted')
Example #10
    def process_dir(self, summarizer, root, files):
        mkdir("dataset_dump")
        mkdir(os.path.join("dataset_dump", root[root.rfind("/")+1:]))
        previous_document = None
        for i, name in enumerate(files):
            doc_psc = read_psc_file(os.path.join(root, name), previous_document)
            path = os.path.join("dataset_dump", os.path.join(root[root.rfind("/")+1:], name))
            path = path[:path.rfind(".")] + ".xml"
            print path
            save_to_file(doc_psc, path)
            previous_document = doc_psc
Example #11
def test_illegal_rename(mount_dir, sub_done, sub_open):
    assert isdir(sub_done, 'dir')
    assert not isdir(sub_done, 'dir33')
    mkdir(sub_done, 'dir33')
    assert isdir(sub_done, 'dir33')

    with pytest.raises(FileNotFoundError):
        rename([sub_done, 'dir33'], [sub_done, 'dir', 'non_existing', 'dir33'])

    with pytest.raises(FileExistsError):
        rename([sub_done, 'dir33'], [sub_done, 'dir'])
Example #12
    def __init__(self):
        def exit():
            os.killpg(os.getpgid(jshell_process.pid), signal.SIGTERM)
            os.killpg(os.getpgid(dummy_input_process.pid), signal.SIGTERM)

            for f in [in_path, out_path, snippet_path]:
                try:
                    os.remove(f)
                    pass
                except FileNotFoundError:
                    pass

        atexit.register(exit)

        mkdir("snippets")
        mkdir("fifos")

        self.EOF_key = random_string(32)
        self.snippet_key = random_string(32)
        self.fifo_key = random_string(32)
        snippet_path = "snippets/" + self.snippet_key
        in_path = "fifos/in_" + self.fifo_key
        out_path = "fifos/out_" + self.fifo_key

        for path in [in_path, out_path]:
            try:
                os.mkfifo(path)
            except FileExistsError:
                pass

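        # The dummy writer keeps the input FIFO's write end open so jshell never
        # sees EOF on stdin between snippets.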
        dummy_input_process = subprocess.Popen(
            f"(while true; do sleep 86400; done) > {in_path}",
            shell=True,
            preexec_fn=os.setsid)
        jshell_process = subprocess.Popen(
            f"jshell < {in_path} | tee -a fifos/log_{self.EOF_key}",
            shell=True,
            preexec_fn=os.setsid,
            universal_newlines=True,
            stdout=subprocess.PIPE)

        # This is needed so that exit() can refer to both
        # processes without using self.{process}.
        # This way, garbage collection of this jshell
        # instances works properly.
        self.dummy_input_process = dummy_input_process
        self.jshell_process = jshell_process
        self.in_path = in_path
        self.out_path = out_path
        self.snippet_path = snippet_path

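        # Skip jshell's startup banner (assumed to be 89 characters) so later reads
        # begin at the prompt.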
        self.jshell_process.stdout.read(89)
Example #13
    def create_request_project(self):
        '''
        create site skeleton and configure files
        >>> self.request
        >>> controllers/site.py
        '''
        xpaths = ['sites', '/viewlets', '/browser', '/src', '/templates']
        mkdir(str(self._post) + str(self.request))
        path = str(self._post) + str(self.request)
        path += xpaths[3]
        # split the site name on "."
        directories = (str(self._post) + str(self.request)).split(".")
        for directory in directories:
            path += "/" + directory
        # profiles
        profiles = path
        mkdir(profiles + "/profiles/default/")
        self.add_profile_file(path)
        # browser
        path += xpaths[2]
        # viewlets
        path += xpaths[1]
        mkdir(path)
        # zcml file
        self.add_configure_file(path, str(self._post) + str(self.request))
        # python file configure
        self.add_viewlets_file(path)
        # templates
        path += xpaths[4]
        mkdir(path)
        # pt file
        self.add_template_file(path)
Example #14
def test_non_exising_submission(assig_done):
    with pytest.raises(FileNotFoundError):
        open(join(assig_done, 'NON EXISTING', 'file'), 'x').close()

    with pytest.raises(FileNotFoundError):
        mkdir(assig_done, 'NON EXISTING', 'file')

    with pytest.raises(FileNotFoundError):
        rename(
            [assig_done, 'NON EXISTING', 'file'],
            [assig_done, 'NON EXISTING', 'file2']
        )

    with pytest.raises(FileNotFoundError):
        ls(assig_done, 'NON EXISTING')
Example #15
def test_rename(mount_dir, sub_done, sub_open):
    assert isdir(sub_done, 'dir')
    assert not isdir(sub_done, 'dir33')

    assert isdir(sub_done, 'dir')
    assert not isdir(sub_done, 'dir33')
    mkdir(sub_done, 'dir33')
    mkdir(sub_done, 'dir', 'sub_dir')
    assert isdir(sub_done, 'dir33')
    with open(join(sub_done, 'dir33', 'new_file'), 'w') as f:
        f.write('bye\n')

    rename([sub_done, 'dir33'], [sub_done, 'dir', 'sub_dir', 'dir33'])
    with open(join(sub_done, 'dir', 'sub_dir', 'dir33', 'new_file'), 'r') as f:
        assert f.read() == 'bye\n'
Example #16
def copy_augment():
    import shutil
    srcdir0 = '/home/kevin/PycharmProjects/autograde/dataset/svm/data-0810'
    dstdir0 = '/home/kevin/PycharmProjects/autograde/dataset/svm/data-0810-aug'

    for cls in range(22):
        srcdir = os.path.join(srcdir0, str(cls))
        dstdir = hp.mkdir(os.path.join(dstdir0, str(cls)))

        file_list = os.listdir(srcdir)

        aug_num = 5

        for file in file_list:
            if file.endswith('.jpg') or file.endswith('.png'):

                src_full = os.path.join(srcdir, file)
                dst_full = os.path.join(dstdir, file)
                print(src_full)

                shutil.copyfile(src_full, dst_full)

                gray = cv2.imread(src_full, 0)
                bw = imu.preprocess_bw(gray)
                dst_bw = dst_full[:-4] + 'bw.jpg'
                cv2.imwrite(dst_bw, bw)

                for i in range(aug_num):
                    aug_full = dst_full[:-4] + '_' + str(i) + '.jpg'
                    if cls == 20:  # for dot
                        aug = augment(gray, shift_low=0)
                    else:
                        aug = augment(gray, shift_low=-5)
                    # imu.imshow_(aug)
                    cv2.imwrite(aug_full, aug)
Example #17
def test_write_to_directory(sub_done):
    fdir = join(sub_done, 'new_dir')
    fname = join(fdir, 'new_file')

    mkdir(fdir)
    assert isdir(fdir)
    assert not isfile(fdir)

    with pytest.raises(IsADirectoryError):
        with open(fdir, 'w') as f:
            f.write('hello\n')

    open(fname, 'w').close()
    with pytest.raises(NotADirectoryError):
        ls(fname)
    with pytest.raises(FileExistsError):
        mkdir(fname)
Example #18
    def summarize_directory(self, dir_path):
        directory_result_path = "/home/lukasz/Projects/PracaMagisterska/experiments/tmpROUGE/Wide/rouge_summarization"
        system_path = os.path.join(directory_result_path, "system")
        mkdir(system_path)

        for root, dirs, files in os.walk(dir_path, topdown=False):
            previous_document = None
            sum_lengths = []
            for i, name in enumerate(files):
                # print "[Trainer] -> file: %s" % name
                path = os.path.join('/home/lukasz/Projects/PracaMagisterska/SummarizerApp/Summarizer/dataset_dump', os.path.join(root[root.rfind("/")+1:], name))
                path = path[:path.rfind(".")] + ".xml"
                doc_psc = load_from_file(path)
                previous_document = doc_psc
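                # Track each document's summary length (in sentences) so the average
                # can be used when building the final summary below.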
                sum_lengths.append(len([(s[1], s[2]) for s in doc_psc.get_summary(self.length)]))

            if previous_document:
                summary = self.summarizer.create_summary(previous_document, sum(sum_lengths) / len(sum_lengths))
                self.save_summary_to_file(summary.get_scored_sentences(),
                                          os.path.join(system_path, name[2:-4] + "_system1.txt"))
Example #19
def test_quote_paths(sub_done, path):
    dirs = path.split('/')[:-1]
    if len(dirs):
        mkdir(join(sub_done, *dirs))

    f_path = join(sub_done, path)
    with open(join(sub_done, f_path), 'w') as f:
        assert f.write('abc') == 3

    tmp_path = join(sub_done, 'abc')
    rename([f_path], [tmp_path])
    assert not isfile(f_path)
    assert isfile(tmp_path)

    rename([tmp_path], [f_path])
    assert isfile(f_path)
    assert not isfile(tmp_path)

    rm(f_path)
    if len(dirs):
        rmdir(join(sub_done, *dirs))
Example #20
def test_detect_vertical_line():

    books = ['biaozhundashijuan', 'jiangsumijuan', 'liangdiangeili']
    out_base = hp.mkdir('../result/vertical')

    for book in books:

        srcdir = os.path.join(data_dir, 'waibu/' + book)
        outdir = hp.mkdir(os.path.join(out_base, book))

        papers = os.listdir(srcdir)
        # papers = ['120190703153847968.jpg']
        for paper in papers:
            if paper.endswith('.jpg'):
                print(paper)
                src = cv2.imread(os.path.join(srcdir, paper))
                gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
                vline = detect_vertical_line3(gray)
                # rotated, angle = correct_skew3(src, gray)

                cv2.imwrite(os.path.join(outdir, paper), vline)
Example #21
def test_create_file_outside_submissions(mount_dir, mount):
    with open(join(mount_dir, 'file'), 'w+') as f:
        f.write('hello\n')
    with open(join(mount_dir, 'file'), 'r') as f:
        assert f.read() == 'hello\n'

    with open(
        join(
            mount_dir, [l for l in ls(mount_dir) if isdir(mount_dir, l)][0],
            'file'
        ), 'w+'
    ) as f:
        f.write('hello\n')

    mkdir(mount_dir, 'dir')
    mkdir(mount_dir, 'dir', 'hello')
    mkdir(mount_dir, 'dir', 'hello', 'codegrade')
    with open(
        join(mount_dir, 'dir', 'hello', 'codegrade', 'nice_to_meet'), 'w'
    ) as f:
        f.write('hello')
    with pytest.raises(FileExistsError):
        mkdir(mount_dir, 'dir', 'hello', 'codegrade', 'nice_to_meet')
    with open(
        join(mount_dir, 'dir', 'hello', 'codegrade', 'nice_to_meet'), 'r'
    ) as f:
        assert f.read() == 'hello'

    with pytest.raises(FileNotFoundError):
        with open(join(mount_dir, 'dir', 'bye', 'cg'), 'w') as f:
            pass
    with pytest.raises(FileNotFoundError):
        mkdir(mount_dir, 'dir', 'bye', 'cg2')

    assert 'dir' in ls(mount_dir)
    assert 'file' in ls(mount_dir)
    mount()
    assert 'dir' not in ls(mount_dir)
    assert 'file' not in ls(mount_dir)
Example #22
configs_folder_name = 'configs'

parameter_dict = {
    'model_version': [1],
    'epoch': [10],
    'iteration': [200],
    'learning_rate': [1e-2, 1e-3],
    'batch_size': [128, 64],
    'test_batch_size': [512],
    'is_training': [1]  # often required by batch normalization
}

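# Expand the parameter dictionary into one configuration per combination, ordered by model version.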
grid = sorted(list(ParameterGrid(parameter_dict)),
              key=lambda x: x['model_version'])

mkdir(configs_folder_name)

print('[INFO] Start creating configuration files in the folder "configs"...')
for i in grid:
    i['experiment_name'] = "Model-V{}-Ep{}-It{}-Lr{}-Bs{}".format(
        i['model_version'], i['epoch'], i['iteration'], i['learning_rate'],
        i['batch_size'])

    config_name = '{}.json'.format(i['experiment_name'])

    with open('./configs/{}'.format(config_name), 'w') as f:
        print('[INFO] Writing - {}'.format(config_name))
        json.dump(i, f)

    with open('./main/main.sh', 'a') as f:
        f.write('python mnist_lenet.py -c ../configs/{}\n'.format(config_name))
Example #23
def test_create_existing_dir(sub_done):
    with pytest.raises(FileExistsError):
        mkdir(sub_done, 'dir')
Example #24
ROOT.gROOT.SetBatch(True)
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)


plotdir = 'diff_%s' % os.environ['tag']
train_var_out = {
   -1 : '%s/single_value' % plotdir,
    0 : '%s/vectorial/0th' % plotdir,
    1 : '%s/vectorial/1st' % plotdir,
    2 : '%s/vectorial/2nd' % plotdir,
    3 : '%s/vectorial/3rd+' % plotdir
}
for i in train_var_out.values():
   mkdir(i)

infile = io.root_open('analyzed/jet_by_jet_diff.root')
inview = views.StyleView(
   infile,
   fillstyle = 'hollow',
   linewidth = 2,
   markerstyle= 0,
   drawstyle='hist',
   legendstyle='l',
   linecolor = 'black'
   )
histos = [i.name for i in infile.keys()]

for path in histos:
   plot_dir = ''
Example #25
nice_names = {
   'c' : 'charm', 
   'b' : 'b', 
   'l' : 'light', 
   'sum' : 'all'
}

disc_shapes = {
 'pat' : dict((i, {}) for i in flavors),
 'flat_tree' : dict((i, {}) for i in flavors),
 'varex_tree' : dict((i, {}) for i in flavors),
}

colors = ['#ab5555', '#FFCC66', '#2aa198']
plotdir = 'plots_%s' % os.environ['tag']
mkdir(plotdir)

for prefix, infile in inputs.iteritems():
   mkdir('%s/%s' % (plotdir, prefix))

   for name in discrims:
      mva_out_view = views.StyleView(
         views.FunctorView(
            infile,
            lambda x: x.Rebin(4)
            ),
         fillstyle = 'solid',
         linewidth = 0,
         markerstyle= 0,
         drawstyle='hist',
         legendstyle='f'
Example #26
    def mkdir(self):
        mkdir("rouge_summarization")
        mkdir(self.dump_dir)
Example #27
if location == "hpc":
    projectFiles = projectPath

helpersPath = os.path.expanduser(projectPath + "source/")
sys.path.insert(1, helpersPath)

import helpers

data_dir_base = projectFiles + "data/"
results_dir_base = projectFiles + "results/"

dataPath = data_dir_base + size
resultsPath_test = results_dir_base + "test/"
resultsPath = resultsPath_test + size

helpers.mkdir(resultsPath_test)
helpers.mkdir(resultsPath)

# Economic Parameters
beta = np.genfromtxt(dataPath + 'beta.csv', delimiter=',')
theta = np.genfromtxt(dataPath + 'theta.csv', delimiter=',')
mu = np.genfromtxt(dataPath + 'mu.csv', delimiter=',')
nu = np.genfromtxt(dataPath + 'nu.csv', delimiter=',')

params = {"beta": beta, "theta": theta, "mu": mu, "nu": nu}

# Data
tau = np.genfromtxt(dataPath + 'tau.csv', delimiter=',')
Xcif = np.genfromtxt(dataPath + 'Xcif.csv', delimiter=',')
Y = np.genfromtxt(dataPath + 'y.csv', delimiter=',')
Eq = np.genfromtxt(dataPath + 'Eq.csv', delimiter=',')
Example #28
def create_alexnet():

    helpers.mkdir('data')
    numpy_data_path = os.path.join('data', 'bvlc_alexnet.npy')
    download_url = 'https://www.dropbox.com/s/gl5wa3uzru555nd/bvlc_alexnet.npy?dl=1'
    print('Downloading pre-trained AlexNet weights.')
    helpers.download(download_url, numpy_data_path)
    print('Weights downloaded.')

    variable_data = np.load(numpy_data_path, encoding='bytes').item()

    conv1_preW = variable_data["conv1"][0]
    conv1_preb = variable_data["conv1"][1]
    conv2_preW = variable_data["conv2"][0]
    conv2_preb = variable_data["conv2"][1]
    conv3_preW = variable_data["conv3"][0]
    conv3_preb = variable_data["conv3"][1]
    conv4_preW = variable_data["conv4"][0]
    conv4_preb = variable_data["conv4"][1]
    conv5_preW = variable_data["conv5"][0]
    conv5_preb = variable_data["conv5"][1]
    fc6_preW = variable_data["fc6"][0]
    fc6_preb = variable_data["fc6"][1]
    fc7_preW = variable_data["fc7"][0]
    fc7_preb = variable_data["fc7"][1]
    fc8_preW = variable_data["fc8"][0]
    fc8_preb = variable_data["fc8"][1]

    pixel_depth = 255.0
    resized_height = 227
    resized_width = 227
    num_channels = 3

    print('Creating AlexNet model.')

    graph = tf.Graph()

    with graph.as_default():
        x = tf.placeholder(tf.uint8, [None, None, None, num_channels],
                           name='input')

        to_float = tf.cast(x, tf.float32)
        resized = tf.image.resize_images(to_float,
                                         [resized_height, resized_width])

        # Convolution 1
        with tf.name_scope('conv1') as scope:
            kernel = tf.Variable(conv1_preW, name='weights')
            biases = tf.Variable(conv1_preb, name='biases')
            conv = tf.nn.conv2d(resized, kernel, [1, 4, 4, 1], padding="SAME")
            bias = tf.nn.bias_add(conv, biases)
            conv1 = tf.nn.relu(bias, name=scope)

        # Local response normalization 2
        radius = 2
        alpha = 2e-05
        beta = 0.75
        bias = 1.0
        lrn1 = tf.nn.local_response_normalization(conv1,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias)

        # Maxpool 1
        pool1 = tf.nn.max_pool(lrn1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool1')

        # Convolution 2
        with tf.name_scope('conv2') as scope:

            kernel = tf.Variable(conv2_preW, name='weights')
            biases = tf.Variable(conv2_preb, name='biases')

            input_a, input_b = tf.split(pool1, 2, 3)
            kernel_a, kernel_b = tf.split(kernel, 2, 3)

            with tf.name_scope('A'):
                conv_a = tf.nn.conv2d(input_a,
                                      kernel_a, [1, 1, 1, 1],
                                      padding="SAME")

            with tf.name_scope('B'):
                conv_b = tf.nn.conv2d(input_b,
                                      kernel_b, [1, 1, 1, 1],
                                      padding="SAME")

            conv = tf.concat([conv_a, conv_b], 3)
            bias = tf.nn.bias_add(conv, biases)
            conv2 = tf.nn.relu(bias, name=scope)

        # Local response normalization 2
        radius = 2
        alpha = 2e-05
        beta = 0.75
        bias = 1.0
        lrn2 = tf.nn.local_response_normalization(conv2,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias)

        # Maxpool 2
        pool2 = tf.nn.max_pool(lrn2,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool2')

        with tf.name_scope('conv3') as scope:
            kernel = tf.Variable(conv3_preW, name='weights')
            biases = tf.Variable(conv3_preb, name='biases')
            conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding="SAME")
            bias = tf.nn.bias_add(conv, biases)
            conv3 = tf.nn.relu(bias, name=scope)

        with tf.name_scope('conv4') as scope:

            kernel = tf.Variable(conv4_preW, name='weights')
            biases = tf.Variable(conv4_preb, name='biases')

            input_a, input_b = tf.split(conv3, 2, 3)
            kernel_a, kernel_b = tf.split(kernel, 2, 3)

            with tf.name_scope('A'):
                conv_a = tf.nn.conv2d(input_a,
                                      kernel_a, [1, 1, 1, 1],
                                      padding="SAME")

            with tf.name_scope('B'):
                conv_b = tf.nn.conv2d(input_b,
                                      kernel_b, [1, 1, 1, 1],
                                      padding="SAME")

            conv = tf.concat([conv_a, conv_b], 3)
            bias = tf.nn.bias_add(conv, biases)
            conv4 = tf.nn.relu(bias, name=scope)

        with tf.name_scope('conv5') as scope:

            kernel = tf.Variable(conv5_preW, name='weights')
            biases = tf.Variable(conv5_preb, name='biases')

            input_a, input_b = tf.split(conv4, 2, 3)
            kernel_a, kernel_b = tf.split(kernel, 2, 3)

            with tf.name_scope('A'):
                conv_a = tf.nn.conv2d(input_a,
                                      kernel_a, [1, 1, 1, 1],
                                      padding="SAME")

            with tf.name_scope('B'):
                conv_b = tf.nn.conv2d(input_b,
                                      kernel_b, [1, 1, 1, 1],
                                      padding="SAME")

            conv = tf.concat([conv_a, conv_b], 3)
            bias = tf.nn.bias_add(conv, biases)
            conv5 = tf.nn.relu(bias, name=scope)

        # Maxpool 2
        pool5 = tf.nn.max_pool(conv5,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool5')

        # Fully connected 6
        with tf.name_scope('fc6'):
            weights = tf.Variable(fc6_preW, name='fc6_weights')
            bias = tf.Variable(fc6_preb, name='fc6_bias')
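            # Flatten the pooled feature map into a 2-D batch before the matmul.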
            shape = tf.shape(pool5)
            size = shape[1] * shape[2] * shape[3]
            z = tf.matmul(tf.reshape(pool5, [-1, size]), weights) + bias
            fc6 = tf.nn.relu(z, name='relu')

        # Fully connected 7
        with tf.name_scope('fc7'):
            weights = tf.Variable(fc7_preW, name='weights')
            bias = tf.Variable(fc7_preb, name='bias')
            z = tf.matmul(fc6, weights) + bias
            fc7 = tf.nn.relu(z, name='relu')

        # Fully connected 8
        with tf.name_scope('fc8'):
            weights = tf.Variable(fc8_preW, name='weights')
            bias = tf.Variable(fc8_preb, name='bias')
            fc8 = tf.matmul(fc7, weights) + bias

        softmax = tf.nn.softmax(fc8)

        init = tf.global_variables_initializer()

    print('Model created.')

    sess = tf.Session(graph=graph)
    sess.run(init)

    print('Exporting TensorBoard graph to tbout/alexnet')
    writer = tf.summary.FileWriter('tbout/alexnet', graph=graph)
    writer.close()

    print('Exporting TensorFlow model to data/alexnet')
    with graph.as_default():
        saver = tf.train.Saver()
        save_path = saver.save(sess, 'data/alexnet')
Example #29
    def mkdir(self):
        mkdir(self.dump_dir)
Example #30
    def __init__(self,
                 location,
                 size,
                 bootstrap=False,
                 bootstrap_id=0,
                 mil_off=False,
                 base_path=None):

        # CALIBRATED PARAMETERS

        self.eta = 1.5
        self.c_hat = 25

        # MACRO PATHS

        if base_path is None:
            mkdirs = True
            base_path = os.path.expanduser('~')
        else:
            mkdirs = False

        if location == "local":
            self.project_files = base_path + "/Dropbox (Princeton)/1_Papers/tpsp/01_files/"
        if location == "hpc":
            self.project_files = base_path + "/tpsp/"

        self.data_path_0 = self.project_files + "data/"
        self.results_path_0 = self.project_files + "results/"

        self.data_path_base = self.data_path_0 + size
        self.results_path_base = self.results_path_0 + size

        if mil_off == False:
            self.estimates_path_base = self.results_path_base + "estimates/"
        else:
            self.estimates_path_base = self.results_path_base + "estimates_mil_off/"

        if bootstrap == True:
            self.data_path = self.data_path_base + str(bootstrap_id) + "/"
            self.estimates_path = self.estimates_path_base + str(
                bootstrap_id) + "/"
        else:
            self.data_path = self.data_path_base
            self.estimates_path = self.estimates_path_base

        self.counterfactuals_path = self.results_path_base + "counterfactuals/"
        self.xlhvt_star_path = self.estimates_path + "x.csv"

        self.figs_path_0 = "../02_figs/"

        # DATA (STATIC)

        self.icews_reduced_path = "~/Dropbox (Princeton)/Public/reducedICEWS/"
        self.gdp_raw_path = self.data_path_base + "gdp_raw.csv"

        # DATA (DYNAMIC)

        self.ccodes_path = self.data_path + "ccodes.csv"
        self.ROWname_path = self.data_path + "ROWname.csv"

        self.beta_path = self.data_path + "beta.csv"
        self.theta_path = self.data_path + "theta.csv"
        self.mu_path = self.data_path + "mu.csv"
        self.nu_path = self.data_path + "nu.csv"

        self.tau_path = self.data_path + "tau.csv"
        self.Xcif_path = self.data_path + "Xcif.csv"
        self.Y_path = self.data_path + "y.csv"
        self.Eq_path = self.data_path + "Eq.csv"
        self.Ex_path = self.data_path + "Ex.csv"
        self.r_path = self.data_path + "r.csv"
        self.D_path = self.data_path + "d.csv"

        self.dists_path = self.data_path + "cDists.csv"
        self.M_path = self.data_path + "milex.csv"

        # OUTPUT

        self.icews_counts_path = self.data_path_0 + "icews_counts.csv"
        self.rcv_ft_path = self.results_path_base + "rcv_ft.csv"
        self.M2030_path = self.results_path_base + "M2030.csv"
        self.M2030_raw_path = self.results_path_base + "M2030_raw.csv"

        self.quantiles_v_path = self.estimates_path_base + "quantiles_v.csv"
        self.quantiles_gamma_path = self.estimates_path_base + "quantiles_gamma.csv"
        self.quantiles_alpha1_path = self.estimates_path_base + "quantiles_alpha1.csv"
        self.quantiles_alpha2_path = self.estimates_path_base + "quantiles_alpha2.csv"
        self.quantiles_rcv_path = self.estimates_path_base + "quantiles_rcv.csv"
        self.quantiles_peace_probs_path = self.estimates_path_base + "quantiles_peace_probs.csv"
        self.quantiles_tau_path = self.estimates_path_base + "quantiles_tau.csv"
        self.quantiles_Ghat_path = self.estimates_path_base + "quantiles_Ghat.csv"
        self.quantiles_Uhat1_path = self.estimates_path_base + "quantiles_Uhat1.csv"

        # COUNTERFACTUAL PATHS

        self.cfct_demilitarization_path = self.counterfactuals_path + "demilitarization/"
        self.cfct_china_path = self.counterfactuals_path + "china/"
        self.cfct_china_v_path = self.counterfactuals_path + "china_v/"
        self.cfct_us_path = self.counterfactuals_path + "us/"

        # FIGURE PATHS

        self.f_ccodes_path = self.figs_path_0 + "ccodes.png"
        self.f_ccodes_wide_path = self.figs_path_0 + "ccodes_wide.png"
        self.f_cfact_demilitarization_Xprime_path = self.figs_path_0 + "cfact_demilitarization_Xprime.png"
        self.f_cfact_demilitarization_G_path = self.figs_path_0 + "cfact_demilitarization_G.png"
        self.f_cfact_demilitarization_U_path = self.figs_path_0 + "cfact_demilitarization_U.png"
        self.f_cfact_china_Xprime_path = self.figs_path_0 + "cfact_china_Xprime.png"
        self.f_cfact_china_G_path = self.figs_path_0 + "cfact_china_G.png"
        self.f_cfact_china_U_path = self.figs_path_0 + "cfact_china_U.png"
        self.f_cfact_china_tau_path = self.figs_path_0 + "cfact_china_tau.png"
        self.f_estimates_pref_path = self.figs_path_0 + "estimates_pref.png"
        self.f_estimates_pref_mo_path = self.figs_path_0 + "estimates_pref_mo.png"
        self.f_estimates_mil_path = self.figs_path_0 + "estimates_mil.png"
        self.f_fit_path = self.figs_path_0 + "fit.png"
        self.f_fit_eps_path = self.figs_path_0 + "fit_eps.png"
        self.f_milex_path = self.figs_path_0 + "milex.png"
        self.f_pr_peace_path = self.figs_path_0 + "pr_peace.png"
        self.f_rcv_path = self.figs_path_0 + "rcv.png"
        self.f_tau_epbt_path = self.figs_path_0 + "tau_epbt.png"
        self.f_tau_rf_dw_path = self.figs_path_0 + "tau_rf_dw.png"
        self.f_tau_rf_table_path = self.figs_path_0 + "tau_rf_table.png"

        # MAKE DIRECTORIES

        if mkdirs == True:
            helpers.mkdir(self.results_path_base)
            helpers.mkdir(self.estimates_path)
            helpers.mkdir(self.counterfactuals_path)
            helpers.mkdir(self.cfct_demilitarization_path)
            helpers.mkdir(self.cfct_china_path)
            helpers.mkdir(self.cfct_us_path)
            helpers.mkdir(self.cfct_china_v_path)
Example #31
    def get_file_path(self, root):
        path_scores = os.path.join(self.dump_dir, root[root.rfind("/")+1:])
        mkdir(path_scores)
        return path_scores
Example #32
def run(args, server):
    env = create_env(args.env_id,
                     client_id=str(args.task),
                     remotes=args.remotes)
    if args.cfg not in (None, False):
        # load the configuration file
        cfg_dir = args.cfg + '.yaml'
        if os.path.exists(cfg_dir):
            pass
        else:
            cfg_dir = '../cfg/cfg.yaml'
        with open(cfg_dir, 'r+') as handle:
            cfg = yaml.load(
                handle
            )  # Data is in the form of a demonstration manager defined in another class.
            # if the path already exists load the cfg file in there.
        if cfg['demonstrations'] != 'None':
            trainer = A3C_gail(env, args.task, args.visualise, cfg)
        else:
            trainer = A3C(env, args.task, args.visualise, record=args.record)
    else:
        trainer = A3C(env, args.task, args.visualise, record=args.record)

    # Variable names that start with "local" are not saved in checkpoints.
    if use_tf12_api:
        variables_to_save = [
            v for v in tf.global_variables()
            if not (v.name.startswith("local") or v.name.startswith("rif"))
        ]
        for v in variables_to_save:
            print v.name
        print[v.name for v in tf.global_variables()]
        init_op = tf.variables_initializer(variables_to_save)
        init_all_op = tf.global_variables_initializer()
    else:
        variables_to_save = [
            v for v in tf.all_variables()
            if not (v.name.startswith("local") or v.name.startswith("rif"))
        ]
        init_op = tf.initialize_variables(variables_to_save)
        init_all_op = tf.initialize_all_variables()
    saver = FastSaver(variables_to_save)

    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 tf.get_variable_scope().name)
    logger.info('Trainable vars:')
    for v in var_list:
        logger.info('  %s %s', v.name, v.get_shape())

    def init_fn(ses):
        logger.info("Initializing all parameters.")
        ses.run(init_all_op)

    config = tf.ConfigProto(device_filters=[
        "/job:ps", "/job:worker/task:{}/cpu:0".format(args.task)
    ])

    logdir = os.path.join(args.log_dir, 'train')

    if use_tf12_api:
        summary_writer = tf.summary.FileWriter(logdir + "_%d" % args.task)
    else:
        summary_writer = tf.train.SummaryWriter(logdir + "_%d" % args.task)

    logger.info("Events directory: %s_%s", logdir, args.task)
    sv = tf.train.Supervisor(
        is_chief=(args.task == 0),
        logdir=logdir,
        saver=saver,
        summary_op=None,
        init_op=init_op,
        init_fn=init_fn,
        summary_writer=summary_writer,
        ready_op=tf.report_uninitialized_variables(variables_to_save),
        global_step=trainer.global_step,
        save_model_secs=30,
        save_summaries_secs=30)

    num_global_steps = 53348207

    logger.info(
        "Starting session. If this hangs, we're mostly likely waiting to connect to the parameter server. "
        +
        "One common cause is that the parameter server DNS name isn't resolving yet, or is misspecified."
    )

    if args.cfg not in (None, False):
        # Make the cfg history directory.
        mkdir(args.log_dir + '/cfg_hist')
        # Save the cfg file used during this restart of the training. One can change the main cfg file from the training directory. Nothing will be lost.
        hist_dir = args.log_dir + '/cfg_hist/'
        count = len([
            name for name in os.listdir(hist_dir)
            if os.path.isfile(os.path.join(hist_dir, name))
        ])
        with open(hist_dir + "cfg_" + str(count) + ".yaml", 'w+') as handle:
            yaml.dump(cfg, handle)

    with sv.managed_session(server.target,
                            config=config) as sess, sess.as_default():
        sess.run(trainer.sync)
        #sess.run(trainer.reward_iface.sync)
        trainer.start(sess, summary_writer)
        global_step = sess.run(trainer.global_step)
        print "GLOBAL STEPS", global_step
        logger.info("Starting training at step=%d", global_step)
        while not sv.should_stop() and (not num_global_steps
                                        or global_step < num_global_steps):
            rollout = trainer.process(sess)
            global_step = sess.run(trainer.global_step)

    # Ask for all the services to stop.
    sv.stop()
    logger.info('reached %s steps. worker stopped.', global_step)
    os.system("tmux kill-session")