def test_pull_box_sparse_op(self):
    """Build a static-graph program that contains the pull_box_sparse op.

    Only graph construction is exercised here; the program is never run.
    """
    paddle.enable_static()
    main_prog = fluid.Program()
    with fluid.program_guard(main_prog):
        # Two int64 id inputs feeding one pull_box_sparse lookup of width 1.
        ids_a = fluid.layers.data(name='x',
                                  shape=[1],
                                  dtype='int64',
                                  lod_level=0)
        ids_b = fluid.layers.data(name='y',
                                  shape=[1],
                                  dtype='int64',
                                  lod_level=0)
        emb_a, emb_b = _pull_box_sparse([ids_a, ids_b], size=1)
def run_boxps_preload(self, is_cpu=True):
    """Train one pass from each of two BoxPS datasets.

    The second dataset is preloaded asynchronously while the first one is
    being trained on, then trained after ``wait_preload_done``.  Temporary
    slot-data files are written up front and removed at the end.
    """
    in_x = fluid.layers.data(name='x',
                             shape=[1],
                             dtype='int64',
                             lod_level=0)
    in_y = fluid.layers.data(name='y',
                             shape=[1],
                             dtype='int64',
                             lod_level=0)
    emb_x, emb_y = _pull_box_sparse([in_x, in_y], size=2)
    # Single-input form of the same op, printed so it is not pruned.
    emb_single = _pull_box_sparse(in_x, size=2)
    layers.Print(emb_single)
    merged = layers.concat([emb_x, emb_y], axis=1)
    fc = layers.fc(input=merged,
                   name="fc",
                   size=1,
                   num_flatten_dims=1,
                   bias_attr=False)
    loss = layers.reduce_mean(fc)
    layers.Print(loss)
    # Fall back to CPU whenever CUDA is unavailable.
    if is_cpu or not core.is_compiled_with_cuda():
        place = fluid.CPUPlace()
    else:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    optimizer = fluid.optimizer.SGD(learning_rate=0.5)
    batch_size = 2

    def binary_print(slot, fout):
        # Emit a slot as "<len> e0 e1 ... " (length-prefixed, space-terminated).
        tokens = [str(len(slot))] + [str(e) for e in slot]
        fout.write(" ".join(tokens) + " ")

    batch1 = np.ones(
        (batch_size, 2, 1)).astype("int64").reshape(batch_size, 2, 1)
    place_str = "cpu" if is_cpu else "gpu"
    filelist = ["test_hdfs_" + place_str + "_" + str(i) for i in range(2)]
    for fname in filelist:
        with open(fname, "w") as fout:
            for ins in batch1:
                for slot in ins:
                    binary_print(slot, fout)
                fout.write("\n")

    def create_dataset():
        # Fresh BoxPSDataset reading the files written above.
        dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset")
        dataset.set_use_var([in_x, in_y])
        dataset.set_batch_size(2)
        dataset.set_thread(1)
        dataset.set_filelist(filelist)
        return dataset

    datasets = [create_dataset(), create_dataset()]
    optimizer.minimize(loss)
    exe.run(fluid.default_startup_program())
    # Load + train dataset 0 while dataset 1 preloads in the background.
    datasets[0].load_into_memory()
    datasets[0].begin_pass()
    datasets[1].preload_into_memory()
    exe.train_from_dataset(program=fluid.default_main_program(),
                           dataset=datasets[0],
                           print_period=1)
    datasets[0].end_pass()
    datasets[1].wait_preload_done()
    datasets[1].begin_pass()
    exe.train_from_dataset(program=fluid.default_main_program(),
                           dataset=datasets[1],
                           print_period=1)
    datasets[1].end_pass()
    for fname in filelist:
        os.remove(fname)
def run_dataset(self, is_cpu):
    """Run the pipeline-trainer flow over self.dataset in both phases.

    Writes two small slot-data text files, trains once with the dataset in
    phase 1 (after preprocess_instance) and once in phase 0 (after
    postprocess_instance), then cleans the files up.  Falls back to CPU
    when CUDA is unavailable.
    """
    x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0)
    y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0)
    # NOTE(review): rank_offset is declared but never passed to set_use_var
    # below — presumably it only exercises the data-layer declaration;
    # confirm against the op this test targets.
    rank_offset = fluid.layers.data(name="rank_offset",
                                    shape=[-1, 7],
                                    dtype="int32",
                                    lod_level=0,
                                    append_batch_size=False)
    emb_x, emb_y = _pull_box_sparse([x, y], size=2)
    emb_xp = _pull_box_sparse(x, size=2)
    concat = layers.concat([emb_x, emb_y], axis=1)
    fc = layers.fc(input=concat,
                   name="fc",
                   size=1,
                   num_flatten_dims=1,
                   bias_attr=False)
    loss = layers.reduce_mean(fc)
    place = fluid.CPUPlace(
    ) if is_cpu or not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    # Two input files in "<len> <value> ..." slot format, one record per line.
    with open("test_run_with_dump_a.txt", "w") as f:
        data = "1 1702f830eee19501ad7429505f714c1d 1 1 1 9\n"
        data += "1 1702f830eee19502ad7429505f714c1d 1 2 1 8\n"
        data += "1 1702f830eee19503ad7429505f714c1d 1 3 1 7\n"
        data += "1 1702f830eee0de01ad7429505f714c2d 1 4 1 6\n"
        data += "1 1702f830eee0df01ad7429505f714c3d 1 5 1 5\n"
        data += "1 1702f830eee0df02ad7429505f714c3d 1 6 1 4\n"
        f.write(data)
    with open("test_run_with_dump_b.txt", "w") as f:
        data = "1 1702f830fff22201ad7429505f715c1d 1 1 1 1\n"
        data += "1 1702f830fff22202ad7429505f715c1d 1 2 1 2\n"
        data += "1 1702f830fff22203ad7429505f715c1d 1 3 1 3\n"
        data += "1 1702f830fff22101ad7429505f714ccd 1 4 1 4\n"
        data += "1 1702f830fff22102ad7429505f714ccd 1 5 1 5\n"
        data += "1 1702f830fff22103ad7429505f714ccd 1 6 1 6\n"
        data += "1 1702f830fff22104ad7429505f714ccd 1 6 1 7\n"
        f.write(data)
    # self.dataset is created/configured by the subclass hook.
    self.set_data_config()
    self.dataset.set_use_var([x, y])
    self.dataset.set_filelist(
        ["test_run_with_dump_a.txt", "test_run_with_dump_b.txt"])
    optimizer = fluid.optimizer.SGD(learning_rate=0.5)
    # Wrap SGD in PipelineOptimizer so train_from_dataset takes the
    # pipeline-trainer code path.
    optimizer = fluid.optimizer.PipelineOptimizer(optimizer,
                                                  cut_list=[],
                                                  place_list=[place],
                                                  concurrency_list=[1],
                                                  queue_size=1,
                                                  sync_steps=-1)
    optimizer.minimize(loss)
    exe.run(fluid.default_startup_program())
    # Phase 1: load data, group instances via preprocess_instance, train.
    self.dataset.set_current_phase(1)
    self.dataset.load_into_memory()
    self.dataset.preprocess_instance()
    self.dataset.begin_pass()
    # NOTE(review): pv_num is fetched but unused — likely kept just to
    # exercise get_pv_data_size(); confirm before removing.
    pv_num = self.dataset.get_pv_data_size()
    exe.train_from_dataset(program=fluid.default_main_program(),
                           dataset=self.dataset,
                           print_period=1)
    # Phase 0: revert grouping via postprocess_instance and train again
    # within the same pass.
    self.dataset.set_current_phase(0)
    self.dataset.postprocess_instance()
    exe.train_from_dataset(program=fluid.default_main_program(),
                           dataset=self.dataset,
                           print_period=1)
    self.dataset.end_pass(True)
    os.remove("test_run_with_dump_a.txt")
    os.remove("test_run_with_dump_b.txt")
def run_boxps_preload(self, is_cpu=True, random_with_lineid=False):
    """Pipeline-optimizer variant of the BoxPS preload test.

    Trains two BoxPS datasets (the second is preloaded while the first
    trains) and configures random field/param dumping to ./dump_log/ via
    the program's private _pipeline_opt dict.  random_with_lineid toggles
    the corresponding dump option.
    """
    program = fluid.Program()
    with fluid.program_guard(program):
        x = fluid.layers.data(name='x',
                              shape=[1],
                              dtype='int64',
                              lod_level=0)
        y = fluid.layers.data(name='y',
                              shape=[1],
                              dtype='int64',
                              lod_level=0)
        emb_x, emb_y = _pull_box_sparse([x, y], size=2)
        emb_xp = _pull_box_sparse(x, size=2)
        concat = layers.concat([emb_x, emb_y], axis=1)
        fc = layers.fc(input=concat,
                       name="fc",
                       size=1,
                       num_flatten_dims=1,
                       bias_attr=False)
        loss = layers.reduce_mean(fc)
        layers.Print(loss)
        place = fluid.CPUPlace(
        ) if is_cpu or not core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        batch_size = 100

        def binary_print(slot, fout):
            # Write a slot as "<len> e0 e1 ... " (length-prefixed,
            # space-terminated).
            fout.write(str(len(slot)) + " ")
            for e in slot:
                fout.write(str(e) + " ")

        batch1 = np.ones(
            (batch_size, 2, 1)).astype("int64").reshape(batch_size, 2, 1)
        filelist = []
        place_str = "cpu" if is_cpu else "gpu"
        for i in range(2):
            filelist.append("test_hdfs_" + place_str + "_" + str(i))
        for f in filelist:
            with open(f, "w") as fout:
                for ins in batch1:
                    for slot in ins:
                        binary_print(slot, fout)
                    fout.write("\n")

        def create_dataset():
            # Fresh BoxPSDataset over the files written above.
            dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset")
            dataset.set_date("20190930")
            dataset.set_use_var([x, y])
            dataset.set_batch_size(2)
            dataset.set_thread(1)
            dataset.set_filelist(filelist)
            return dataset

        datasets = []
        datasets.append(create_dataset())
        datasets.append(create_dataset())
        optimizer = fluid.optimizer.SGD(learning_rate=0.5)
        # Pipeline wrapper routes train_from_dataset through the pipeline
        # trainer, which is what reads _pipeline_opt below.
        optimizer = fluid.optimizer.PipelineOptimizer(optimizer,
                                                      cut_list=[],
                                                      place_list=[place],
                                                      concurrency_list=[1],
                                                      queue_size=1,
                                                      sync_steps=-1)
        optimizer.minimize(loss)
        # Dump configuration.  NOTE(review): "hehe" looks like a
        # deliberately nonexistent field to exercise the missing-field
        # path — confirm.
        program._pipeline_opt["dump_fields"] = [
            "fc.tmp_0", "fc.tmp_0@GRAD", "hehe"
        ]
        program._pipeline_opt["dump_fields_path"] = "./dump_log/"
        program._pipeline_opt["dump_param"] = ["fc.w_0"]
        program._pipeline_opt["enable_random_dump"] = True
        program._pipeline_opt["dump_interval"] = 10
        program._pipeline_opt["random_with_lineid"] = random_with_lineid
        exe.run(fluid.default_startup_program())
        # Train dataset 0 while dataset 1 preloads in the background.
        datasets[0].load_into_memory()
        datasets[0].begin_pass()
        datasets[1].preload_into_memory()
        exe.train_from_dataset(program=fluid.default_main_program(),
                               dataset=datasets[0],
                               print_period=1)
        datasets[0].end_pass(True)
        datasets[1].wait_preload_done()
        datasets[1].begin_pass()
        exe.train_from_dataset(program=fluid.default_main_program(),
                               dataset=datasets[1],
                               print_period=1,
                               debug=True)
        datasets[1].end_pass(False)
        # Clean up the generated data files and any dump output.
        for f in filelist:
            os.remove(f)
        if os.path.isdir("dump_log"):
            shutil.rmtree("dump_log")