def run(**kwargs):
    """GPU worker: run a tiny TF graph and return a formatted debug summary.

    Keyword args override the module-level ``defaults`` through a ``ParamDict``;
    the resulting namespace's ``x`` attribute scales the matmul output.

    Returns:
        str: a human-readable block with the params, the GPU this worker was
        pinned to, and the computed result.
    """
    args = ParamDict(defaults)
    args.from_dict(kwargs)
    ns = args.to_namespace()

    # GPU id this process was pinned to — presumably set by the scheduler
    # before launch (TODO confirm; raises KeyError if unset).
    gpu = os.environ["CUDA_VISIBLE_DEVICES"]

    # Throwaway graph: (100000, 3) @ (3, 2), elementwise-scaled by param `x`.
    a = tf.random_uniform([100000, 3])
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)
    d = tf.multiply(c, ns.x)

    # `with` closes the session even if sess.run raises; the original
    # called sess.close() unconditionally and leaked on failure.
    with tf.Session() as sess:
        res = sess.run(d)

    # NOTE(review): there is no '\n' between {res} and the dashes — looks
    # like a formatting oversight, but kept byte-identical on purpose.
    debug = "INSIDE GPU WORKER ---------------\n" \
            "params: {params}\n" \
            "using GPU: {env}\n " \
            "result: \n {res}" \
            "-----------------------------------".format(params=args, env=gpu, res=res)

    # Clear the default graph so repeated worker calls don't accumulate nodes.
    tf.reset_default_graph()
    return debug
'f_init': (str, "uniform", ["normal", "uniform"]), 'f_init_val': (float, 0.01), 'logit_bias': (bool, False), # regularisation 'clip_grads': (bool, True), # if true clips by local norm, else clip by norm of all gradients 'clip_local': (bool, True), 'clip_value': (float, 1.0), 'dropout': (bool, False), 'embed_dropout': (bool, True), 'keep_prob': (float, 0.95), 'l2_loss': (bool, True), 'l2_loss_coef': (float, 1e-6), } arg_dict = ParamDict(defaults) def run(**kwargs): arg_dict.from_dict(kwargs) args = arg_dict.to_namespace() # ====================================================================================== # Load Params, Prepare results assets # ====================================================================================== # os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) # print(args.corpus) # Experiment parameter summary res_param_filename = os.path.join(args.out_dir, "params_{id}.csv".format(id=args.run_id))