# --concat_comp: whether to re-append the elemental-composition vector after
# global summation (pooling) of the crystal feature.
# BUG FIX: argparse `type=bool` is a classic pitfall -- bool('False') is True,
# because any non-empty string is truthy.  Parse the flag string explicitly so
# `--concat_comp False` actually yields False; default and `True` unchanged.
parser.add_argument('--concat_comp', default=False,
                    type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                    help='option to re-use vector of elemental composition after global summation of crystal feature.(default: False)')
args = parser.parse_args(sys.argv[1:])

# GATGNN --- parameters pulled from the parsed CLI options.
crystal_property = args.property
data_src = args.data_src
material_name = args.to_predict
# Only the third return value (RSM) is needed here; the first two are discarded.
# NOTE(review): third positional argument `True` -- presumably a prediction-mode
# flag; confirm against use_property's definition.
_, _, RSM = use_property(crystal_property, data_src, True)
norm_action, classification = set_model_properties(crystal_property)

# Network architecture hyper-parameters.
number_layers = args.num_layers
number_neurons = args.num_neurons
n_heads = args.num_heads
xtra_l = args.use_hidden_layers
global_att = args.global_attention
attention_technique = args.cluster_option
concat_comp = args.concat_comp

# SETTING UP CODE TO RUN ON GPU
gpu_id = 0
device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')

# MODEL HYPER-PARAMETERS
# --concat_comp: whether to re-append the elemental-composition vector after
# global summation (pooling) of the crystal feature.
# FIX 1: the original line was truncated mid-statement (the opening
# `parser.add_argument(` was missing); restored so the call is valid Python.
# FIX 2: argparse `type=bool` is a pitfall -- bool('False') is True, because
# any non-empty string is truthy.  Parse the flag string explicitly so
# `--concat_comp False` actually yields False; default and `True` unchanged.
parser.add_argument('--concat_comp', default=False,
                    type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                    help='option to re-use vector of elemental composition after global summation of crystal feature.(default: False)')
parser.add_argument('--train_size', default=0.8, type=float,
                    help='ratio size of the training-set (default:0.8)')
args = parser.parse_args(sys.argv[1:])

# GATGNN --- parameters pulled from the parsed CLI options.
crystal_property = args.property
data_src = args.data_src
source_comparison, training_num, RSM = use_property(crystal_property, data_src)
norm_action, classification = set_model_properties(crystal_property)

# If the dataset does not prescribe a training size, fall back to the
# user-supplied --train_size ratio.
# FIX 3: compare to None with `is`, not `==` (PEP 8 singleton comparison).
if training_num is None:
    training_num = args.train_size

# Network architecture hyper-parameters.
number_layers = args.num_layers
number_neurons = args.num_neurons
n_heads = args.num_heads
xtra_l = args.use_hidden_layers
global_att = args.global_attention
attention_technique = args.cluster_option
concat_comp = args.concat_comp

# SETTING UP CODE TO RUN ON GPU
gpu_id = 0
device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')