# SVM-evaluation settings, written into the jobman-style `state` namespace
# (`state` is supplied by the surrounding experiment driver).

# Examples per minibatch at test time.
state.BATCH_TEST = 100
# Examples written per batch when producing libsvm-format files.
state.BATCH_CREATION_LIBSVM = 500
# Upper bound on the number of training examples given to the SVM.
state.NB_MAX_TRAINING_EXAMPLES_SVM = 10000
# NB_MAX_TRAINING_EXAMPLES_SVM = 1000  # FIXME: change back to 10000 -- 1000 is just for fast running during development
# NB_MAX_TRAINING_EXAMPLES_SVM = 100   # FIXME: change back to 10000 -- 100 is just for superfast running during development

# Presumably these drive a search over the SVM C parameter: start at
# SVM_INITIALC and scale by SVM_STEPFACTOR for up to SVM_MAXSTEPS steps
# -- TODO confirm against the code that consumes them.
state.SVM_INITIALC = 0.001
state.SVM_STEPFACTOR = 10.0
state.SVM_MAXSTEPS = 10

# Hard-coded path to your liblinear binaries:
# state.SVMPATH = '/work/glorotxa/netscale_sentiment_for_ET/lib/liblinear/'
state.SVMPATH = '/home/turian/dev/python/DARPA-preprocessor/preprocessor_baseline_UdeM/lib/install/bin/'

state.batchsize = 10

# The total number of files into which the training set is broken.
state.nb_files = 15

# Alternative data roots kept for reference:
# state.path_data = '/scratch/glorotxa/OpenTable/'
# state.path_data = '/home/turian/data/DARPAproject/randomprojection.dimensions=1000.seed=0.randomization=gaussian.mode=online.scale=0.172946.squash=erf/'
# state.path_data = '/home/turian/data/DARPAproject/randomprojection.dimensions=1000.seed=0.randomization=ternary.ternary_non_zero_percent=0.010000.mode=online.scale=1.748360.squash=erf/'
state.path_data = '/home/turian/data/DARPAproject/'

# Train and test (validation) here should be disjoint subsets of the
# original full training set.
state.name_traindata = 'OpenTable_5000_train_instances'
state.name_trainlabel = 'OpenTable_5000_train_labels'
state.name_testdata = 'OpenTable_5000_test_instances'
state.name_testlabel = 'OpenTable_5000_test_labels'
# epochstest = [[0,5,30],[0,5,30],[0,2,4,8,16,30]]

# NOTE(review): this section re-assigns the same state.* keys as the block
# above, with different SVMPATH / path_data values -- it looks like a
# duplicated paste of an earlier revision of the config. At runtime these
# later assignments win. Confirm which variant is intended and delete the
# other.

# Examples per minibatch at test time.
state.BATCH_TEST = 100
# Examples written per batch when producing libsvm-format files.
state.BATCH_CREATION_LIBSVM = 500
# Upper bound on the number of training examples given to the SVM.
state.NB_MAX_TRAINING_EXAMPLES_SVM = 10000
# NB_MAX_TRAINING_EXAMPLES_SVM = 1000  # FIXME: change back to 10000 -- 1000 is just for fast running during development
# NB_MAX_TRAINING_EXAMPLES_SVM = 100   # FIXME: change back to 10000 -- 100 is just for superfast running during development

# Presumably these drive a search over the SVM C parameter: start at
# SVM_INITIALC and scale by SVM_STEPFACTOR for up to SVM_MAXSTEPS steps
# -- TODO confirm against the code that consumes them.
state.SVM_INITIALC = 0.001
state.SVM_STEPFACTOR = 10.0
state.SVM_MAXSTEPS = 10

# Hard-coded path to your liblinear source:
state.SVMPATH = '/work/glorotxa/netscale_sentiment_for_ET/lib/liblinear/'

state.batchsize = 10

# The total number of files into which the training set is broken.
state.nb_files = 15

state.path_data = '/scratch/glorotxa/OpenTable/'

# Train and test (validation) here should be disjoint subsets of the
# original full training set.
state.name_traindata = 'OpenTable_5000_train_instances'
state.name_trainlabel = 'OpenTable_5000_train_labels'
state.name_testdata = 'OpenTable_5000_test_instances'
state.name_testlabel = 'OpenTable_5000_test_labels'

# If there is a model file specified to build upon, the output of this
# model is the input for the model we are currently building.