from caffe import layers as L
from caffe import params as P

"""
This function calls CaffeOnSpark to train the model.  It is similar in 
structure to the LeNext example, e.g., see
https://github.com/yahoo/CaffeOnSpark/wiki/GetStarted_python
In fact, the Python interface for CaffeOnSpark currently (July 2016)
allows for very little deviation from this format. 
"""

if __name__ == '__main__':

    # --- Spark / CaffeOnSpark bootstrap -------------------------------------
    # NOTE(review): SparkConf, SparkContext, SQLContext, CaffeOnSpark, Config,
    # registerContext and registerSQLContext are not imported in this chunk --
    # presumably imported earlier in the full file; verify.
    sparkConf = SparkConf().setAppName("BeijingTomorrow").setMaster("local")
    sc=SparkContext(conf=sparkConf)
    registerContext(sc)
    sqlContext = SQLContext(sc)
    registerSQLContext(sqlContext)
    cos=CaffeOnSpark(sc,sqlContext)
    cfg=Config(sc)
    # Project root is three directory levels above this source file.
    this_file = os.path.abspath(inspect.getfile(inspect.currentframe()))
    project_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_file)))
    # Solver prototxt + model output paths for the two models trained here.
    visualProtoFile= os.path.join(project_dir,"resources/caffe_prototxt/beijing_pollution_solver_visual.prototxt")
    visualModelFile= os.path.join(project_dir,"resources/caffe_models/beijing_pollution_model_visual.model")
    aerosolProtoFile= os.path.join(project_dir,"resources/caffe_prototxt/beijing_pollution_solver_aerosol.prototxt")
    aerosolModelFile= os.path.join(project_dir,"resources/caffe_models/beijing_pollution_model_aerosol.model")

    # Configure the "visual" model run; modelPath must be a 'file:' URI.
    # isFeature=True presumably requests feature extraction -- TODO confirm
    # against the CaffeOnSpark Config docs.
    cfg.protoFile = visualProtoFile
    cfg.modelPath = 'file:' + visualModelFile
    cfg.devices = 1
    cfg.isFeature=True
 def __init__(self,sc):
     """Expose the Scala ``Conversions`` singleton to Python.

     Registers *sc* with the JVM bridge, wraps the Scala companion object,
     and stores a Python-callable proxy plus a SQLContext on the instance.
     Attributes are written through ``self.__dict__`` directly, presumably
     to bypass a custom ``__setattr__`` on this class -- TODO confirm.
     """
     registerContext(sc)
     wrapClass("com.yahoo.ml.caffe.tools.Conversions$")
     scala_singleton = getScalaSingleton("com.yahoo.ml.caffe.tools.Conversions")
     self.__dict__['conversions'] = toPython(scala_singleton)
     self.__dict__['sqlContext'] = SQLContext(sc)
# Example #3
from caffe import SGDSolver
from caffe import layers as L
from caffe import params as P
"""
This function calls CaffeOnSpark to train the model.  It is similar in 
structure to the LeNext example, e.g., see
https://github.com/yahoo/CaffeOnSpark/wiki/GetStarted_python
In fact, the Python interface for CaffeOnSpark currently (July 2016)
allows for very little deviation from this format. 
"""

if __name__ == '__main__':

    # --- Spark / CaffeOnSpark bootstrap -------------------------------------
    # NOTE(review): SparkConf, SparkContext, SQLContext, CaffeOnSpark, Config,
    # registerContext and registerSQLContext are not imported in this chunk --
    # presumably imported earlier in the full file; verify.
    sparkConf = SparkConf().setAppName("BeijingTomorrow").setMaster("local")
    sc = SparkContext(conf=sparkConf)
    registerContext(sc)
    sqlContext = SQLContext(sc)
    registerSQLContext(sqlContext)
    cos = CaffeOnSpark(sc, sqlContext)
    cfg = Config(sc)
    # Project root is three directory levels above this source file.
    this_file = os.path.abspath(inspect.getfile(inspect.currentframe()))
    project_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_file)))
    # Solver prototxt + model paths (fragment is truncated after this point).
    visualProtoFile = os.path.join(
        project_dir,
        "resources/caffe_prototxt/beijing_pollution_solver_visual.prototxt")
    visualModelFile = os.path.join(
        project_dir,
        "resources/caffe_models/beijing_pollution_model_visual.model")
    aerosolProtoFile = os.path.join(
        project_dir,
        "resources/caffe_prototxt/beijing_pollution_solver_aerosol.prototxt")
# Example #4
 def __init__(self,sc):
     """Wire up the JVM ``Vocab`` helper for the given SparkContext.

     Registers *sc* with the JVM bridge, wraps the Scala Vocab class, and
     instantiates it over a fresh SQLContext.
     """
     registerContext(sc)
     vocab_cls = wrapClass("com.yahoo.ml.caffe.tools.Vocab")
     ctx = SQLContext(sc)
     self.vocab = vocab_cls
     self.sqlContext = ctx
     # The wrapped Scala class is instantiated with the SQLContext.
     self.vocabObject = vocab_cls(ctx)
# Example #5
 def __init__(self, sc):
     """Bind this wrapper to *sc* and build the Scala ``Vocab`` object.

     Steps: register the context with the JVM bridge, wrap the Scala class,
     create a SQLContext, then construct the Vocab instance from both.
     """
     registerContext(sc)
     wrapped = wrapClass("com.yahoo.ml.caffe.tools.Vocab")
     sql_ctx = SQLContext(sc)
     self.vocab = wrapped
     self.sqlContext = sql_ctx
     self.vocabObject = wrapped(sql_ctx)
 def __init__(self, sc):
     """Make the Scala ``Conversions`` singleton callable from Python.

     Writes go through ``self.__dict__`` directly, presumably to sidestep a
     custom ``__setattr__`` defined on this class -- TODO confirm.
     """
     registerContext(sc)
     wrapClass("com.yahoo.ml.caffe.tools.Conversions$")
     singleton = getScalaSingleton("com.yahoo.ml.caffe.tools.Conversions")
     attrs = self.__dict__
     attrs['conversions'] = toPython(singleton)
     attrs['sqlContext'] = SQLContext(sc)