def do_GET(self):
    """Dispatch master-server GET requests by URL path.

    Routes handled here: /version, /render, /result, /thumb, /log,
    /status, /job, /file and /slaves; any other path is handed over to
    the HTML status pages (netrender.master_html.get).

    Replies are written with self.send_head(<http status>) and file
    payloads are streamed to self.wfile via shutil.copyfileobj.
    """
    if self.path == "/version":
        self.send_head()
        self.server.stats("", "Version check")
        self.wfile.write(VERSION)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/render"):
        # /render<job_id><frame>: send one rendered EXR frame back to a client.
        match = render_pattern.match(self.path)
        if match:
            job_id = match.groups()[0]
            frame_number = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                frame = job[frame_number]
                if frame:
                    if frame.status in {netrender.model.FRAME_QUEUED, netrender.model.FRAME_DISPATCHED}:
                        # not rendered yet: tell the client to come back later
                        self.send_head(http.client.ACCEPTED)
                    elif frame.status == netrender.model.FRAME_DONE:
                        self.server.stats("", "Sending result to client")
                        filename = job.getResultPath(frame.getRenderFilename())
                        # 'with' guarantees the file is closed even if the copy fails
                        with open(filename, 'rb') as f:
                            self.send_head(content="image/x-exr")
                            shutil.copyfileobj(f, self.wfile)
                    elif frame.status == netrender.model.FRAME_ERROR:
                        self.send_head(http.client.PARTIAL_CONTENT)
                else:
                    # no such frame
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid url
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/result"):
        # /result<job_id>: zip up every finished frame's results and stream the archive.
        match = result_pattern.match(self.path)
        if match:
            job_id = match.groups()[0]
            job = self.server.getJobID(job_id)
            if job:
                self.server.stats("", "Sending result to client")
                zip_filepath = job.getResultPath("results.zip")
                with zipfile.ZipFile(zip_filepath, "w") as zfile:
                    for frame in job.frames:
                        if frame.status == netrender.model.FRAME_DONE:
                            for filename in frame.results:
                                filepath = job.getResultPath(filename)
                                zfile.write(filepath, filename)
                with open(zip_filepath, 'rb') as f:
                    self.send_head(content="application/x-zip-compressed")
                    shutil.copyfileobj(f, self.wfile)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid url
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/thumb"):
        # /thumb<job_id><frame>: lazily generate and send a JPEG thumbnail of a frame.
        match = thumb_pattern.match(self.path)
        if match:
            job_id = match.groups()[0]
            frame_number = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                frame = job[frame_number]
                if frame:
                    if frame.status in {netrender.model.FRAME_QUEUED, netrender.model.FRAME_DISPATCHED}:
                        self.send_head(http.client.ACCEPTED)
                    elif frame.status == netrender.model.FRAME_DONE:
                        filename = job.getResultPath(frame.getRenderFilename())
                        thumbname = thumbnail.generate(filename)
                        if thumbname:
                            with open(thumbname, 'rb') as f:
                                self.send_head(content="image/jpeg")
                                shutil.copyfileobj(f, self.wfile)
                        else:
                            # thumbnail couldn't be generated
                            self.send_head(http.client.PARTIAL_CONTENT)
                            return
                    elif frame.status == netrender.model.FRAME_ERROR:
                        self.send_head(http.client.PARTIAL_CONTENT)
                else:
                    # no such frame
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid url
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/log"):
        # /log<job_id><frame>: stream the frame's render log, or PROCESSING if not started.
        match = log_pattern.match(self.path)
        if match:
            job_id = match.groups()[0]
            frame_number = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                frame = job[frame_number]
                if frame:
                    if not frame.log_path or frame.status in {netrender.model.FRAME_QUEUED, netrender.model.FRAME_DISPATCHED}:
                        self.send_head(http.client.PROCESSING)
                    else:
                        self.server.stats("", "Sending log to client")
                        with open(frame.log_path, 'rb') as f:
                            self.send_head(content="text/plain")
                            shutil.copyfileobj(f, self.wfile)
                else:
                    # no such frame
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid URL
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path == "/status":
        # JSON status for one frame, one job, or every job (selected via headers).
        job_id = self.headers.get('job-id', "")
        job_frame = int(self.headers.get('job-frame', -1))
        if job_id:
            job = self.server.getJobID(job_id)
            if job:
                if job_frame != -1:
                    # FIX: was job[frame] — 'frame' is unbound here (NameError);
                    # the requested frame number lives in job_frame.
                    frame = job[job_frame]
                    if frame:
                        message = frame.serialize()
                    else:
                        # no such frame
                        self.send_head(http.client.NO_CONTENT)
                        return
                else:
                    message = job.serialize()
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
                return
        else:
            # status of all jobs
            message = []
            for job in self.server:
                message.append(job.serialize())
        self.server.stats("", "Sending status")
        self.send_head()
        self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path == "/job":
        # A slave asks for work: rebalance, dispatch frames, mark them as taken.
        self.server.balance()
        slave_id = self.headers['slave-id']
        slave = self.server.getSeenSlave(slave_id)
        if slave:  # only if slave id is valid
            job, frames = self.server.newDispatch(slave)
            if job and frames:
                for f in frames:
                    print("dispatch", f.number)
                    f.status = netrender.model.FRAME_DISPATCHED
                    f.slave = slave
                slave.job = job
                slave.job_frames = [f.number for f in frames]
                self.send_head(headers={"job-id": job.id})
                message = job.serialize(frames)
                self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
                self.server.stats("", "Sending job to slave")
            else:
                # no job available, return error code
                slave.job = None
                slave.job_frames = []
                self.send_head(http.client.ACCEPTED)
        else:  # invalid slave id
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/file"):
        # /file<job_id><index>: send a job's source file to a slave.
        match = file_pattern.match(self.path)
        if match:
            slave_id = self.headers['slave-id']
            slave = self.server.getSeenSlave(slave_id)
            if not slave:
                # NOTE(review): invalid slave id is only logged, the file is
                # still served below — confirm whether this is intentional.
                print("invalid slave id")
            job_id = match.groups()[0]
            file_index = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                render_file = job.files[file_index]
                if render_file:
                    self.server.stats("", "Sending file to slave")
                    with open(render_file.filepath, 'rb') as f:
                        self.send_head()
                        shutil.copyfileobj(f, self.wfile)
                else:
                    # no such file
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid url
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path == "/slaves":
        # JSON status of every known slave.
        message = []
        self.server.stats("", "Sending slaves status")
        for slave in self.server.slaves:
            message.append(slave.serialize())
        self.send_head()
        self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    else:
        # hand over the rest to the html section
        netrender.master_html.get(self)
def render_slave(engine, netsettings, threads):
    """Main loop of a render slave.

    Connects to the master, then repeatedly asks for a job, downloads
    the job files, runs the render (Blender subprocess, baking, or an
    arbitrary process command), streams logs back to the master while
    polling for cancellation, and finally uploads the resulting frames.
    Loops until engine.test_break() signals the user aborted.

    engine      -- Blender render engine (status reporting / abort test)
    netsettings -- add-on network settings (paths, master address, flags)
    threads     -- thread count passed to the Blender subprocess
    """
    bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)
    engine.update_stats("", "Network render node initiation")
    # Validate the slave's working directory before doing any network work.
    slave_path = bpy.path.abspath(netsettings.path)
    if not os.path.exists(slave_path):
        print("Slave working path ( %s ) doesn't exist" % netsettings.path)
        return
    if not os.access(slave_path, os.W_OK):
        print("Slave working path ( %s ) is not writable" % netsettings.path)
        return
    conn = clientConnection(netsettings)
    if not conn:
        # Retry the connection with an increasing (breakable) backoff.
        print("Connection failed, will try connecting again at most %i times" % MAX_CONNECT_TRY)
        bisleep.reset()
        for i in range(MAX_CONNECT_TRY):
            bisleep.sleep()
            conn = clientConnection(netsettings)
            if conn or engine.test_break():
                break
            print("Retry %i failed, waiting %is before retrying" % (i + 1, bisleep.current))
    if conn:
        # Register this slave with the master; the reply assigns our id.
        with ConnectionContext():
            conn.request("POST", "/slave", json.dumps(slave_Info(netsettings).serialize()))
        response = conn.getresponse()
        response.read()
        slave_id = response.getheader("slave-id")
        NODE_PREFIX = os.path.join(slave_path, "slave_" + slave_id)
        verifyCreateDir(NODE_PREFIX)
        engine.update_stats("", "Network render connected to master, waiting for jobs")
        while not engine.test_break():
            # Poll the master for a job assigned to this slave.
            with ConnectionContext():
                conn.request("GET", "/job", headers={"slave-id": slave_id})
            response = conn.getresponse()
            if response.status == http.client.OK:
                bisleep.reset()
                job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))
                engine.update_stats("", "Network render processing job from master")
                job_prefix = os.path.join(NODE_PREFIX, "job_" + job.id)
                verifyCreateDir(job_prefix)
                # set tempdir for fsaa temp files
                # have to set environ var because render is done in a subprocess and that's the easiest way to propagate the setting
                os.environ["TMP"] = job_prefix
                if job.type == netrender.model.JOB_BLENDER:
                    # Download the main .blend and its dependencies from the master.
                    job_path = job.files[0].original_path  # original path of the first file
                    main_path, main_file = os.path.split(job_path)
                    job_full_path = testFile(conn, job.id, slave_id, job.files[0], job_prefix)
                    print("Fullpath", job_full_path)
                    print("File:", main_file, "and %i other files" % (len(job.files) - 1, ))
                    for rfile in job.files[1:]:
                        testFile(conn, job.id, slave_id, rfile, job_prefix, main_path)
                        print("\t", rfile.filepath)
                    netrender.repath.update(job)
                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)
                elif job.type == netrender.model.JOB_VCS:
                    if not job.version_info:
                        # Need to return an error to server, incorrect job type
                        pass
                    job_path = job.files[0].filepath  # path of main file
                    main_path, main_file = os.path.split(job_path)
                    job.version_info.update()
                    # For VCS jobs, file path is relative to the working copy path
                    job_full_path = os.path.join(job.version_info.wpath, job_path)
                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)
                # announce log to master
                logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])
                with ConnectionContext():
                    conn.request("POST", "/log", bytes(json.dumps(logfile.serialize()), encoding='utf8'))
                response = conn.getresponse()
                response.read()
                first_frame = job.frames[0].number
                # start render
                start_t = time.time()
                if job.rendersWithBlender():
                    # Launch Blender in background mode rendering all frames at once.
                    frame_args = []
                    for frame in job.frames:
                        print("frame", frame.number)
                        frame_args += ["-f", str(frame.number)]
                    with NoErrorDialogContext():
                        process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-t", str(threads), "-o", os.path.join(job_prefix, "######"), "-E", job.render, "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                elif job.subtype == netrender.model.JOB_SUB_BAKING:
                    tasks = []
                    for frame in job.frames:
                        tasks.append(netrender.baking.commandToTask(frame.command))
                    with NoErrorDialogContext():
                        process = netrender.baking.bake(job, tasks)
                elif job.type == netrender.model.JOB_PROCESS:
                    command = job.frames[0].command
                    with NoErrorDialogContext():
                        process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                headers = {"slave-id": slave_id}
                results = []
                line = ""  # carry-over partial last line between log flushes

                # Shared state between this loop and the stdout-reader thread.
                class ProcessData:
                    def __init__(self):
                        self.lock = threading.Lock()   # guards self.stdout
                        self.stdout = bytes()          # accumulated process output
                        self.cancelled = False
                        self.start_time = time.time()
                        self.last_time = time.time()
                data = ProcessData()

                # Reader thread: drains the child's stdout so it never blocks on a full pipe.
                def run_process(process, data):
                    while not data.cancelled and process.poll() is None:
                        buf = process.stdout.read(1024)
                        data.lock.acquire()
                        data.stdout += buf
                        data.lock.release()
                process_thread = threading.Thread(target=run_process, args=(process, data))
                process_thread.start()

                # Supervise the render: flush logs to the master periodically
                # and watch for local (engine) or remote (master) cancellation.
                while not data.cancelled and process_thread.is_alive():
                    time.sleep(CANCEL_POLL_SPEED / 2)
                    current_time = time.time()
                    data.cancelled = engine.test_break()
                    if current_time - data.last_time > CANCEL_POLL_SPEED:
                        data.lock.acquire()
                        # update logs if needed
                        if data.stdout:
                            # (only need to update on one frame, they are linked
                            with ConnectionContext():
                                conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
                            responseStatus(conn)
                            stdout_text = str(data.stdout, encoding='utf8')
                            # Also output on console
                            if netsettings.use_slave_output_log:
                                print(stdout_text, end="")
                            lines = stdout_text.split("\n")
                            lines[0] = line + lines[0]
                            line = lines.pop()
                            if job.subtype == netrender.model.JOB_SUB_BAKING:
                                results.extend(netrender.baking.resultsFromOuput(lines))
                            data.stdout = bytes()
                        data.lock.release()
                        data.last_time = current_time
                        if testCancel(conn, job.id, first_frame):
                            engine.update_stats("", "Job canceled by Master")
                            data.cancelled = True
                process_thread.join()
                del process_thread
                if job.type == netrender.model.JOB_BLENDER:
                    netrender.repath.reset(job)
                # read leftovers if needed
                data.stdout += process.stdout.read()
                if data.cancelled:
                    # kill process if needed
                    if process.poll() is None:
                        try:
                            process.terminate()
                        except OSError:
                            pass
                    continue  # to next frame
                # flush the rest of the logs
                if data.stdout:
                    stdout_text = str(data.stdout, encoding='utf8')
                    # Also output on console
                    if netsettings.use_slave_output_log:
                        print(stdout_text, end="")
                    lines = stdout_text.split("\n")
                    lines[0] = line + lines[0]
                    if job.subtype == netrender.model.JOB_SUB_BAKING:
                        results.extend(netrender.baking.resultsFromOuput(lines))
                    # (only need to update on one frame, they are linked
                    with ConnectionContext():
                        conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
                    if responseStatus(conn) == http.client.NO_CONTENT:
                        continue
                total_t = time.time() - data.start_time
                avg_t = total_t / len(job.frames)
                status = process.returncode
                print("status", status)
                headers = {"job-id": job.id, "slave-id": slave_id, "job-time": str(avg_t)}
                if status == 0:  # non zero status is error
                    # Upload each frame's results (image, baking outputs, or just status).
                    headers["job-result"] = str(netrender.model.FRAME_DONE)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        if job.hasRenderResult():
                            # send image back to server
                            filename = os.path.join(job_prefix, "%06d.exr" % frame.number)
                            # thumbnail first
                            if netsettings.use_slave_thumb:
                                thumbname = thumbnail.generate(filename)
                                if thumbname:
                                    f = open(thumbname, 'rb')
                                    with ConnectionContext():
                                        conn.request("PUT", "/thumb", f, headers=headers)
                                    f.close()
                                    responseStatus(conn)
                            f = open(filename, 'rb')
                            with ConnectionContext():
                                conn.request("PUT", "/render", f, headers=headers)
                            f.close()
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue
                        elif job.subtype == netrender.model.JOB_SUB_BAKING:
                            index = job.frames.index(frame)
                            frame_results = [result_filepath for task_index, result_filepath in results if task_index == index]
                            for result_filepath in frame_results:
                                result_path, result_filename = os.path.split(result_filepath)
                                headers["result-filename"] = result_filename
                                headers["job-finished"] = str(result_filepath == frame_results[-1])
                                f = open(result_filepath, 'rb')
                                with ConnectionContext():
                                    conn.request("PUT", "/result", f, headers=headers)
                                f.close()
                                if responseStatus(conn) == http.client.NO_CONTENT:
                                    continue
                        elif job.type == netrender.model.JOB_PROCESS:
                            with ConnectionContext():
                                conn.request("PUT", "/render", headers=headers)
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue
                else:
                    # Render failed: report every frame as errored.
                    headers["job-result"] = str(netrender.model.FRAME_ERROR)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        # send error result back to server
                        with ConnectionContext():
                            conn.request("PUT", "/render", headers=headers)
                        if responseStatus(conn) == http.client.NO_CONTENT:
                            continue
                engine.update_stats("", "Network render connected to master, waiting for jobs")
            else:
                # No job available: back off before polling again.
                bisleep.sleep()
        conn.close()
        if netsettings.use_slave_clear:
            clearSlave(NODE_PREFIX)
def do_GET(self):
    """Dispatch master-server GET requests by URL path.

    Routes handled here: /version, /render, /thumb, /log, /status,
    /job, /file and /slaves; any other path is handed over to the HTML
    status pages (netrender.master_html.get).
    """
    if self.path == "/version":
        self.send_head()
        self.server.stats("", "Version check")
        self.wfile.write(VERSION)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/render"):
        # /render<job_id><frame>: send one rendered EXR frame back to a client.
        match = render_pattern.match(self.path)
        if match:
            job_id = match.groups()[0]
            frame_number = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                frame = job[frame_number]
                if frame:
                    if frame.status in (QUEUED, DISPATCHED):
                        # not rendered yet: tell the client to come back later
                        self.send_head(http.client.ACCEPTED)
                    elif frame.status == DONE:
                        self.server.stats("", "Sending result to client")
                        filename = os.path.join(job.save_path, "%06d.exr" % frame_number)
                        # 'with' guarantees the file is closed even if the copy fails
                        with open(filename, 'rb') as f:
                            self.send_head(content="image/x-exr")
                            shutil.copyfileobj(f, self.wfile)
                    elif frame.status == ERROR:
                        self.send_head(http.client.PARTIAL_CONTENT)
                else:
                    # no such frame
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid url
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/thumb"):
        # /thumb<job_id><frame>: lazily generate and send a JPEG thumbnail of a frame.
        match = thumb_pattern.match(self.path)
        if match:
            job_id = match.groups()[0]
            frame_number = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                frame = job[frame_number]
                if frame:
                    if frame.status in (QUEUED, DISPATCHED):
                        self.send_head(http.client.ACCEPTED)
                    elif frame.status == DONE:
                        filename = os.path.join(job.save_path, "%06d.exr" % frame_number)
                        thumbname = thumbnail.generate(filename)
                        if thumbname:
                            with open(thumbname, 'rb') as f:
                                self.send_head(content="image/jpeg")
                                shutil.copyfileobj(f, self.wfile)
                        else:
                            # thumbnail couldn't be generated
                            self.send_head(http.client.PARTIAL_CONTENT)
                            return
                    elif frame.status == ERROR:
                        self.send_head(http.client.PARTIAL_CONTENT)
                else:
                    # no such frame
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid url
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/log"):
        # /log<job_id><frame>: stream the frame's render log, or PROCESSING if not started.
        match = log_pattern.match(self.path)
        if match:
            job_id = match.groups()[0]
            frame_number = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                frame = job[frame_number]
                if frame:
                    if not frame.log_path or frame.status in (QUEUED, DISPATCHED):
                        self.send_head(http.client.PROCESSING)
                    else:
                        self.server.stats("", "Sending log to client")
                        with open(frame.log_path, 'rb') as f:
                            self.send_head(content="text/plain")
                            shutil.copyfileobj(f, self.wfile)
                else:
                    # no such frame
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid URL
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path == "/status":
        # JSON status for one frame, one job, or every job (selected via headers).
        job_id = self.headers.get('job-id', "")
        job_frame = int(self.headers.get('job-frame', -1))
        if job_id:
            job = self.server.getJobID(job_id)
            if job:
                if job_frame != -1:
                    # FIX: was job[frame] — 'frame' is unbound here (NameError);
                    # the requested frame number lives in job_frame.
                    frame = job[job_frame]
                    if frame:
                        message = frame.serialize()
                    else:
                        # no such frame
                        # FIX: was self.send_heat(...) — no such method (typo for send_head).
                        self.send_head(http.client.NO_CONTENT)
                        return
                else:
                    message = job.serialize()
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
                return
        else:
            # status of all jobs
            message = []
            for job in self.server:
                message.append(job.serialize())
        self.server.stats("", "Sending status")
        self.send_head()
        self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path == "/job":
        # A slave asks for work: rebalance, dispatch frames, mark them as taken.
        self.server.balance()
        slave_id = self.headers['slave-id']
        slave = self.server.getSeenSlave(slave_id)
        if slave:  # only if slave id is valid
            job, frames = self.server.newDispatch(slave_id)
            if job and frames:
                for f in frames:
                    print("dispatch", f.number)
                    f.status = DISPATCHED
                    f.slave = slave
                slave.job = job
                slave.job_frames = [f.number for f in frames]
                self.send_head(headers={"job-id": job.id})
                message = job.serialize(frames)
                self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
                self.server.stats("", "Sending job to slave")
            else:
                # no job available, return error code
                slave.job = None
                slave.job_frames = []
                self.send_head(http.client.ACCEPTED)
        else:  # invalid slave id
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path.startswith("/file"):
        # /file<job_id><index>: send a job's source file to a slave.
        match = file_pattern.match(self.path)
        if match:
            slave_id = self.headers['slave-id']
            slave = self.server.getSeenSlave(slave_id)
            if not slave:
                # NOTE(review): invalid slave id is only logged, the file is
                # still served below — confirm whether this is intentional.
                print("invalid slave id")
            job_id = match.groups()[0]
            file_index = int(match.groups()[1])
            job = self.server.getJobID(job_id)
            if job:
                render_file = job.files[file_index]
                if render_file:
                    self.server.stats("", "Sending file to slave")
                    with open(render_file.filepath, 'rb') as f:
                        self.send_head()
                        shutil.copyfileobj(f, self.wfile)
                else:
                    # no such file
                    self.send_head(http.client.NO_CONTENT)
            else:
                # no such job id
                self.send_head(http.client.NO_CONTENT)
        else:
            # invalid url
            self.send_head(http.client.NO_CONTENT)
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    elif self.path == "/slaves":
        # JSON status of every known slave.
        message = []
        self.server.stats("", "Sending slaves status")
        for slave in self.server.slaves:
            message.append(slave.serialize())
        self.send_head()
        self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    else:
        # hand over the rest to the html section
        netrender.master_html.get(self)
def render_slave(engine, netsettings, threads):
    """Main loop of a render slave (variant that renders through the
    bundled slave_render.py helper script instead of Blender's -f CLI).

    Connects to the master, then repeatedly asks for a job, downloads
    the job files, runs the render (Blender subprocess, baking, or an
    arbitrary process command), streams logs back to the master while
    polling for cancellation, and finally uploads the resulting frames.
    Loops until engine.test_break() signals the user aborted.

    engine      -- Blender render engine (status reporting / abort test)
    netsettings -- add-on network settings (paths, master address, flags)
    threads     -- thread count forwarded to slave_render.py
    """
    bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)
    engine.update_stats("", "Network render node initiation")
    # Validate the slave's working directory before doing any network work.
    slave_path = bpy.path.abspath(netsettings.path)
    if not os.path.exists(slave_path):
        print("Slave working path ( %s ) doesn't exist" % netsettings.path)
        return
    if not os.access(slave_path, os.W_OK):
        print("Slave working path ( %s ) is not writable" % netsettings.path)
        return
    conn = clientConnection(netsettings)
    if not conn:
        # Retry the connection with an increasing (breakable) backoff.
        print("Connection failed, will try connecting again at most %i times" % MAX_CONNECT_TRY)
        bisleep.reset()
        for i in range(MAX_CONNECT_TRY):
            bisleep.sleep()
            conn = clientConnection(netsettings)
            if conn or engine.test_break():
                break
            print("Retry %i failed, waiting %is before retrying" % (i + 1, bisleep.current))
    if conn:
        # Register this slave with the master; the reply assigns our id.
        with ConnectionContext():
            conn.request("POST", "/slave", json.dumps(slave_Info(netsettings).serialize()))
        response = conn.getresponse()
        response.read()
        slave_id = response.getheader("slave-id")
        NODE_PREFIX = os.path.join(slave_path, "slave_" + slave_id)
        verifyCreateDir(NODE_PREFIX)
        engine.update_stats("", "Network render connected to master, waiting for jobs")
        while not engine.test_break():
            # Poll the master for a job assigned to this slave.
            with ConnectionContext():
                conn.request("GET", "/job", headers={"slave-id":slave_id})
            response = conn.getresponse()
            if response.status == http.client.OK:
                bisleep.reset()
                job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))
                engine.update_stats("", "Network render processing job from master")
                job_prefix = os.path.join(NODE_PREFIX, "job_" + job.id)
                verifyCreateDir(job_prefix)
                # set tempdir for fsaa temp files
                # have to set environ var because render is done in a subprocess and that's the easiest way to propagate the setting
                os.environ["TMP"] = job_prefix
                if job.type == netrender.model.JOB_BLENDER:
                    # Download the main .blend and its dependencies from the master.
                    job_path = job.files[0].original_path  # original path of the first file
                    main_path, main_file = os.path.split(job_path)
                    job_full_path = testFile(conn, job.id, slave_id, job.files[0], job_prefix)
                    print("Fullpath", job_full_path)
                    print("File:", main_file, "and %i other files" % (len(job.files) - 1,))
                    for rfile in job.files[1:]:
                        testFile(conn, job.id, slave_id, rfile, job_prefix, main_path)
                        print("\t", rfile.filepath)
                    netrender.repath.update(job)
                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)
                elif job.type == netrender.model.JOB_VCS:
                    if not job.version_info:
                        # Need to return an error to server, incorrect job type
                        pass
                    job_path = job.files[0].filepath  # path of main file
                    main_path, main_file = os.path.split(job_path)
                    job.version_info.update()
                    # For VCS jobs, file path is relative to the working copy path
                    job_full_path = os.path.join(job.version_info.wpath, job_path)
                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)
                # announce log to master
                logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])
                with ConnectionContext():
                    conn.request("POST", "/log", bytes(json.dumps(logfile.serialize()), encoding='utf8'))
                response = conn.getresponse()
                response.read()
                first_frame = job.frames[0].number
                # start render
                start_t = time.time()
                if job.rendersWithBlender():
                    frame_args = []
                    for frame in job.frames:
                        print("frame", frame.number)
                        frame_args.append(str(frame.number))
                    # arguments to the script are passed in specific order
                    # [0] threads (imitates -t behaviour)
                    # [1] directory to save frame(s) in (imitates -o behaviour)
                    # [2] renderer (imitates -E behaviour)
                    # [3] type (imitates -F behaviour)
                    # [4] Override render device CPU/GPU
                    # [5] Render device CPU/GPU
                    # [6] Override tiles
                    # [7] Tile order
                    # [8] Tile size X
                    # [9] Tile size Y
                    # [10] frame numbers to render (imitates -f behaviour)
                    with NoErrorDialogContext():
                        process = subprocess.Popen(
                            [bpy.app.binary_path, "-b", "-y", "-noaudio", job_full_path,
                             "-P", os.path.join(os.path.dirname(os.path.realpath(__file__)), "slave_render.py"),
                             "--", str(threads),
                             os.path.join(job_prefix, ""),
                             job.render,
                             "OPEN_EXR_MULTILAYER",
                             str(int(netsettings.slave_override_cycles_compute_device_enabled)),
                             str(bpy.context.scene.cycles.device),
                             str(int(netsettings.slave_override_cycles_tiles_enabled)),
                             str(bpy.context.scene.cycles.tile_order),
                             str(bpy.context.scene.render.tile_x),
                             str(bpy.context.scene.render.tile_y),
                             ] + frame_args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                        )
                elif job.subtype == netrender.model.JOB_SUB_BAKING:
                    tasks = []
                    for frame in job.frames:
                        tasks.append(netrender.baking.commandToTask(frame.command))
                    with NoErrorDialogContext():
                        process = netrender.baking.bake(job, tasks)
                elif job.type == netrender.model.JOB_PROCESS:
                    command = job.frames[0].command
                    with NoErrorDialogContext():
                        process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                headers = {"slave-id":slave_id}
                results = []
                line = ""  # carry-over partial last line between log flushes

                # Shared state between this loop and the stdout-reader thread.
                class ProcessData:
                    def __init__(self):
                        self.lock = threading.Lock()   # guards self.stdout
                        self.stdout = bytes()          # accumulated process output
                        self.cancelled = False
                        self.start_time = time.time()
                        self.last_time = time.time()
                data = ProcessData()

                # Reader thread: drains the child's stdout so it never blocks on a full pipe.
                def run_process(process, data):
                    while not data.cancelled and process.poll() is None:
                        buf = process.stdout.read(1024)
                        data.lock.acquire()
                        data.stdout += buf
                        data.lock.release()
                process_thread = threading.Thread(target=run_process, args=(process, data))
                process_thread.start()

                # Supervise the render: flush logs to the master periodically
                # and watch for local (engine) or remote (master) cancellation.
                while not data.cancelled and process_thread.is_alive():
                    time.sleep(CANCEL_POLL_SPEED / 2)
                    current_time = time.time()
                    data.cancelled = engine.test_break()
                    if current_time - data.last_time > CANCEL_POLL_SPEED:
                        data.lock.acquire()
                        # update logs if needed
                        if data.stdout:
                            # (only need to update on one frame, they are linked
                            with ConnectionContext():
                                conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
                            responseStatus(conn)
                            stdout_text = str(data.stdout, encoding='utf8')
                            # Also output on console
                            if netsettings.use_slave_output_log:
                                print(stdout_text, end="")
                            lines = stdout_text.split("\n")
                            lines[0] = line + lines[0]
                            line = lines.pop()
                            if job.subtype == netrender.model.JOB_SUB_BAKING:
                                results.extend(netrender.baking.resultsFromOuput(lines))
                            data.stdout = bytes()
                        data.lock.release()
                        data.last_time = current_time
                        if testCancel(conn, job.id, first_frame):
                            engine.update_stats("", "Job canceled by Master")
                            data.cancelled = True
                process_thread.join()
                del process_thread
                if job.type == netrender.model.JOB_BLENDER:
                    netrender.repath.reset(job)
                # read leftovers if needed
                data.stdout += process.stdout.read()
                if data.cancelled:
                    # kill process if needed
                    if process.poll() is None:
                        try:
                            process.terminate()
                        except OSError:
                            pass
                    continue  # to next frame
                # flush the rest of the logs
                if data.stdout:
                    stdout_text = str(data.stdout, encoding='utf8')
                    # Also output on console
                    if netsettings.use_slave_output_log:
                        print(stdout_text, end="")
                    lines = stdout_text.split("\n")
                    lines[0] = line + lines[0]
                    if job.subtype == netrender.model.JOB_SUB_BAKING:
                        results.extend(netrender.baking.resultsFromOuput(lines))
                    # (only need to update on one frame, they are linked
                    with ConnectionContext():
                        conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
                    if responseStatus(conn) == http.client.NO_CONTENT:
                        continue
                total_t = time.time() - data.start_time
                avg_t = total_t / len(job.frames)
                status = process.returncode
                print("status", status)
                headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)}
                if status == 0:  # non zero status is error
                    # Upload each frame's results (image, baking outputs, or just status).
                    headers["job-result"] = str(netrender.model.FRAME_DONE)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        if job.hasRenderResult():
                            # send image back to server
                            filename = os.path.join(job_prefix, "%06d.exr" % frame.number)
                            # thumbnail first
                            if netsettings.use_slave_thumb:
                                thumbname = thumbnail.generate(filename)
                                if thumbname:
                                    f = open(thumbname, 'rb')
                                    with ConnectionContext():
                                        conn.request("PUT", "/thumb", f, headers=headers)
                                    f.close()
                                    responseStatus(conn)
                            f = open(filename, 'rb')
                            with ConnectionContext():
                                conn.request("PUT", "/render", f, headers=headers)
                            f.close()
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue
                        elif job.subtype == netrender.model.JOB_SUB_BAKING:
                            index = job.frames.index(frame)
                            frame_results = [result_filepath for task_index, result_filepath in results if task_index == index]
                            for result_filepath in frame_results:
                                result_path, result_filename = os.path.split(result_filepath)
                                headers["result-filename"] = result_filename
                                headers["job-finished"] = str(result_filepath == frame_results[-1])
                                f = open(result_filepath, 'rb')
                                with ConnectionContext():
                                    conn.request("PUT", "/result", f, headers=headers)
                                f.close()
                                if responseStatus(conn) == http.client.NO_CONTENT:
                                    continue
                        elif job.type == netrender.model.JOB_PROCESS:
                            with ConnectionContext():
                                conn.request("PUT", "/render", headers=headers)
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue
                else:
                    # Render failed: report every frame as errored.
                    headers["job-result"] = str(netrender.model.FRAME_ERROR)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        # send error result back to server
                        with ConnectionContext():
                            conn.request("PUT", "/render", headers=headers)
                        if responseStatus(conn) == http.client.NO_CONTENT:
                            continue
                engine.update_stats("", "Network render connected to master, waiting for jobs")
            else:
                # No job available: back off before polling again.
                bisleep.sleep()
        conn.close()
        if netsettings.use_slave_clear:
            clearSlave(NODE_PREFIX)
def render_slave(engine, netsettings, threads):
    """Main loop of a network-render slave node.

    Registers this machine with the master at
    ``netsettings.server_address:server_port``, then repeatedly pulls jobs,
    runs them (a Blender subprocess for render jobs, an arbitrary command for
    process jobs), streams the process log back to the master and uploads the
    rendered frames and optional thumbnails.  Loops until
    ``engine.test_break()`` reports cancellation.

    :param engine: render engine providing ``update_stats`` and ``test_break``
    :param netsettings: network-render settings (server address/port, slave
        working path and the ``use_slave_*`` options)
    :param threads: thread count passed to the Blender subprocess via ``-t``
    """
    bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)

    engine.update_stats("", "Network render node initiation")

    slave_path = bpy.path.abspath(netsettings.path)

    # bail out early if the working directory is unusable
    if not os.path.exists(slave_path):
        print("Slave working path ( %s ) doesn't exist" % netsettings.path)
        return

    if not os.access(slave_path, os.W_OK):
        print("Slave working path ( %s ) is not writable" % netsettings.path)
        return

    conn = clientConnection(netsettings.server_address, netsettings.server_port)

    if not conn:
        print("Connection failed, will try connecting again at most %i times" % MAX_CONNECT_TRY)
        bisleep.reset()

        for i in range(MAX_CONNECT_TRY):
            bisleep.sleep()

            conn = clientConnection(netsettings.server_address, netsettings.server_port)

            if conn or engine.test_break():
                break

            print("Retry %i failed, waiting %is before retrying" % (i + 1, bisleep.current))

    if conn:
        # register with the master and remember the id it assigns us
        conn.request("POST", "/slave", json.dumps(slave_Info().serialize()))
        response = conn.getresponse()
        response.read()

        slave_id = response.getheader("slave-id")

        NODE_PREFIX = os.path.join(slave_path, "slave_" + slave_id)
        if not os.path.exists(NODE_PREFIX):
            os.mkdir(NODE_PREFIX)

        engine.update_stats("", "Network render connected to master, waiting for jobs")

        while not engine.test_break():
            conn.request("GET", "/job", headers={"slave-id": slave_id})
            response = conn.getresponse()

            if response.status == http.client.OK:
                bisleep.reset()

                job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))
                engine.update_stats("", "Network render processing job from master")

                JOB_PREFIX = os.path.join(NODE_PREFIX, "job_" + job.id)
                if not os.path.exists(JOB_PREFIX):
                    os.mkdir(JOB_PREFIX)

                # set tempdir for fsaa temp files
                # have to set environ var because render is done in a subprocess
                # and that's the easiest way to propagate the setting
                os.environ["TMP"] = JOB_PREFIX

                if job.type == netrender.model.JOB_BLENDER:
                    job_path = job.files[0].filepath  # path of main file
                    main_path, main_file = os.path.split(job_path)

                    # fetch the main file and every dependency from the master
                    job_full_path = testFile(conn, job.id, slave_id, job.files[0], JOB_PREFIX)
                    print("Fullpath", job_full_path)
                    print("File:", main_file, "and %i other files" % (len(job.files) - 1,))

                    for rfile in job.files[1:]:
                        testFile(conn, job.id, slave_id, rfile, JOB_PREFIX, main_path)
                        print("\t", rfile.filepath)

                    netrender.repath.update(job)

                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)
                elif job.type == netrender.model.JOB_VCS:
                    if not job.version_info:
                        # Need to return an error to server, incorrect job type
                        # NOTE(review): currently falls through and will fail on
                        # job.version_info.update() below -- needs a real error
                        # reply to the master.
                        pass

                    job_path = job.files[0].filepath  # path of main file
                    main_path, main_file = os.path.split(job_path)

                    job.version_info.update()

                    # For VCS jobs, file path is relative to the working copy path
                    job_full_path = os.path.join(job.version_info.wpath, job_path)

                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)

                # announce log to master
                logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])
                conn.request("POST", "/log", bytes(json.dumps(logfile.serialize()), encoding='utf8'))
                response = conn.getresponse()
                response.read()

                first_frame = job.frames[0].number

                # start render
                start_t = time.time()

                if job.rendersWithBlender():
                    frame_args = []

                    for frame in job.frames:
                        print("frame", frame.number)
                        frame_args += ["-f", str(frame.number)]

                    # SetErrorMode/RestoreErrorMode bracket the spawn to keep
                    # the OS from popping up error dialogs (Windows)
                    val = SetErrorMode()
                    process = subprocess.Popen(
                        [BLENDER_PATH, "-b", "-noaudio",
                         job_full_path,
                         "-t", str(threads),
                         "-o", os.path.join(JOB_PREFIX, "######"),
                         "-E", "BLENDER_RENDER",
                         "-F", "MULTILAYER",
                         ] + frame_args,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT,
                        )
                    RestoreErrorMode(val)
                elif job.type == netrender.model.JOB_PROCESS:
                    command = job.frames[0].command
                    val = SetErrorMode()
                    process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    RestoreErrorMode(val)

                headers = {"slave-id": slave_id}

                cancelled = False
                stdout = bytes()
                run_t = time.time()

                # poll the render process, forwarding its output to the master
                # every CANCEL_POLL_SPEED seconds and checking for cancellation
                while not cancelled and process.poll() is None:
                    stdout += process.stdout.read(1024)
                    current_t = time.time()
                    cancelled = engine.test_break()
                    if current_t - run_t > CANCEL_POLL_SPEED:

                        # update logs if needed
                        if stdout:
                            # (only need to update on one frame, they are linked
                            conn.request("PUT", logURL(job.id, first_frame), stdout, headers=headers)
                            responseStatus(conn)

                            # Also output on console
                            if netsettings.use_slave_output_log:
                                print(str(stdout, encoding='utf8'), end="")

                            stdout = bytes()

                        run_t = current_t
                        if testCancel(conn, job.id, first_frame):
                            cancelled = True

                if job.type == netrender.model.JOB_BLENDER:
                    netrender.repath.reset(job)

                # read leftovers if needed
                stdout += process.stdout.read()

                if cancelled:
                    # kill process if needed
                    if process.poll() is None:
                        try:
                            process.terminate()
                        except OSError:
                            pass
                    continue  # to next frame

                # flush the rest of the logs
                if stdout:
                    # Also output on console
                    # FIX: was gated on use_slave_thumb (copy/paste error) --
                    # console echo is controlled by use_slave_output_log,
                    # matching the polling loop above.
                    if netsettings.use_slave_output_log:
                        print(str(stdout, encoding='utf8'), end="")

                    # (only need to update on one frame, they are linked
                    conn.request("PUT", logURL(job.id, first_frame), stdout, headers=headers)
                    if responseStatus(conn) == http.client.NO_CONTENT:
                        continue

                total_t = time.time() - start_t

                avg_t = total_t / len(job.frames)

                status = process.returncode

                print("status", status)

                headers = {"job-id": job.id, "slave-id": slave_id, "job-time": str(avg_t)}

                if status == 0:  # non zero status is error
                    # FIX: use the netrender.model frame-status constants for
                    # consistency with the rest of the file (bare DONE/ERROR
                    # appear nowhere else in this module)
                    headers["job-result"] = str(netrender.model.FRAME_DONE)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        if job.hasRenderResult():
                            # send image back to server

                            filename = os.path.join(JOB_PREFIX, "%06d.exr" % frame.number)

                            # thumbnail first
                            if netsettings.use_slave_thumb:
                                thumbname = thumbnail.generate(filename)

                                if thumbname:
                                    # with-block guarantees the handle closes
                                    # even if the request raises
                                    with open(thumbname, 'rb') as f:
                                        conn.request("PUT", "/thumb", f, headers=headers)
                                    responseStatus(conn)

                            with open(filename, 'rb') as f:
                                conn.request("PUT", "/render", f, headers=headers)
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue

                        elif job.type == netrender.model.JOB_PROCESS:
                            conn.request("PUT", "/render", headers=headers)
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue
                else:
                    headers["job-result"] = str(netrender.model.FRAME_ERROR)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        # send error result back to server
                        conn.request("PUT", "/render", headers=headers)
                        if responseStatus(conn) == http.client.NO_CONTENT:
                            continue

                engine.update_stats("", "Network render connected to master, waiting for jobs")
            else:
                bisleep.sleep()

        conn.close()

        if netsettings.use_slave_clear:
            clearSlave(NODE_PREFIX)
def render_slave(engine, netsettings, threads):
    """Main loop of a network-render slave node (legacy variant).

    Registers this machine with the master, then repeatedly pulls jobs, runs
    them (a Blender subprocess for render jobs, an arbitrary command for
    process jobs), streams the process log back to the master and uploads the
    rendered frames and optional thumbnails.  Loops until
    ``engine.test_break()`` reports cancellation.

    :param engine: render engine providing ``update_stats`` and ``test_break``
    :param netsettings: network-render settings (server address/port, slave
        working path and the ``use_slave_*`` options)
    :param threads: thread count passed to the Blender subprocess via ``-t``
    """
    # (removed two dead `timeout = 1` assignments -- the value was never read)
    bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)

    engine.update_stats("", "Network render node initiation")

    conn = clientConnection(netsettings.server_address, netsettings.server_port)

    if not conn:
        print("Connection failed, will try connecting again at most %i times" % MAX_CONNECT_TRY)
        bisleep.reset()

        for i in range(MAX_CONNECT_TRY):
            bisleep.sleep()

            conn = clientConnection(netsettings.server_address, netsettings.server_port)

            if conn or engine.test_break():
                break

            print("Retry %i failed, waiting %is before retrying" % (i + 1, bisleep.current))

    if conn:
        # register with the master and remember the id it assigns us
        conn.request("POST", "/slave", json.dumps(slave_Info().serialize()))
        response = conn.getresponse()
        response.read()

        slave_id = response.getheader("slave-id")

        NODE_PREFIX = os.path.join(bpy.path.abspath(netsettings.path), "slave_" + slave_id)
        if not os.path.exists(NODE_PREFIX):
            os.mkdir(NODE_PREFIX)

        engine.update_stats("", "Network render connected to master, waiting for jobs")

        while not engine.test_break():
            conn.request("GET", "/job", headers={"slave-id": slave_id})
            response = conn.getresponse()

            if response.status == http.client.OK:
                bisleep.reset()

                job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))
                engine.update_stats("", "Network render processing job from master")

                JOB_PREFIX = os.path.join(NODE_PREFIX, "job_" + job.id)
                if not os.path.exists(JOB_PREFIX):
                    os.mkdir(JOB_PREFIX)

                # set tempdir for fsaa temp files
                # have to set environ var because render is done in a subprocess
                # and that's the easiest way to propagate the setting
                os.environ["TMP"] = JOB_PREFIX

                if job.type == netrender.model.JOB_BLENDER:
                    job_path = job.files[0].filepath  # path of main file
                    main_path, main_file = os.path.split(job_path)

                    # fetch the main file and every dependency from the master
                    job_full_path = testFile(conn, job.id, slave_id, job.files[0], JOB_PREFIX)
                    print("Fullpath", job_full_path)
                    print("File:", main_file, "and %i other files" % (len(job.files) - 1,))

                    for rfile in job.files[1:]:
                        testFile(conn, job.id, slave_id, rfile, JOB_PREFIX, main_path)
                        print("\t", rfile.filepath)

                    netrender.repath.update(job)

                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)
                elif job.type == netrender.model.JOB_VCS:
                    if not job.version_info:
                        # Need to return an error to server, incorrect job type
                        # NOTE(review): currently falls through and will fail on
                        # job.version_info.update() below -- needs a real error
                        # reply to the master.
                        pass

                    job_path = job.files[0].filepath  # path of main file
                    main_path, main_file = os.path.split(job_path)

                    job.version_info.update()

                    # For VCS jobs, file path is relative to the working copy path
                    job_full_path = os.path.join(job.version_info.wpath, job_path)

                    engine.update_stats("", "Render File " + main_file + " for job " + job.id)

                # announce log to master
                logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])
                conn.request("POST", "/log", bytes(json.dumps(logfile.serialize()), encoding='utf8'))
                response = conn.getresponse()
                response.read()

                first_frame = job.frames[0].number

                # start render
                start_t = time.time()

                if job.rendersWithBlender():
                    frame_args = []

                    for frame in job.frames:
                        print("frame", frame.number)
                        frame_args += ["-f", str(frame.number)]

                    # SetErrorMode/RestoreErrorMode bracket the spawn to keep
                    # the OS from popping up error dialogs (Windows)
                    val = SetErrorMode()
                    process = subprocess.Popen(
                        [BLENDER_PATH, "-b", "-noaudio",
                         job_full_path,
                         "-t", str(threads),
                         "-o", os.path.join(JOB_PREFIX, "######"),
                         "-E", "BLENDER_RENDER",
                         "-F", "MULTILAYER",
                         ] + frame_args,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT,
                        )
                    RestoreErrorMode(val)
                elif job.type == netrender.model.JOB_PROCESS:
                    command = job.frames[0].command
                    val = SetErrorMode()
                    process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    RestoreErrorMode(val)

                headers = {"slave-id": slave_id}

                cancelled = False
                stdout = bytes()
                run_t = time.time()

                # poll the render process, forwarding its output to the master
                # every CANCEL_POLL_SPEED seconds and checking for cancellation
                while not cancelled and process.poll() is None:
                    stdout += process.stdout.read(1024)
                    current_t = time.time()
                    cancelled = engine.test_break()
                    if current_t - run_t > CANCEL_POLL_SPEED:

                        # update logs if needed
                        if stdout:
                            # (only need to update on one frame, they are linked
                            conn.request("PUT", logURL(job.id, first_frame), stdout, headers=headers)
                            response = conn.getresponse()
                            response.read()

                            # Also output on console
                            if netsettings.use_slave_output_log:
                                print(str(stdout, encoding='utf8'), end="")

                            stdout = bytes()

                        run_t = current_t
                        if testCancel(conn, job.id, first_frame):
                            cancelled = True

                if job.type == netrender.model.JOB_BLENDER:
                    netrender.repath.reset(job)

                # read leftovers if needed
                stdout += process.stdout.read()

                if cancelled:
                    # kill process if needed
                    if process.poll() is None:
                        try:
                            process.terminate()
                        except OSError:
                            pass
                    continue  # to next frame

                # flush the rest of the logs
                if stdout:
                    # Also output on console
                    # FIX: was gated on use_slave_thumb (copy/paste error) --
                    # console echo is controlled by use_slave_output_log,
                    # matching the polling loop above.
                    if netsettings.use_slave_output_log:
                        print(str(stdout, encoding='utf8'), end="")

                    # (only need to update on one frame, they are linked
                    conn.request("PUT", logURL(job.id, first_frame), stdout, headers=headers)
                    if responseStatus(conn) == http.client.NO_CONTENT:
                        continue

                total_t = time.time() - start_t

                avg_t = total_t / len(job.frames)

                status = process.returncode

                print("status", status)

                headers = {"job-id": job.id, "slave-id": slave_id, "job-time": str(avg_t)}

                if status == 0:  # non zero status is error
                    # FIX: use the netrender.model frame-status constants for
                    # consistency with the rest of the file (bare DONE/ERROR
                    # appear nowhere else in this module)
                    headers["job-result"] = str(netrender.model.FRAME_DONE)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        if job.hasRenderResult():
                            # send image back to server

                            filename = os.path.join(JOB_PREFIX, "%06d.exr" % frame.number)

                            # thumbnail first
                            if netsettings.use_slave_thumb:
                                thumbname = thumbnail.generate(filename)

                                if thumbname:
                                    # with-block guarantees the handle closes
                                    # even if the request raises
                                    with open(thumbname, 'rb') as f:
                                        conn.request("PUT", "/thumb", f, headers=headers)
                                    responseStatus(conn)

                            with open(filename, 'rb') as f:
                                conn.request("PUT", "/render", f, headers=headers)
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue

                        elif job.type == netrender.model.JOB_PROCESS:
                            conn.request("PUT", "/render", headers=headers)
                            if responseStatus(conn) == http.client.NO_CONTENT:
                                continue
                else:
                    headers["job-result"] = str(netrender.model.FRAME_ERROR)
                    for frame in job.frames:
                        headers["job-frame"] = str(frame.number)
                        # send error result back to server
                        conn.request("PUT", "/render", headers=headers)
                        if responseStatus(conn) == http.client.NO_CONTENT:
                            continue

                engine.update_stats("", "Network render connected to master, waiting for jobs")
            else:
                bisleep.sleep()

        conn.close()

        if netsettings.use_slave_clear:
            clearSlave(NODE_PREFIX)