#!/usr/bin/env python3 # -*- coding: utf-8 -*- ############################################################################## # mythsgu -- MythTV Storage Group Utility # # Utilities for managing files in MythTV storage groups. # # Major functions: # Fill archive drives # Pack archive drives # Balance the free space of recording storage groups # Copy entire directories of recordings between operating MythTV PCs # # Fill Archive Drives # =================== # Archive drives are storage devices that are used to store MythTV recordings # in bulk (such as Seagate ST8000AS0002 SMR drives). They are intended to be # filled up with recordings and then spend most of their time powered down # except when needed. They are not intended to be used for making recordings # (and SMR drives would fail if used as such due to the shingle rewriting # process causing excessively slow write speeds at times). The recordings on # archive drives are mounted in partitions belonging to an "Archives" storage # group that is never used in recording rules, but allows the recordings to be # seen by MythTV when the drives are online and mounted. mythsgu is used to # move old recording files from the recording drives to the archive drives so # that the archive drives are kept full, allowing the most free space on the # recording drives. It also keeps track of what files are on what archive # drives, so that a specific archive drive can be mounted as necessary when a # recording on it is wanted for playback or deletion. # # Pack archive drives # =================== # Move the recording files on the archive drives to the lower numbered drives # so that the free space is on the highest numbered drive. # # Balance Free Space # ================== # To be implemented later. Until then, use balance_storage.sh Bash script. # # Copy A File # =========== # Copy one file, with a progress bar and the ability to abort the copy. No # checking for MythTV activity is done. 
# # Copy Directories Of Recordings # ============================== # Copy an entire directory of recording files to another directory, where # the other directory can be on another PC. When copying to a destination PC # drive that is used by MythTV on that PC, "mythsgu event" must be set up to # be called by the MythTV event system and it must send its event # notifications to mythsgu on the source PC. The source PC must be able to # access the destination PC database and services API. The copying process # will pause when either PC's MythTV system is busy, and resume when it # becomes idle again. All files in the directory will be copied, regardless # of whether they are actually MythTV recording or related files or not. # # # Author: J S Worthington # Created: 2016-05-27 # ############################################################################## ############################################################################## # Dependencies: # MythTV API interface # py-dateutil # Can be installed using: # pip install py-dateutil ############################################################################## import atexit import asyncore import calendar import datetime import glob import lxml import math import MythTV import operator import os import queue import sys import shutil import signal import stat import socket import subprocess import termios import time import threading import traceback import tty try: import dateutil.parser except: print('Please install py-dateutil.') print('In Ubuntu:') print(' sudo apt install python-dateutil') print('If it is not available as a package, try "pip install py-dateutil".') exit(2) import MythTV.services_api.send as api import MythTV.services_api.utilities as util program_name = os.path.basename(sys.argv[0]) program_name_len = len(program_name) ############################################################################## # # Version: 0.1 2016-06-07 # Version: 0.2 2017-05-05 # Version: 0.3 2017-08-07 # Version: 0.4 
2017-11-25 # Fill command working. # Version: 0.5 2019-06-08 # MythTV v30 compatibility update - use MythTV.services_api instead of # Utilities.py. # Version: 0.6 2019-06-09 # Copydir command implemented. This is specialised code for a specific # site's requirements. It may be generalised later. # Version: 0.7 2019-09-03 # Make copydir work for copying between drives on one PC. # Version: 0.8 2019-10-22 # Playing with test2 command. # Version: 0.9 2020-05-15 # Fix to work with MythTV v30 Python bindings, especially mythsgu fill. # Fix mythsgu fill to work with updated MythTVStatusMonitor. # Version: 0.10 2020-06-21 # Fix exception when there is a .sql file with no matching recording file. # Add decrementing files left display to command_copydir. # Version 0.11 2020-07-16 # Convert to Python 3 and MythTV v31. # Remove all code for backports and Utilities.py. # Version 0.12 2020-10-15 # Fix bugs in Python 3 / MythTV v31 conversion. # Version 0.13 2021-08-17 # Add pack command. # Fix optional storage groups. # ############################################################################## VERSION = '0.13' ############################################################################## # Configuration ############################################################################## # Set this to get debug output. #DEBUG_OUTPUT = True DEBUG_OUTPUT = False # MySQL WHERE clause that selects recordings to be excluded from being moved # to archive drives. Only references to the "recorded" table are allowed. # Default: False ("0") = do not exclude anything. #EXCLUDED_RECORDINGS_SQL = "recgroup = 'CRW' OR recgroup = 'CRW or JSW'" EXCLUDED_RECORDINGS_SQL = "0" # Directory to find the local MythTV config.xml in. LOCAL_CONFDIR = '/etc/mythtv' # Names of all the storage groups where non-archived recordings can be found. # These storage groups must be present. For most MythTV setups, this will be # just one name, 'Default'. 
#NORMAL_SG_NAMES = ['Default', 'Default_crw-pvr']
NORMAL_SG_NAMES = ['Default']

# Names of all the storage groups where non-archived recordings may be found.
# These storage groups are optional - they may or may not be present.  For
# most MythTV setups, this list will be empty ([]).
#OPTIONAL_SG_NAMES = ['Noisy']
OPTIONAL_SG_NAMES = []

# Name of the Archives storage group.  Default: Archives.
ARCHIVE_SG_NAME = 'Archives'

# Archive path prefix and suffix.  Used to create short archive directory
# name.
ARCHIVE_PATH_PREFIX = '/mnt/'
ARCHIVE_PATH_SUFFIX = '/recordings/'

# Stop moving files to storage group partitions when free space would become
# less than this.
#
# It is vital that this number exceeds the limit at which MythTV will expire
# recordings.  Otherwise, despite the archive drives not being used for
# recordings, the regular checks for free space *will* expire enough
# recordings so that whatever size of free space you have specified in your
# MythTV settings is available.  And then a later fill operation will top
# up the recordings on the drive, which will then cause more recordings to
# be expired.  Units: bytes.  Default: 20 Gibibytes.
# 20 Gibibytes
MIN_FREE_SPACE = 20*1024*1024*1024

# Minimum time allowed before the next recording.  This must be long enough
# that moving of the longest recording file between the slowest hard drives
# will finish before the recording will start.  If you have storage groups
# accessed via a lan connection (eg NAS server), then this value may need to
# be increased.  Especially if you only have 100 Mbit Ethernet.  Units:
# seconds.  Default: 5 minutes (300 seconds).
MIN_TIME_BEFORE_NEXT_RECORDING=300

# File extensions for recording files.
RECORDING_EXT = ['.nuv', '.mpg', '.ts']

# Size of the database VARCHAR fields used to store the locations of files.
# When changing this setting, always run "mythsgu clean" to drop all the
# mythsgu tables so they will be re-created with the new path length.
# Units: characters.  Default: 1024
MAX_PATH_LENGTH = 1024

# The IP address of the mythbackend to talk to.  Default: 127.0.0.1
HOST = '127.0.0.1'

# Port number to use to talk to a running instance of mythsgu.  Default: 15007
PORT = 15007

# When MythTV is busy, use this interval between checks to see if it is no
# longer busy.  Units: seconds (float).  Default: 60 s.
MYTHTV_BUSY_CHECK_INTERVAL = 60.0

# When an event has been received, recheck the MythTV busy status after this
# shorter time.  Units: seconds (float).  Default: 5 s.
MYTHTV_BUSY_RECHECK_TIMEOUT = 5.0

# Minimum interval between connections to a backend to retrieve its status.
# If the same query is repeated before this interval has elapsed, the
# previously retrieved data is used.  Units: seconds (float).  Default: 100 ms.
MIN_BACKEND_QUERY_INTERVAL = 0.1

# Maximum number of recordings to check to make sure we get a future one.
MAX_RECORDINGS_COUNT = 20

##############################################################################
# Constants
##############################################################################

# Control-C character.
CTRL_C = chr(3)

# Name of the table used by mythsgu to store its list of files on archive
# drives.
ARCHIVED_TABLE = program_name + '_archived'

# Name of the table used by mythsgu to store the names of the files selected
# to be moved to the Archive drives.
MOVE_TABLE = program_name + '_move'

# Name of the table used by mythsgu to store the locations of all the
# recordings in the non-archive storage groups.
FIND_TABLE = program_name + '_find'

# Socket buffer size.  This should be a power of two.  Since only small
# messages are ever received, a small buffer is used.
SOCKET_BUFFER_SIZE = 128

##############################################################################
# Globals
##############################################################################

# Set True to request that an in-progress copy or move be aborted as soon as
# possible.
global abort_request
abort_request = False

# Cleared when MythTV is busy or about to become busy (such as a recording
# time is too close).  Causes file operations to be paused until mythtv_idle
# is set again.
global mythtv_idle
mythtv_idle = threading.Event()
mythtv_idle.set()

##############################################################################
# Debug output window.
# Modified from:
# https://stackoverflow.com/questions/19479504/how-can-i-open-two-consoles-from-a-single-script
##############################################################################

if DEBUG_OUTPUT:

    global mythsgu_debug
    mythsgu_debug = None

    def dprint_init():
        # Open the debug log (line buffered) the first time it is needed.
        global mythsgu_debug
        if mythsgu_debug is None:
            mythsgu_debug = open('mythsgu-debug.log', 'w', 1)
            dprint(program_name + ' debug output started')

    def dprint(s):
        # Timestamped debug output; silently dropped until dprint_init() has
        # opened the log file.
        global mythsgu_debug
        if mythsgu_debug is not None:
            print(datetime.datetime.now().strftime('%H:%M:%S.%f')[:-3] + ' ' + s, file=mythsgu_debug)

else:

    def dprint_init():
        pass

    # BUG FIX: the no-op stub must accept the same message argument as the
    # real dprint(s) above.  Previously it was "def dprint():", so every
    # dprint('...') call raised TypeError whenever DEBUG_OUTPUT was False.
    def dprint(s):
        pass

##############################################################################
# Abort request handling
##############################################################################

def request_abort():
    """Request that any in-progress copy or move abort as soon as possible."""
    global abort_request
    global mythtv_idle
    abort_request = True
    dprint('request_abort: abort_request = True')
    print('request_abort: abort_request = True')
    # Wake anything blocked waiting for MythTV to become idle so it can
    # notice the abort request.
    mythtv_idle.set()
    dprint('request_abort() called')

class AbortRequest(Exception):
    """Raised to unwind when an abort is wanted; constructing it also sets
    the global abort flag via request_abort()."""

    def __init__(self):
        dprint('AbortRequest exception')
        print('AbortRequest exception')
        request_abort()

##############################################################################
# Current UTC time in a format able to be compared to the MythTV next
# recording time.
##############################################################################

def mythtv_now():
    """Return the current UTC time as seconds since the epoch (comparable to
    MythTV's next recording time)."""
    t = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
    dprint('mythtv_now: ' + str(t))
    return t

##############################################################################
# Fully expanded absolute path.
##############################################################################

def fullpath(p):
    """Return p with ~, $VARS and relative components fully expanded."""
    return os.path.abspath(os.path.expanduser(os.path.expandvars(p)))

##############################################################################
# KBHit
##############################################################################

"""
A Python class implementing KBHIT, the standard keyboard-interrupt poller.
Works transparently on Windows and Posix (Linux, Mac OS X).  Doesn't work
with IDLE.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

From http://home.wlu.edu/~levys/software/kbhit.py
"""

import os

# Windows
if os.name == 'nt':
    import msvcrt

# Posix (Linux, OS X)
else:
    import sys
    import termios
    import atexit
    from select import select


class KBHit:

    def __init__(self):
        '''Creates a KBHit object that you can call to do various keyboard things.
        '''
        if os.name == 'nt':
            pass
        else:
            # Save the terminal settings
            self.fd = sys.stdin.fileno()
            self.new_term = termios.tcgetattr(self.fd)
            self.old_term = termios.tcgetattr(self.fd)

            # New terminal setting unbuffered
            self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)

            # Support normal-terminal reset at exit
            atexit.register(self.set_normal_term)

    def set_normal_term(self):
        ''' Resets to normal terminal.  On Windows this is a no-op.
        '''
        if os.name == 'nt':
            pass
        else:
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)

    def getch(self):
        ''' Returns a keyboard character after kbhit() has been called.
            Should not be called in the same program as getarrow().
        '''
        s = ''
        if os.name == 'nt':
            return msvcrt.getch().decode('utf-8')
        else:
            return sys.stdin.read(1)

    def kbhit(self):
        ''' Returns True if keyboard character was hit, False otherwise.
        '''
        if os.name == 'nt':
            return msvcrt.kbhit()
        else:
            # Zero timeout: poll stdin without blocking.
            dr, dw, de = select([sys.stdin], [], [], 0)
            return dr != []

##############################################################################
# Read a single character from stdin.  The character is not echoed.
#
# From: http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
##############################################################################

def getChar():
    """Read one raw (unechoed) character from stdin, always restoring the
    previous terminal settings afterwards."""
    fd = sys.stdin.fileno()
    oldSettings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        answer = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, oldSettings)
    return answer

##############################################################################
# Read a single character from stdin, with timeout.
# Timeout is in units of seconds.
##############################################################################

def getChar_with_timeout(timeout):
    """Read one raw character from stdin; return '' if nothing was typed
    within timeout seconds.  The timeout is enforced with SIGALRM, so
    timeout must be an integer number of seconds (signal.alarm requirement)
    and this only works in the main thread."""

    class TimeoutError(Exception):
        pass

    def interrupted(signum, frame):
        raise TimeoutError()

    signal.signal(signal.SIGALRM, interrupted)
    signal.alarm(timeout)
    try:
        ch = getChar()
    # BUG FIX: this previously read "except TimeoutError():", which tries to
    # catch an exception *instance*; in Python 3 that raises TypeError when
    # the alarm actually fires.  The except clause must name the class.
    except TimeoutError:
        ch = ''
    # Disable the alarm.
    signal.alarm(0)
    return ch

##############################################################################
# Single instance check
# Modified from:
# http://code.activestate.com/recipes/578453-python-single-instance-cross-platform/
##############################################################################

try:
    import fcntl
except ImportError:
    # fcntl is unavailable on Windows; the OS_WIN path below is used instead.
    fcntl = None

#LOCK_PATH = os.path.abspath(sys.argv[0] + ".lock")
LOCK_PATH = '/tmp/' + program_name + '.lock'
OS_WIN = False
if 'win32' in sys.platform.lower():
    OS_WIN = True


class SingleInstance:
    """Ensure only one instance of the program runs at once, using an
    exclusive lock on LOCK_PATH.  After construction, is_running is True if
    another instance already holds the lock."""

    def __init__(self):
        self.fh = None
        self.is_running = False
        self.do_magic()
        self.cleanup_required = True
        atexit.register(self.clean_up)

    def do_magic(self):
        # Try to take the lock; on contention set is_running instead of
        # raising.
        if OS_WIN:
            try:
                if os.path.exists(LOCK_PATH):
                    os.unlink(LOCK_PATH)
                self.fh = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except EnvironmentError as err:
                # errno 13 == EACCES: another instance holds the file open.
                if err.errno == 13:
                    self.is_running = True
                else:
                    raise
            except EnvironmentError as err:
                if self.fh is not None:
                    self.is_running = True
                else:
                    raise
        else:
            try:
                self.fh = open(LOCK_PATH, 'w')
                fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except EnvironmentError as err:
                # The open succeeded but the lock is held elsewhere.
                if self.fh is not None:
                    self.is_running = True
                else:
                    raise

    def clean_up(self):
        # Release the lock and remove the lock file at exit.
        if self.cleanup_required == True:
            self.cleanup_required = False  # this is not really needed
            try:
                if self.fh is not None:
                    if OS_WIN:
                        os.close(self.fh)
                        os.unlink(LOCK_PATH)
                    else:
                        fcntl.lockf(self.fh, fcntl.LOCK_UN)
                        self.fh.close() # ???
                        os.unlink(LOCK_PATH)
            except Exception as err:
                # logger.exception(err)
                raise # for debugging purposes, do not raise it on production

##############################################################################
# Humanly readable file size formatting.
##############################################################################

def sizeof_fmt(num, suffix='B'):
    """Return num (bytes) formatted with binary-prefix units, e.g. '1.5 KiB'.

    From http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
        if abs(num) < 1024.0:
            return "%3.1f %s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f %s%s" % (num, 'Yi', suffix)

##############################################################################
# Progress bar.
##############################################################################

class progressBar:
    """ Creates a text-based progress bar. Call the object with the `print'
        command to see the progress bar, which looks something like this:

        [=======>          22%                  ]

        You may specify the progress bar's width, min and max values on init.

        Modified from http://code.activestate.com/recipes/168639/
    """

    def __init__(self, minValue = 0, maxValue = 100, totalWidth=80):
        self.progBar = "[]"   # This holds the progress bar string
        self.min = minValue
        self.max = maxValue
        self.span = maxValue - minValue
        self.width = totalWidth
        self.allFull = self.width - 2
        self.prevNumHashes = self.allFull
        self.prevPercentDone = 101
        self.amount = 0       # When amount == max, we are 100% done
        self.updateAmount(0)  # Build progress bar string

    def updateAmount(self, newAmount = 0, paused = False):
        """ Update the progress bar with the new amount (with min and max
            values set at initialization; if it is over or under, it takes
            the min or max value as a default.  When paused is True the bar
            is drawn with '*' instead of '=' / '>'.
        """
        if newAmount < self.min: newAmount = self.min
        if newAmount > self.max: newAmount = self.max
        self.amount = newAmount

        if paused:
            progressChar = '*'
            arrowheadChar = '*'
        else:
            progressChar = '='
            arrowheadChar = '>'

        # Figure out the new percent done, round to an integer
        diffFromMin = float(self.amount - self.min)
        self.percentDone = (diffFromMin / float(self.span)) * 100.0
        self.percentDone = int(round(self.percentDone))

        # Figure out how many hash bars the percentage should be
        self.numHashes = int(round((self.percentDone / 100.0) * self.allFull))

        # Build a progress bar with an arrow of equal signs; special cases for
        # empty and full
        if self.numHashes == 0:
            self.progBar = "[%s%s]" % (arrowheadChar, ' '*(self.allFull-1))
        elif self.numHashes == self.allFull:
            self.progBar = "[%s]" % (progressChar*self.allFull)
        else:
            self.progBar = "[%s%s%s]" % (progressChar*(self.numHashes-1),
                                         arrowheadChar,
                                         ' '*(self.allFull-self.numHashes))

        # figure out where to put the percentage, roughly centered
        percentPlace = round_to_int(len(self.progBar) / 2) - len(str(self.percentDone))
        percentString = str(self.percentDone) + "%"

        # slice the percentage into the bar
        self.progBar = ''.join([self.progBar[0:percentPlace],
                                percentString,
                                self.progBar[percentPlace+len(percentString):]
                                ])

    def __str__(self):
        return str(self.progBar)

    def __call__(self, value, paused = False):
        """ Updates the amount, and writes to stdout.  Prints a carriage
            return first, so it will overwrite the current line in stdout."""
        self.updateAmount(value, paused)
        # Only redraw when something visible changed (or while paused).
        if (self.numHashes != self.prevNumHashes or
                self.percentDone != self.prevPercentDone or
                paused):
            sys.stdout.write('\r' + str(self))
            sys.stdout.flush()
            self.prevNumHashes = self.numHashes
            self.prevPercentDone = self.percentDone

##############################################################################
# Modified versions of the standard Python library shutil.copy2 and
# shutil.move that support a progress indicator and the ability to abort in
# the middle of a copy/move, and also copy the user and group ownership.
##############################################################################

class CopyFileObjAborted(Exception):
    """Custom exception raised when a long file copy/move is aborted by the
    callback.
    """

def copyfileobjc(fsrc, fdst, callback, length=16*1024):
    """copy data from file-like object fsrc to file-like object fdst.

    A callback provides a way to give progress indications, and to abort the
    copy if necessary.  The copy pauses while the global mythtv_idle event is
    cleared (i.e. while MythTV is busy), resuming on idle or on a keypress.
    """
    global abort_request
    global mythtv_idle
    copied = 0
    while True:
        mythtv_busy = not mythtv_idle.is_set()
        if mythtv_busy:
            callback(copied, mythtv_busy)
            dprint('Waiting on mythtv_idle')
            kb = KBHit()
            # Wait until MythTV goes idle, polling the keyboard so the user
            # can still interact (e.g. abort) while paused.
            while not kb.kbhit():
                if mythtv_idle.wait(1.0):
                    break
            dprint('Resuming after waiting on mythtv_idle')
        buf = fsrc.read(length)
        if not buf:
            break
        fdst.write(buf)
        copied += len(buf)
        if abort_request or callback(copied, mythtv_busy):
            raise CopyFileObjAborted

def copyfilec(src, dst, callback, follow_symlinks=True):
    """Copy data from src to dst.

    If follow_symlinks is not set and src is a symbolic link, a new
    symlink will be created instead of copying the file it points to.

    Raises shutil.SameFileError if src and dst are the same file, and
    shutil.SpecialFileError for named pipes.
    """
    # BUG FIX: the original raised the bare names SameFileError /
    # SpecialFileError, which are not defined in this module (they live in
    # shutil), so those error paths crashed with NameError.  It also used the
    # private helper shutil._samefile(); os.path.samefile is the public
    # equivalent (it raises OSError if either file does not exist).
    try:
        same = os.path.samefile(src, dst)
    except OSError:
        same = False
    if same:
        raise shutil.SameFileError("{!r} and {!r} are the same file".format(src, dst))

    for fn in [src, dst]:
        try:
            st = os.stat(fn)
        except OSError:
            # File most likely does not exist
            pass
        else:
            # XXX What about other special files? (sockets, devices...)
            if stat.S_ISFIFO(st.st_mode):
                raise shutil.SpecialFileError("`%s` is a named pipe" % fn)

    if not follow_symlinks and os.path.islink(src):
        os.symlink(os.readlink(src), dst)
    else:
        with open(src, 'rb') as fsrc:
            with open(dst, 'wb') as fdst:
                try:
                    copyfileobjc(fsrc, fdst, callback)
                except CopyFileObjAborted:
                    # Remove the partial destination file before unwinding.
                    fdst.close()
                    os.remove(dst)
                    raise
    return dst

def copy2c(src, dst, callback, follow_symlinks=True):
    """Copy data and all stat info ("cp -p src dst"). Return the file's
    destination.

    The destination may be a directory.

    If follow_symlinks is false, symlinks won't be followed. This
    resembles GNU's "cp -P src dst".

    Modified to also copy the user and group ownership, and to copy via a
    .tmp file that is renamed into place only on success.
    """
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    dsttmp = dst + '.tmp'
    try:
        copyfilec(src, dsttmp, callback, follow_symlinks=follow_symlinks)
    except CopyFileObjAborted:
        raise
    else:
        os.rename(dsttmp, dst)
        shutil.copystat(src, dst)
        st = os.stat(src)
        try:
            # Preserve ownership too; may fail when not running as root.
            os.chown(dst, st.st_uid, st.st_gid)
        except OSError as e:
            dprint('copy2c: os.chown failed')
            dprint('OSError: ' + str(e))
        return dst

def move2c(src, dst, callback, follow_symlinks=True):
    """Move a file by copying with copy2c, then deleting the src file.
    """
    try:
        copy2c(src, dst, callback, follow_symlinks=follow_symlinks)
    except CopyFileObjAborted:
        raise
    else:
        os.remove(src)

###############################################################################
## Extract fields required for database access from a MythTV config.xml file.
## Modified from /usr/lib/python2.7/MythTV/database.py
###############################################################################
# NOTE(review): a large commented-out copy of the old _ConfigXml class used to
# follow here; it duplicated the live ConfigXml class defined later in this
# file and has been removed.

##############################################################################
# Round up to the next highest multiple.
##############################################################################

def round_up(num, divisor):
    """Return the smallest multiple of divisor that is >= num.

    BUG FIX: this was written for Python 2, where (num - 1) / divisor was
    integer (floor) division.  After the Python 3 conversion "/" became true
    division, and combined with math.ceil() the expression rounded most
    values one multiple too high (e.g. round_up(10, 5) returned 15).  Using
    floor division restores the original Python 2 behaviour.
    """
    return ((num - 1) // divisor + 1) * divisor

##############################################################################
# Round a float to an int without having problems with the floating point
# inaccuracies.
##############################################################################

def round_to_int(fl):
    """Round fl to the nearest int, with halves (>= .5) rounding up."""
    i = int(fl)
    if fl - float(i) >= 0.5:
        return i + 1
    else:
        return i

##############################################################################
# Server to receive communications in a running instance of mythsgu.
##############################################################################

class MessageHandler(asyncore.dispatcher):
    """Handles one accepted connection, queueing valid event messages.

    A valid message starts with '<program_name>: '; the remainder is put on
    event_queue as (addr, message).
    """

    def __init__(self, sock, addr, event_queue):
        self.event_queue = event_queue
        self.addr = addr
        asyncore.dispatcher.__init__(self, sock)

    def handle_read(self):
        global server
        if not server.stopping:
            # BUG FIX: in Python 3 recv() returns bytes, but this code
            # compares against (and concatenates with) str, so valid events
            # were never recognised and invalid ones raised TypeError.
            # Decode before processing.
            data = self.recv(SOCKET_BUFFER_SIZE).decode('utf-8', 'replace')
#            dprint('handle_read: data=' + str(data) + ' addr=' + str(self.addr))
            data_field_index = program_name_len + 2
            if data[0:data_field_index] == program_name + ': ':
                dprint(
                    datetime.datetime.now().isoformat() +
                    ' Event received from ' + repr(self.addr) + ': ' +
                    data[data_field_index:]
                )
                self.event_queue.put((self.addr, data[data_field_index:]))
            elif len(data) != 0:
                dprint(
                    datetime.datetime.now().isoformat() +
                    ' Invalid message received from ' + repr(self.addr) +
                    ' (' + str(len(data)) + '): ' + data
                )
            else:
                # Zero-length read: the peer closed the connection.
                # NOTE(review): this closes the whole listening server on any
                # client disconnect, as the original did — confirm intended.
                data = self.recv(SOCKET_BUFFER_SIZE)
                server.close()

class MythsguServer(asyncore.dispatcher):
    """Listening socket; accepted connections get a MessageHandler each."""

    def __init__(self, host, port, event_queue):
        asyncore.dispatcher.__init__(self)
        self.event_queue = event_queue
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)
        self.stopping = False

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            sock, addr = pair
            dprint('Incoming connection from %s' % repr(addr))
            if not server.stopping:
                handler = MessageHandler(sock, addr, self.event_queue)

# NOTE(review): the commented-out MythsguTestServer / command_test_server
# experiment that used to follow here has been removed.
##############################################################################
# Client for communications to a running instance of mythsgu.
##############################################################################

class MythsguClient(asyncore.dispatcher):
    """One-shot client: connects to a mythsgu instance at addr:PORT and
    sends message.  self.error is set to 1 on any socket error."""

    # NOTE(review): asyncore's send() requires bytes in Python 3, so callers
    # are expected to pass message as bytes — confirm at the call sites.

    def __init__(self, addr, message):
        self.error = 0
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((addr, PORT))
        self.buffer = message

    def handle_connect(self):
        pass

    def handle_close(self):
        self.close()

    def handle_read(self):
        # Drain (and discard) anything the server sends back.
        self.recv(SOCKET_BUFFER_SIZE)

    def writable(self):
        # Keep the socket registered for writing until the buffer is drained.
        return (len(self.buffer) > 0)

    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]

    def handle_error(self):
        self.error = 1

##############################################################################
# Read config.xml file to get database settings only (other settings ignored).
##############################################################################

class ConfigXml:
    """Database connection settings read from a MythTV config.xml file."""

    # Defaults, used when the XML does not supply a value.
    DBHostName = 'localhost'
    DBUsername = 'mythtv'
    DBPassword = 'mythtv'
    DBName = 'mythconverg'
    DBPort = 3306

    # Maps config.xml <Database> child tags to the attribute names above.
    # BUG FIX: 'UserName' previously mapped to 'DBUserName' (capital N),
    # which does not match the DBUsername attribute defined above and read by
    # Backend.init(), so a username supplied in config.xml was silently
    # ignored and the default was used instead.
    _conf_trans = {
        'Host':'DBHostName',
        'UserName':'DBUsername',
        'Password':'DBPassword',
        'DatabaseName':'DBName',
        'Port':'DBPort'
    }

    def readXML(self, filename):
        """Parse filename and set the DB* attributes from its <Database>
        section.  Returns True on success, False if the file is unreadable
        or cannot be parsed.

        NOTE(review): values set from XML are strings (child.text), so
        DBPort becomes a str rather than the int default — confirm the
        consumers accept that.
        """
        if not os.access(filename, os.R_OK):
            dprint('File ' + filename + ' not accessible!')
            return False
        try:
            config = lxml.etree.parse(filename)
            for child in config.xpath('/Configuration/Database')[0].getchildren():
                if child.tag in self._conf_trans:
#                    dprint('child.tag=' + str(child.tag))
                    setattr(self, self._conf_trans[child.tag], child.text)
        except Exception as e:
            dprint(str(e))
            dprint('lxml.etree.parse failed')
            return False
        return True

##############################################################################
# Backend class.  Handles communication with a backend and database selected
# using a config.xml file.  If no config.xml file is provided, the local
# database and backend are used.
##############################################################################

class Backend():
    """A MythTV backend plus its database connection.

    If init() is given a config.xml path, the backend/database named in
    that file is used; otherwise the local backend and database are used.
    """

    # Sentinel "next recording time" meaning no recordings are scheduled.
    NO_SCHEDULED_RECORDINGS = sys.float_info.max

    def __init__(self, ready_event=None):
        # ready_event, if supplied, is set once init() has connected.
        self.ready_event = ready_event
        self.next_recording_time = self.NO_SCHEDULED_RECORDINGS
        # Timestamps of the last backend/database queries, used to throttle
        # repeat queries to at most one per MIN_BACKEND_QUERY_INTERVAL.
        self.last_get_next_recording_time = 0
        self.inuseprograms = 0
        self.last_inuseprograms = 0

    def init(self, config_xml=''):
        """Connect to the services API and database.

        config_xml - path of a config.xml file for a remote backend, or ''
                     for the local backend.
        """
        dprint('config_xml="' + config_xml + '"')
        self.config_xml = config_xml
        if self.config_xml != '':
            self.remote_config = ConfigXml()
            if not self.remote_config.readXML(self.config_xml):
                dprint('Failed to read or correctly parse ' + self.config_xml + ' file, using ' + HOST)
                self.config_xml = ''
                self.host = HOST
            # dprint(str(self.remote_config.DBHostName))
            # dprint(str(self.remote_config.DBUsername))
            # dprint(str(self.remote_config.DBPassword))
            # dprint(str(self.remote_config.DBName))
            # dprint(str(self.remote_config.DBPort))
            # NOTE(review): on a readXML failure this overwrites the
            # self.host = HOST assignment above with the ConfigXml default
            # DBHostName ('localhost') - confirm that is intended.
            self.host = self.remote_config.DBHostName
            dprint('self.host = ' + self.host)
        else:
            self.host = HOST
        self.backend = api.Send(self.host)
        # Initialise the UTCOffset so that the conversion to local time works.
        # (A stray duplicate api.Send(self.host) call, whose result was
        # discarded, has been removed here.)
        util.get_utc_offset(self.backend)
        # Connect to the database.
        if self.config_xml != '':
            try:
                self.db = MythTV.MythDB(
                    DBHostName = self.remote_config.DBHostName,
                    DBUsername = self.remote_config.DBUsername,
                    DBPassword = self.remote_config.DBPassword,
                    DBName = self.remote_config.DBName,
                    DBPort = self.remote_config.DBPort
                )
            except MythTV.exceptions.MythDBError as e:
                if e.ecode == MythTV.MythError.DB_SCHEMAMISMATCH:
                    # The remote database uses a different schema version:
                    # retry the connection claiming the remote's version.
                    dprint('Schema mismatch connecting to ' + self.host + ' database, trying again with schema ' + str(e.remote))
                    MythTV.MythDB._schema_local = e.remote
                    self.db = MythTV.MythDB(
                        DBHostName = self.remote_config.DBHostName,
                        DBUsername = self.remote_config.DBUsername,
                        DBPassword = self.remote_config.DBPassword,
                        DBName = self.remote_config.DBName,
                        DBPort = self.remote_config.DBPort
                    )
                    dprint('After retry')
                    global abort_request
                    dprint('Backend: abort_request=' + str(abort_request))
                else:
                    raise
            # Restore the schema version for later local connections.
            MythTV.MythSchema._schema_local = MythTV.SCHEMA_VERSION
        else:
            self.db = MythTV.MythDB()
        self.dbc = self.db.cursor()
        if self.ready_event != None:
            self.ready_event.set()

    # Get the next recording time from the backend.  If there are no scheduled
    # recordings, returns a time very far in the future.
    def get_next_recording_time(self, error_response=NO_SCHEDULED_RECORDINGS):
        """Return the epoch time of the next scheduled recording.

        Returns error_response when the backend reports an error or there
        are no scheduled recordings.  Results are cached for
        MIN_BACKEND_QUERY_INTERVAL seconds.  Raises whatever
        self.backend.send raises when the backend is unreachable.
        """
        if time.time() < self.last_get_next_recording_time + MIN_BACKEND_QUERY_INTERVAL:
            return self.next_recording_time
        resp_dict = self.backend.send(endpoint='Dvr/GetUpcomingList?Count=' + str(MAX_RECORDINGS_COUNT))
        # Bug fix: record the query time (only after a successful send, so
        # connection failures keep raising) so that the
        # MIN_BACKEND_QUERY_INTERVAL throttle above actually takes effect -
        # previously this timestamp was never updated.
        self.last_get_next_recording_time = time.time()
        if list(resp_dict.keys())[0] in ['Abort', 'Warning']:
            #sys.exit('\n{}\n'.format(list(resp_dict.values())[0]))
            dprint(self.host + ' get_next_recording_time(): Abort or Warning')
            dprint(str(resp_dict))
            request_abort()
            return error_response
        count = int(resp_dict['ProgramList']['Count'])
        progs = resp_dict['ProgramList']['Programs']
        if count < 1:
            dprint(self.host + ' get_next_recording_time(): No scheduled recordings')
            return error_response
        else:
            # Find the first future recording time.
            now = mythtv_now()
            found = False
            prog_index = 0
            for prog in progs:
                prog_index += 1
                recording_time = calendar.timegm(
                    dateutil.parser.parse(prog['Recording']['StartTs']).utctimetuple()
                )
                if recording_time >= now:
                    found = True
                    break
            if found:
                self.next_recording_time = recording_time
                dprint(self.host + ' get_next_recording_time()=' +
                    util.utc_to_local(str(datetime.datetime.utcfromtimestamp(self.next_recording_time)), False, False) +
                    ' count=' + str(count) + ' index=' + str(prog_index)
                )
            else:
                self.next_recording_time = self.NO_SCHEDULED_RECORDINGS
                dprint(self.host + ' get_next_recording_time(): No future recording time found')
            return self.next_recording_time

    # Check the mythconverg.inuseprograms table to see if MythTV is busy.
    def check_mythtv_inuseprograms(self):
        """Return True if any programs are in use (MythTV is busy).

        Results are cached for MIN_BACKEND_QUERY_INTERVAL seconds.
        """
        if time.time() < self.last_inuseprograms + MIN_BACKEND_QUERY_INTERVAL:
            # Bug fix: this path previously returned the raw row count while
            # the path below returned a bool; return a bool consistently
            # (all callers use the result in a boolean context).
            return self.inuseprograms != 0
        # Bug fix: record the query time so the throttle above actually
        # takes effect - previously this timestamp was never updated.
        self.last_inuseprograms = time.time()
        self.dbc.execute("SELECT COUNT(*) AS count FROM inuseprograms")
        self.inuseprograms = self.dbc.fetchone()[0]
        dprint(self.host + ' check_mythtv_inuseprograms()=' + str(self.inuseprograms))
        return self.inuseprograms != 0

##############################################################################
# Find a recording on a recording drive or archive drive and pop up a message
# in mythfrontend saying where it is.  This command is intended to be used
# from a MythTV User Job.
##############################################################################

def command_find():
    print("Find command not implemented yet.")
    return

##############################################################################
# Monitor events and stop a fill, copy or balance operation as necessary by
# setting the abort_request flag.
##############################################################################

class serverThread(threading.Thread):
    """Runs the asyncore loop for the MythsguServer event listener.

    Events accepted by the server are pushed onto event_queue, which is
    consumed by MythTVStatusMonitor.run().
    """

    def __init__(self, addr, event_queue):
        threading.Thread.__init__(self)
        self.event_queue = event_queue  # queue.Queue shared with the monitor
        self.addr = addr                # local address to listen on

    def run(self):
        # server is global so MythsguServer.handle_accept() can consult
        # server.stopping.
        global server
        server = MythsguServer(self.addr, PORT, self.event_queue)
        asyncore.loop(timeout=1)

    def stop(self):
        # Closing all asyncore channels makes asyncore.loop() return, which
        # ends this thread.
        global server
        asyncore.close_all()
        server.close()


class MonitoredBackend():
    """A Backend plus the generator-based state machine tracking whether
    that backend is busy (recording, about to record, or playing back)."""

    def __init__(self, ready_event, config_xml=''):
        be_ready_event = threading.Event()
        self.be = Backend(be_ready_event)
        self.be.init(config_xml)
        self.mythtv_busy = False
        # Seconds until this backend's status should next be re-checked.
        self.timeout = MYTHTV_BUSY_CHECK_INTERVAL
        dprint('MonitoredBackend: __init__ be addr=' + self.be.host)
        # Prime the state-machine generator: run it to its first yield.
        self.status = self.be_status()
        next(self.status)
        dprint("MonitoredBackend: After self.be_status('') be addr=" + self.be.host)
        be_ready_event.wait()
        ready_event.set()

    def be_status(self):
        """
        A "be_status" instance is run for each backend that is being
        monitored.  It tracks the status of the backend and sets or clears
        its mythtv_busy flag as appropriate.  "be_status" is resumed (via
        next()) when an event is received from its corresponding backend,
        or when its next timeout occurs.
        """
        global abort_request
        dprint('MonitoredBackend.be_status ' + self.be.host + ': Starting')
        # MythTV status ITER
        while not abort_request:
            # Running cycle
            dprint(self.be.host + ' Running cycle')
            if True:
                self.mythtv_busy = False
                dprint(self.be.host + ' mythtv_busy = False')
                # Not running: poll until the backend answers a services-API
                # request, which proves it is up.
                dprint(self.be.host + ' State: Not running')
                while not abort_request:
                    try:
                        self.next_recording_time = self.be.get_next_recording_time()
                        dprint(self.be.host + ' 1 next_recording_time=' + str(self.next_recording_time))
                        # If there was not an exception, then the connection
                        # succeeded and the backend must be running.
                        break
                    except RuntimeError:
                        # Can not connect to the backend.
                        pass
                    self.timeout = MYTHTV_BUSY_CHECK_INTERVAL
                    yield
            # Running
            dprint(self.be.host + ' Running')
            if True:
                # Not busy: wait here while nothing is in use and the next
                # recording is comfortably far away.
                dprint(self.be.host + ' State: Not busy')
                while (not abort_request and not self.be.check_mythtv_inuseprograms() and
                        (self.next_recording_time == Backend.NO_SCHEDULED_RECORDINGS or
                        mythtv_now() < self.next_recording_time - MIN_TIME_BEFORE_NEXT_RECORDING)):
                    yield
                    self.next_recording_time = self.be.get_next_recording_time()
                    dprint(self.be.host + ' 2 next_recording_time=' + str(self.next_recording_time))
                # Soon - a recording will start shortly, but MythTV is not actually busy yet.  But we
                # treat it as busy so the file transfer will pause well before the recording starts.
                dprint(self.be.host + ' State: Soon')
                self.mythtv_busy = True
                while (not abort_request and
                        self.next_recording_time != Backend.NO_SCHEDULED_RECORDINGS and
                        not self.be.check_mythtv_inuseprograms()):
                    now = mythtv_now()
                    if now > self.next_recording_time:
                        break
                    # Sleep until the recording start, capped at the normal
                    # busy-check interval.
                    timeout = self.next_recording_time - now
                    if timeout < 0.0:
                        timeout = 0.0
                    if timeout > MYTHTV_BUSY_CHECK_INTERVAL:
                        timeout = MYTHTV_BUSY_CHECK_INTERVAL
                    self.timeout = timeout
                    yield
                    self.next_recording_time = self.be.get_next_recording_time()
                    dprint(self.be.host + ' 3 next_recording_time=' + str(self.next_recording_time))
                dprint(self.be.host + ' mythtv_busy = True')
                # Busy: stay busy while any programs are in use.
                dprint(self.be.host + ' State: Busy')
                while not abort_request and self.be.check_mythtv_inuseprograms():
                    self.timeout = MYTHTV_BUSY_CHECK_INTERVAL
                    yield
                self.mythtv_busy = False
        # Aborted: park the generator so further next() calls are harmless.
        while True:
            dprint(self.be.host + ' Aborted')
            yield


class MythTVStatusMonitor(threading.Thread):
    """Monitors the local (and optionally one remote) backend and keeps the
    global mythtv_idle event set only while no monitored backend is busy.

    Events arrive on self.event_queue from the serverThread listener; a
    timeout drives the periodic re-check of each backend's status.
    """

    def __init__(self, ready_event, remote_config_xml=''):
        threading.Thread.__init__(self)
        self.stopping = False
        self.ready_event = ready_event      # set once all backends are ready
        self.remote_config_xml = remote_config_xml
        self.event_queue = queue.Queue()
        self.server_thread = serverThread('0.0.0.0', self.event_queue)
        self.server_thread.name = 'serverThread'
        self.server_thread.start()

    def run(self):
        global abort_request
        global mythtv_idle
        global server
        e = None
        try:
            backends = []
            timeout_be_list = []
            # Local backend
            lbe_ready_event = threading.Event()
            lbe = MonitoredBackend(lbe_ready_event)
            backends.append(lbe)
            lbe_ready_event.wait()
            dprint('MythTVStatusMonitor: 1: abort_request=' + str(abort_request))
            if self.remote_config_xml != None:
                # Remote backend
                rbe_ready_event = threading.Event()
                rbe = MonitoredBackend(rbe_ready_event, self.remote_config_xml)
                backends.append(rbe)
                rbe_ready_event.wait()
            else:
                rbe = None
            self.ready_event.set()
            dprint('MythTVStatusMonitor: 2: abort_request=' + str(abort_request))
            while not abort_request and not self.stopping:
                dprint('MythTVStatusMonitor: Top of while not abort_request')
                # Find the smallest pending timeout over all backends and
                # the list of backends sharing that timeout.
                timeout = MYTHTV_BUSY_CHECK_INTERVAL
                busy = False
                for be in backends:
                    if be.mythtv_busy:
                        busy = True
                    if be.timeout < timeout:
                        timeout = be.timeout
                        timeout_be_list = [be]
                    elif be.timeout == timeout:
                        timeout_be_list.append(be)
                    dprint(be.be.host + ' timeout=' + str(timeout))
                if busy:
                    dprint('mythtv_idle cleared')
                    mythtv_idle.clear()
                else:
                    dprint('mythtv_idle set')
                    mythtv_idle.set()
                prev_time = time.time()
                try:
                    (addr, event) = self.event_queue.get(block=True, timeout=timeout)
                    dprint('Event ' + str(event) + ' from ' + str(addr))
                    # An event arrived early: charge the backends only for
                    # the time actually waited.
                    timeout = int(round(time.time() - prev_time))
                except queue.Empty:
                    addr = ''
                    event = 'timeout'
                    dprint('Event: timeout')
                # Age every backend's timeout by the time that elapsed.
                for be in backends:
                    be.timeout -= timeout
                    if be.timeout <= 0.0:
                        be.timeout = MYTHTV_BUSY_CHECK_INTERVAL
                    dprint(be.be.host + ' decremented be.timeout=' + str(be.timeout))
                if event == 'timeout':
                    for be in timeout_be_list:
                        dprint(be.be.host + ' timeout next(be.status)')
                        next(be.status)
                elif event[0:4] == 'key':
                    # NOTE(review): event[0:4] is a 4-character slice compared
                    # with the 3-character literal 'key', so this branch only
                    # matches when event is exactly 'key' (and event[5:] is
                    # then empty) - likely startswith('key') was intended;
                    # confirm the sender's message format.
                    dprint('Keystroke from ' + str(addr) + ': ' + event[5:] + 'Aborting!')
                    dprint('MythTVStatusMonitor.run: abort_request = True')
                    request_abort()
                elif event == 'stop':
                    # Only honour a stop from a backend we are monitoring.
                    found = False
                    for be in backends:
                        if be.be.host == addr[0]:
                            found = True
                    if found:
                        dprint('Event stop from backend ' + str(addr))
                        #request_abort()
                        self.stop()
                    else:
                        dprint('Event stop from unknown backend ' + str(addr) + ' ignored')
                else:
                    # Any other event type: resume the matching backend's
                    # state machine and pull its next check forward.
                    found = False
                    for be in backends:
                        if be.be.host == addr[0]:
                            found = True
                            dprint(be.be.host + ' event ' + event + ' next(be.status)')
                            next(be.status)
                            if be.timeout > MYTHTV_BUSY_RECHECK_TIMEOUT:
                                be.timeout = MYTHTV_BUSY_RECHECK_TIMEOUT
                    if not found:
                        dprint('Event "' + event + '" from unknown backend ' + str(addr) + ' ignored')
        except Exception as e:
            dprint('Exception in MythTVStatusMonitor.run')
            dprint(traceback.format_exc())
            dprint('MythTVStatusMonitor.run: abort_request = True')
            request_abort()
            self.stop()
            raise e
        dprint('Stopping server thread')
        self.server_thread.stop()

    def stop(self):
        # Wake run() with a synthetic 'stop' event so it exits its loop.
        self.stopping = True
        self.event_queue.put(((HOST, 0), 'stop'))
        self.server_thread.stop()

##############################################################################
# Check if a directory exists and ask for it to be mounted if it does not.
# Returns:
#   '' if directory exists (or did not exist but now has been mounted and does)
#   'S' if the directory is to be skipped
##############################################################################

def check_mounted(dir, autoskip=False):
    # Loop until the directory appears (the user mounted it) or is skipped.
    while not os.path.isdir(dir):
        if autoskip:
            print(
                'Can not find storage group directory "' + dir + '"' +
                ", automatically skipping this directory."
            )
            return 'S'
        else:
            # NOTE(review): this prompt reads "hit , or hit 'S'" - an
            # "<Enter>"-like token appears to have been lost from the
            # string at some point; confirm against version control.
            print(
                'Can not find storage group directory "' + dir + '"' +
                ". Please mount it and hit , or hit 'S' to skip"
            )
            retchar = getChar()
            if retchar == 'S' or retchar == 's':
                return 'S'
            if retchar == CTRL_C:
                exit(4)
    return ''

##############################################################################
# Get the locations of all the recording files in a storage group and record
# them in a table.
##############################################################################

def find_recording_locations_in_SG(
    be, sg_name, location_table, autoskip=False, delete_old=False,
    optional=False
):
    """Record the location of every recording file in storage group sg_name.

    Scans each directory of the storage group and inserts one
    (basename, location) row per recording file into location_table.
    Unmounted directories may be skipped (interactively, or automatically
    when autoskip is True); for a skipped directory the recordings already
    recorded in the archived table are counted instead of rescanning.

    be             - Backend providing the database connection
    sg_name        - name of the storage group to scan
    location_table - table to insert (basename, location) rows into
    autoskip       - skip unmounted directories without prompting
    delete_old     - delete existing location_table rows for a directory
                     before rescanning it
    optional       - if True, a missing storage group prints a warning and
                     returns 0 instead of exiting

    Returns the total number of recordings found (including those counted
    for skipped directories).
    """
    storagegroup = sorted(be.db.getStorageGroup(sg_name),
                          key=operator.attrgetter('groupname', 'hostname', 'dirname'))
    if len(storagegroup) == 0:
        if optional:
            print("Warning: Storage group %s does not exist." % sg_name)
            return 0
        else:
            print("Error: Storage group %s does not exist!" % sg_name)
            exit(3)
    # Scan the archive storage group directories and create location records
    # for all recordings found.
    recordings_count = 0
    for storagedir in storagegroup:
        dirname = storagedir.dirname
        print('Storage directory: ' + dirname)
        if check_mounted(dirname, autoskip) == 'S':
            # Directory skipped: count what the archived table already holds.
            # Values are passed as query parameters so that quotes in path
            # names cannot break (or inject into) the SQL.
            be.dbc.execute(
                "SELECT COUNT(*) AS recordings_count FROM " + ARCHIVED_TABLE +
                " WHERE location=%s", (dirname,)
            )
            table_recordings_count = be.dbc.fetchone()[0]
            print('...skipping (%d recordings found in %s table)'
                  % (table_recordings_count, ARCHIVED_TABLE))
            recordings_count += table_recordings_count
        else:
            if len(dirname) > MAX_PATH_LENGTH:
                # The location column is VARCHAR(MAX_PATH_LENGTH): refuse to
                # store a path that would be truncated.
                print(
                    "Error: Storage directory name exceeds MAX_PATH_LENGTH ("
                    + str(MAX_PATH_LENGTH) + ")"
                )
                print(" '" + dirname + "'")
                print(" " + str(len(dirname)) + " bytes")
                print(
                    "Change the MAX_PATH_LENGTH and run " + program_name +
                    " clean before trying again"
                )
                exit(4)
            if delete_old:
                print("...deleting old location records")
                be.dbc.execute(
                    "DELETE FROM " + location_table + " where location=%s",
                    (dirname,)
                )
            print('...scanning')
            recordings_list = []
            for ext in RECORDING_EXT:
                recordings_list.extend(glob.glob(os.path.join(dirname, '*' + ext)))
            for recording in sorted(recordings_list):
                #dprint(str(recording))
                # Parameterised INSERT: a file name containing a quote must
                # not break the statement.
                be.dbc.execute(
                    "INSERT INTO " + location_table + "(basename, location) " +
                    "VALUES(%s, %s)",
                    (os.path.basename(recording), dirname)
                )
            print('...found ' + str(len(recordings_list)) + ' recordings')
            recordings_count += len(recordings_list)
    return recordings_count
##############################################################################
# Get the free space on a filesystem.
##############################################################################

def get_fs_freespace(pathname):
    """Get the free space, in bytes, of the filesystem containing pathname."""
    # Renamed the local from "stat" to "vfs": the old name shadowed the
    # stat module imported at the top of the file.
    vfs = os.statvfs(pathname)
    # Use f_bfree for superuser, or f_bavail if filesystem has reserved space
    # for superuser.
    return vfs.f_bavail*vfs.f_bsize

##############################################################################
# Move a file with callback abort capability (on user request or MythTV
# activity)
#
# Returns:
#   True if the file was moved successfully
#   False otherwise
##############################################################################

class moveCallback:
    """Progress/abort callback used by move_file().

    Draws a progress bar and aborts the move (returns True from callback)
    when the user presses any key.
    """

    def __init__(self, filesize, columns):
        if filesize == 0:
            # Prevent divide by 0.
            filesize = 1
        self.prb = progressBar(0, filesize, columns)
        self.kb = KBHit()

    def callback(self, moved, paused):
        """Update the progress bar; return True to abort the move."""
        global mythtv_idle
        self.prb(moved, paused)
        if self.kb.kbhit():
            # Consume the keystroke and restore the terminal before aborting.
            self.kb.getch()
            self.kb.set_normal_term()
            mythtv_idle.set()
            return True
        return False


def move_file(src, dst):
    """Move src to dst with a progress bar and keyboard abort.

    Returns True if the file was moved successfully, False if aborted.
    """
    filesize = os.path.getsize(src)
    (columns, lines) = shutil.get_terminal_size()
    # Cap the progress bar width at 80 columns.
    mcb = moveCallback(filesize, min(columns, 80))
    print(
        "...moving " + str(filesize) + " byte (" + sizeof_fmt(filesize) +
        ") file " + src + " to " + dst
    )
    print("...while moving is in progress, hit any key to abort")
    try:
        start_time = time.time()
        move2c(src, dst, mcb.callback)
    except CopyFileObjAborted:
        print("\n...moving aborted, partially copied destination file deleted")
        return False
    else:
        diff = time.time() - start_time
        # Erase the progress bar lines before printing the summary.
        print(
            "\r\033[K\033[A\r\033[K...moving complete (%2.2f s, %s/s)"
            % (diff, sizeof_fmt(filesize / diff))
        )
        return True

##############################################################################
# Create the archived table.
##############################################################################

def create_archived_table(lbe, basename_type):
    """Create the ARCHIVED_TABLE in mythconverg if it does not exist.

    basename_type is the SQL type to use for the basename primary key.
    No exception handling is done for this table creation - we want to
    exit on any error.
    """
    ddl = ("CREATE TABLE IF NOT EXISTS " + ARCHIVED_TABLE + " ("
           + "basename " + basename_type + " PRIMARY KEY NOT NULL, "
           + "location VARCHAR(" + str(MAX_PATH_LENGTH) + ") NOT NULL"
           + ")")
    result = lbe.dbc.execute(ddl)
    print('Created mythconverg database table ' + ARCHIVED_TABLE + '.')

##############################################################################
# Get the type of the basename field in the mythconverg.recorded table.
##############################################################################

def get_basename_type(lbe):
    """Return the SQL column type of mythconverg.recorded.basename."""
    recorded_fields = lbe.db.db.tablefields.recorded
    return recorded_fields.basename.type

##############################################################################
# Get the paths from the archived storage group.
##############################################################################

def get_storagegroup(lbe):
    """Return the archive storage group's directory records, sorted.

    Exits with code 3 if the archive storage group does not exist.
    """
    sort_key = operator.attrgetter('groupname', 'hostname', 'dirname')
    group_dirs = sorted(lbe.db.getStorageGroup(ARCHIVE_SG_NAME), key=sort_key)
    if not group_dirs:
        print("Error: Storage group %s does not exist!" % ARCHIVE_SG_NAME)
        exit(3)
    return group_dirs

##############################################################################
# Fill the archive drives with recordings by moving them from the recording
# drives.
##############################################################################

def command_fill():
    """Fill the archive drives by moving old recordings onto them.

    Creates/refreshes the mythsgu work tables, scans all storage groups for
    recording files, then moves the oldest non-excluded recordings (plus
    their *.png preview files) onto the archive directories until each is
    full.  A MythTVStatusMonitor thread pauses/aborts the moves when MythTV
    becomes busy.  Optional command-line word 'autoskip' skips unmounted
    directories without prompting.
    """
    global abort_request
    dprint_init()
    dprint('command_fill: 1: abort_request=' + str(abort_request))
    si = SingleInstance()
    if si.is_running:
        print("Error: " + sys.argv[0] + " is already running!")
        exit(5)
    autoskip = False
    if len(sys.argv) >= 3:
        if sys.argv[2] == 'autoskip':
            autoskip = True
        else:
            # Bug fix: this used sys.argc[2] - Python has no sys.argc, so an
            # invalid option raised AttributeError instead of this message.
            print('Error: Invalid option after fill command: ' + sys.argv[2])
            exit(1)
    lbe = Backend()
    lbe.init()
    dprint('command_fill: 2: abort_request=' + str(abort_request))
    basename_type = get_basename_type(lbe)
    #if lbe.check_mythtv_inuseprograms():
    #    print("MythTV is busy at the moment, please try again later")
    #    exit(3)
    # Create the archived table only if it does not already exist (it holds
    # persistent state).  The move and find work tables are recreated fresh.
    try:
        lbe.dbc.execute("SELECT 1 from " + ARCHIVED_TABLE)
    except MythTV.exceptions.MythDBError:
        create_archived_table(lbe, basename_type)
    try:
        lbe.dbc.execute("SELECT 1 from " + MOVE_TABLE)
    except MythTV.exceptions.MythDBError:
        pass
    else:
        lbe.dbc.execute("DROP TABLE " + MOVE_TABLE)
    # No exception handling done for this table creation - we want to exit on
    # any error.
    lbe.dbc.execute(
        "CREATE TABLE " + MOVE_TABLE + " (" +
        "basename " + basename_type + " PRIMARY KEY NOT NULL " +
        ")"
    )
    print('Created mythconverg database table ' + MOVE_TABLE + '.')
    try:
        lbe.dbc.execute("SELECT 1 from " + FIND_TABLE)
    except MythTV.exceptions.MythDBError:
        pass
    else:
        lbe.dbc.execute("DROP TABLE " + FIND_TABLE)
    lbe.dbc.execute("CREATE TABLE " + FIND_TABLE + " LIKE " + ARCHIVED_TABLE)
    print('Created mythconverg database table ' + FIND_TABLE + '.')
    archived_recordings_count = find_recording_locations_in_SG(
        lbe, ARCHIVE_SG_NAME, ARCHIVED_TABLE, autoskip, delete_old=True
    )
    # Fill the archive storage group directories with the oldest files not
    # already in an archive storage group.  Ignore recordings that meet the
    # exclusion criteria.
    lbe.dbc.execute(
        "INSERT INTO " + MOVE_TABLE + " SELECT basename FROM recorded r " +
        "WHERE NOT (" + EXCLUDED_RECORDINGS_SQL + ") " +
        "AND (SELECT COUNT(*) FROM " + ARCHIVED_TABLE + " f WHERE f.basename=r.basename)=0 "
        "ORDER BY basename"
    )
    # Find the locations of all the other recording files.
    normal_recordings_count = 0
    for sg_name in NORMAL_SG_NAMES:
        normal_recordings_count += \
            find_recording_locations_in_SG(lbe, sg_name, FIND_TABLE, autoskip)
    for sg_name in OPTIONAL_SG_NAMES:
        normal_recordings_count += \
            find_recording_locations_in_SG(lbe, sg_name, FIND_TABLE, autoskip, optional=True)
    total_recordings = archived_recordings_count + normal_recordings_count
    print(
        'Total of ', str(total_recordings) + ' recordings found'
    )
    # Sanity-check the file counts against the recorded table.
    lbe.dbc.execute("SELECT COUNT(*) AS count FROM recorded")
    #dprint(str(dir(lbe.dbc)))
    #dprint(lbe.dbc.__dict__)
    recorded_count = lbe.dbc.fetchone()[0]
    if total_recordings != recorded_count:
        if total_recordings > recorded_count:
            print(
                "Error: Total recordings counted is greater than number of "
                "recordings in recorded table!"
            )
            exit(5)
        else:
            print(
                "Warning: There are " + str(recorded_count - total_recordings) +
                " missing recording files (not found in the storage groups)!"
            )
    storagegroup = get_storagegroup(lbe)
    #if lbe.check_mythtv_inuseprograms():
    #    print("MythTV is busy at the moment, please try again later")
    #    exit(3)
    dprint('command_fill: 3: abort_request=' + str(abort_request))
    # Start monitoring MythTV activity; moves pause while MythTV is busy.
    ready = threading.Event()
    status_thread = MythTVStatusMonitor(ready)
    status_thread.name = 'MythTVStatusMonitor'
    status_thread.start()
    ready.wait()
    dprint('command_fill: 4: abort_request=' + str(abort_request))
    try:
        # Second cursor: lbe.dbc is busy iterating the MOVE_TABLE rows.
        dbc2 = lbe.db.cursor()
        for storagedir in storagegroup:
            if abort_request:
                break
            dirname = storagedir.dirname
            if check_mounted(dirname, autoskip) != 'S':
                # Move files to archive directory until it is full.
                print("\nMoving files to " + dirname + '\n')
                dst_blksize = os.statvfs(dirname).f_bsize
                # Oldest first: order by the timestamp part of the basename.
                lbe.dbc.execute(
                    "SELECT basename FROM " + MOVE_TABLE +
                    " ORDER BY substr(basename, instr(basename, '_') + 1, 14)"
                )
                for fetched in lbe.dbc:
                    basename = fetched[0]
                    rows = dbc2.execute(
                        "SELECT location FROM " + FIND_TABLE +
                        " WHERE basename='" + basename + "'"
                    )
                    if rows == 0:
                        print('Error: File ' + basename + ' not found!\n')
                    else:
                        location = dbc2.fetchone()[0]
                        src = os.path.join(location, basename)
                        print('Moving ' + src + ' to ' + dirname)
                        # Free space needed, rounded up to whole blocks, for
                        # the recording and its *.png preview files.
                        dst_free_required = round_up(os.path.getsize(src), dst_blksize)
                        associated_files = glob.glob(src + "*png")
                        for associated_file in associated_files:
                            dst_free_required += round_up(os.path.getsize(associated_file), dst_blksize)
                        if get_fs_freespace(dirname) < dst_free_required + MIN_FREE_SPACE:
                            print('...insufficient free space')
                            break
                        if not move_file(src, dirname):
                            dprint('command_fill: abort_request = True')
                            request_abort()
                            break
                        for associated_file in associated_files:
                            # Move file unconditionally (no abort possible), and
                            # with its user and group ownership intact.
                            dst = os.path.join(dirname, os.path.basename(associated_file))
                            shutil.copy2(associated_file, dst)
                            st = os.stat(associated_file)
                            os.chown(dst, st.st_uid, st.st_gid)
                            os.remove(associated_file)
                        # Update the recording's storage group in the
                        # database so MythTV knows where it now lives.
                        rows = dbc2.execute("SELECT recordedid FROM recorded WHERE basename='" + basename + "'")
                        if rows == 0:
                            print('Error: Basename ' + basename + ' not found in recorded table!')
                        else:
                            recordedid = dbc2.fetchone()[0]
                            rows = dbc2.execute("SELECT storagegroup FROM recordedfile WHERE recordedid=" + str(recordedid))
                            if rows == 0:
                                print('Warning: No recordedfile entry for basename ' + basename + ' (recordedid=' + str(recordedid) + ')')
                            else:
                                dbc2.execute("UPDATE recordedfile SET storagegroup='" + ARCHIVE_SG_NAME + "' WHERE recordedid=" + str(recordedid))
                        dbc2.execute(
                            "DELETE from " + MOVE_TABLE +
                            " WHERE basename='" + basename + "'"
                        )
    except:
        # Deliberately broad: report whatever went wrong, then fall through
        # to the cleanup below.
        traceback.print_exc()
    finally:
        status_thread.stop()
    if abort_request:
        print(time.strftime("%H:%M:%S Aborted!"))
    print("Dropping mythconverg database table " + FIND_TABLE)
    lbe.dbc.execute("DROP TABLE IF EXISTS " + FIND_TABLE)
    thread_count = threading.active_count()
    if thread_count > 1:
        print('threading.enumerate()=' + str(threading.enumerate()))
    si.clean_up()
    print("Shutting down...")
    if thread_count > 1:
        print("... waiting for threads to stop. PID=" + str(os.getpid()))
    return

##############################################################################
# Called from EventCmdRecExpired to allow mythsgu to delete recordings expired
# from archive drives from its tables.  Should not be called manually.
##############################################################################

def command_expire():
    # Not implemented yet.
    return

##############################################################################
# Clean - drop all mythsgu tables.
##############################################################################

def do_clean(lbe):
    """Drop all three mythsgu tables (archived, move and find)."""
    print("Dropping " + ARCHIVED_TABLE)
    lbe.dbc.execute("DROP TABLE IF EXISTS " + ARCHIVED_TABLE)
    print("Dropping " + MOVE_TABLE)
    lbe.dbc.execute("DROP TABLE IF EXISTS " + MOVE_TABLE)
    print("Dropping " + FIND_TABLE)
    lbe.dbc.execute("DROP TABLE IF EXISTS " + FIND_TABLE)


def command_clean():
    """'clean' command: remove all mythsgu tables from mythconverg."""
    lbe = Backend()
    lbe.init()
    print("Clean up the mythconverg database by removing all mythsgu tables\n")
    do_clean(lbe)
    print("\nClean completed")

##############################################################################
# Scan - drop all mythsgu tables and re-create them by scanning all the
# archive drives.  Skipping is not permitted as all archived drives must be
# scanned.
##############################################################################

def command_scan():
    """'scan' command: rebuild the archived table from the archive drives."""
    lbe = Backend()
    lbe.init()
    do_clean(lbe)
    print('All mythsgu tables dropped')
    create_archived_table(lbe, get_basename_type(lbe))
    # autoskip=False: every archive drive must be present for a full scan.
    archived_recordings_count = find_recording_locations_in_SG(
        lbe, ARCHIVE_SG_NAME, ARCHIVED_TABLE, False, delete_old=False
    )
    print('Scan completed, ' + str(archived_recordings_count) + ' archived recordings found')

##############################################################################
# Copy a file with callback abort capability (on user request or MythTV
# activity)
#
# Returns:
#   True if the file was moved successfully
#   False otherwise
##############################################################################

class copyCallback:
    """Progress/abort callback used by command_copy().

    NOTE(review): unlike moveCallback, there is no filesize == 0 guard
    here - confirm progressBar copes with a zero-byte file.
    """

    def __init__(self, filesize, columns):
        self.prb = progressBar(0, filesize, columns)
        self.kb = KBHit()

    def callback(self, copied, paused):
        # Update the progress bar; return True to abort the copy.
        global mythtv_idle
        self.prb(copied, paused)
        if self.kb.kbhit():
            ch = self.kb.getch()
            self.kb.set_normal_term()
            dprint('copyCallback.callback: kbhit - mythtv_idle set')
            mythtv_idle.set()
            return True
        return False


def command_copy(src, dst):
    # Copy src to dst with a progress bar and keyboard abort.  Returns True
    # on success, False if the copy was aborted.
    filesize = os.path.getsize(src)
    (columns, lines) = shutil.get_terminal_size()
    ccb = copyCallback(filesize, min(columns, 80));
    print(
        "...copying " + str(filesize) + " byte (" + sizeof_fmt(filesize) +
        ") file " + src + " to " + dst
    )
    print("...while copying is in progress, hit any key to abort")
    try:
        start_time = time.time()
        copy2c(src, dst, ccb.callback)
    except CopyFileObjAborted:
        print("\n...copying aborted, partially copied destination file deleted")
        return False
    else:
        diff = time.time() - start_time
        # Erase the progress bar lines before printing the summary.
        print(
            "\r\033[K\033[A\r\033[K...Copying complete (%2.2f s, %s/s)"
            % (diff, sizeof_fmt(filesize / diff))
        )
        return True

##############################################################################
# Copy an entire directory to another directory.  This will often be over a
# network connection to another PC.
##############################################################################

def command_copydir(srcdir, dstdir, finished_dir, dst_config_xml=None):
    """Copy every exported recording (a *.sql file plus its matching files)
    from srcdir to dstdir, then move the copied source files to
    finished_dir.

    The .sql file is copied last under a .tmp name and renamed into place
    to signal that the recording's files are complete on the destination.
    dst_config_xml, if given, names the destination PC's config.xml so the
    destination backend's activity is also monitored.
    """
    global abort_request
    dprint_init()
    abort_request = False
    dprint('command_copydir initialisation: abort_request = False')
    srcdir = fullpath(srcdir)
    dstdir = fullpath(dstdir)
    finished_dir = fullpath(finished_dir)
    if dst_config_xml != None:
        dst_config_xml = fullpath(dst_config_xml)
    # Normalise all directory paths to end with '/'.
    if srcdir[-1] != '/':
        srcdir += '/'
    if dstdir[-1] != '/':
        dstdir += '/'
    if finished_dir[-1] != '/':
        finished_dir += '/'
    print('Copying exported recordings with .sql files')
    print(' from: ' + srcdir)
    print(' to: ' + dstdir)
    print(' when finished copying, moving recording files to: ')
    print(' ' + finished_dir)
    if dst_config_xml != None:
        print(' Destination MythTV box config.xml:')
        print(' ' + dst_config_xml)
    print()
    if not os.path.isdir(srcdir):
        print('Error: srcdir ("' + srcdir + '") is not a directory!')
        exit(1)
    if not os.path.isdir(dstdir):
        print('Error: dstdir ("' + dstdir + '") is not a directory!')
        exit(1)
    if not os.path.isdir(finished_dir):
        print('Error: finished_dir ("' + finished_dir + '") is not a directory!')
        exit(1)
    if dst_config_xml != None and not os.path.isfile(dst_config_xml):
        print('Error: dst_config_xml ("' + dst_config_xml + '") is not a file!')
        exit(1)
    si = SingleInstance()
    if si.is_running:
        print("Error: " + sys.argv[0] + " is already running!")
        exit(5)
    dst_blksize = os.statvfs(dstdir).f_bsize
    # Monitor local (and optionally destination) MythTV activity; copies
    # pause while either is busy.
    ready = threading.Event()
    status_thread = MythTVStatusMonitor(ready, dst_config_xml)
    status_thread.name = 'MythTVStatusMonitor'
    status_thread.start()
    ready.wait()
    try:
        abort_request = False
        dprint('command_copydir: abort_request = False')
        sql_files = glob.glob(srcdir + '*.sql')
        files_left = len(sql_files)
        print()
        print('Found ' + str(files_left) + ' *.sql files to copy')
        print()
        for sql_file in sql_files:
            # Remove file extension.
            src = os.path.splitext(sql_file)[0]
            print(str(files_left) + ': Copying ' + src + '* to ' + dstdir)
            dprint(str(files_left) + ': Copying ' + src + '* to ' + dstdir)
            files_left -= 1
            # Work out the free space needed and identify the recording
            # file among everything sharing this basename.
            dst_free_required = 0
            matching_files = glob.glob(src + '*')
            recording_file = ''
            for matching_file in matching_files:
                dst_free_required += round_up(os.path.getsize(matching_file), dst_blksize)
                ext = os.path.splitext(matching_file)[1]
                if ext in RECORDING_EXT:
                    if recording_file != '':
                        print('Error: More than one recording file for recording ' + matching_file)
                        dprint('command_copydir More than one recording file for recording: abort_request = True')
                        raise AbortRequest
                    recording_file = matching_file
            if recording_file == '':
                print('Error: No matching recording file for ' + sql_file)
            else:
                # The recording and .sql files are handled specially below;
                # remove them from the ancillary-file list.
                matching_files.remove(recording_file)
                matching_files.remove(sql_file)
                dprint('matching_files = ' + str(matching_files))
                dprint('dst_free_required = ' + str(dst_free_required))
                if get_fs_freespace(dstdir) < dst_free_required + MIN_FREE_SPACE:
                    print('...insufficient free space')
                    dprint('command_copydir insufficient free space: abort_request = True')
                    raise AbortRequest
                # Copied files take on the destination directory's owner.
                st = os.stat(dstdir)
                dst = dstdir + os.path.basename(recording_file)
                if abort_request:
                    dprint('After os.stat(dstdir): raising AbortRequest')
                    raise AbortRequest
                if recording_file != '':
                    dprint('Copying recording file')
                    try:
                        if not command_copy(recording_file, dst):
                            dprint('command_copydir: command_copy raising AbortRequest')
                            raise AbortRequest
                    except IOError as e:
                        # Handle permissions error
                        if e.errno == 13:
                            dprint(traceback.format_exc())
                            dprint('Permissions error - aborting')
                            traceback.print_exc()
                            print('Permissions error - aborting')
                            raise AbortRequest
                        else:
                            raise
                    os.chown(dst, st.st_uid, st.st_gid)
                if abort_request:
                    dprint('After os.chown(dst): raising AbortRequest')
                    raise AbortRequest
                dprint('Copying matching files')
                for matching_file in matching_files:
                    # Copy file unconditionally (no abort possible), and
                    # with its user and group ownership intact.
                    dst = os.path.join(dstdir, os.path.basename(matching_file))
                    try:
                        shutil.copy2(matching_file, dst)
                        os.chown(dst, st.st_uid, st.st_gid)
                    except Exception as e:
                        print()
                        print(e)
                        dprint('command_copydir shutil.copy2/os.chown exception: abort_request = True')
                        request_abort()
                        raise e
                if abort_request:
                    dprint('After copying matching files: raising AbortRequest')
                    raise AbortRequest
                # The .sql file is copied last, via a .tmp name.
                dst = os.path.join(dstdir, os.path.basename(sql_file)) + '.tmp'
                dprint('Copying .sql file')
                dprint('sql_file="' + sql_file + '"')
                dprint('dst="' + dst + '"')
                try:
                    shutil.copy2(sql_file, dst)
                    os.chown(dst, st.st_uid, st.st_gid)
                    dprint('.sql file copied')
                    # Remove the .tmp to signal that copying of this recording's files has been completed successfully.
                    os.rename(dst, os.path.splitext(dst)[0])
                    dprint('.sql file renamed to "' + os.path.splitext(dst)[0] + '"')
                except Exception as e:
                    print()
                    print(str(e))
                    dprint('command_copydir os.rename exception: abort_request = True')
                    request_abort()
                    raise e
                if abort_request:
                    dprint('After copying .sql file: raising AbortRequest')
                    raise AbortRequest
                try:
                    # All copies succeeded: move the source files out of
                    # srcdir so they are not copied again.
                    dprint('Moving files to finished_dir')
                    if recording_file != '':
                        matching_files.append(recording_file)
                    matching_files.append(sql_file)
                    for matching_file in matching_files:
                        st = os.stat(matching_file)
                        dst = finished_dir + os.path.basename(matching_file)
                        shutil.move(matching_file, dst)
                        os.chown(dst, st.st_uid, st.st_gid)
                except Exception as e:
                    print(e)
                    dprint('command_copydir os.chown exception: abort_request = True')
                    request_abort()
                    raise e
    except AbortRequest:
        pass
    except:
        traceback.print_exc()
    finally:
        dprint('command_copydir: status_thread.stop()')
        status_thread.stop()
        dprint('command_copydir: status_thread.join()')
        status_thread.join(2.0)
        if status_thread.is_alive():
            dprint('command_copydir: status_thread.is_alive()')
    if abort_request:
        print(time.strftime('%H:%M:%S Aborted!'))
    if not abort_request:
        print('Copying complete')
    thread_count = threading.active_count()
    if thread_count > 1:
        dprint('threading.enumerate()=' + str(threading.enumerate()))
        # NOTE(review): threading.currentThread() and Thread.getName() are
        # deprecated aliases of current_thread() and the name attribute.
        main_thread = threading.currentThread()
        for t in threading.enumerate():
            if not t is main_thread:
                dprint('Joining ' + t.getName())
                t.join()
        dprint('Joining complete')
    si.clean_up()
    print("Shutting down...")
    if thread_count > 1:
        print("... waiting for threads to stop. PID=" + str(os.getpid()))
    return

##############################################################################
# The event command is called by MythTV (or other) events to notify a possible
# running copy of mythsqu about the event, such as playback or recording is
# starting.  This notifies mythsgu that it might need to pause or resume
# a current operation.
#
# Since the event command is run from a separate instance of mythsgu, it
# connects to the already running copy of mythsgu and sends a message about
# the event that has happened. If there is no running copy of mythsgu to send
# a message to, then it exits with a 0 exit code indicating no error.
##############################################################################
def command_event(args):
    """Send an event notification to an already-running mythsgu instance.

    args is the argument list after the 'event' command word:
      ['-h', host, event_name]  -- send event_name to the given host
      [event_name]              -- send event_name to the default HOST
    Returns 0 (no error even when no running copy is listening).
    """
    if args[0] == '-h':
        # args[1] should be an IP address or DNS name.
        # args[2] should be an event name.
        client = MythsguClient(args[1], program_name + ': ' + args[2])
    else:
        # args[0] should be an event name.
        client = MythsguClient(HOST, program_name + ': ' + args[0])
    # Run the asyncore loop just long enough to deliver the one message.
    asyncore.loop(count=1)
    return 0


##############################################################################
# Create *.sql files with the exported database entries for all MythTV
# recording files found in the given directory. Creates the same exported
# data as mythexport, so that mythimport can be used to add the recording
# to another MythTV box's database.
##############################################################################
def command_exportdir(srcdir='.'):
    """Dump per-recording *.sql export files for every recording in srcdir.

    For each recording file found (by RECORDING_EXT extension) that does not
    already have a matching .sql file, look it up in the 'recorded' table and
    run mysqldump twice: once for the recording tables (keyed on starttime)
    and once for recordedprogram (keyed on progstart). On any dump failure
    the partial .sql file is removed and the recording is not counted.
    """
    srcdir = fullpath(srcdir)
    if srcdir[-1] != '/':
        srcdir += '/'
    print('Generating *.sql export files for all recording files in directory ' + str(srcdir))
    print()
    if not os.path.isdir(srcdir):
        print('Error: "' + srcdir + '" is not a directory!')
        exit(1)
    be = Backend()
    be.init()
    recordings_list = []
    exported_count = 0
    for ext in RECORDING_EXT:
        recordings_list.extend(glob.glob(os.path.join(srcdir, '*' + ext)))
    for recording in recordings_list:
        sql_file = recording + '.sql'
        basename = os.path.basename(recording)
        if os.path.exists(sql_file):
            print('Skipping ' + recording + ' as ' + sql_file + ' exists')
            continue
        print('Dumping SQL data for ' + recording)
        # Parameterized query: basename comes from a filename on disk and
        # must not be spliced into the SQL text directly.
        result = be.dbc.execute(
            "SELECT chanid,starttime,progstart FROM recorded WHERE basename=%s",
            (basename,))
        if result == 0:
            print('... Skipping ' + basename + ' as it is not in the recording database')
        elif result > 1:
            print('... Skipping ' + basename + ' as there are multiple matches in the recording database - corrupt database!')
        else:
            (chanid, starttime, progstart) = be.dbc.fetchone()
            print('... sql_file=' + sql_file)
            # NOTE(security): the database password appears on the mysqldump
            # command line and the command runs through a shell; acceptable
            # only on a trusted single-user box.
            dumpcmd = (
                'mysqldump -h' + be.host
                + ' -u' + be.db.dbconfig.username
                + ' -p' + be.db.dbconfig.password
                + ' -P' + str(be.db.dbconfig.port)
                + ' ' + be.db.dbconfig.database
                + ' recorded recordedseek recordedrating recordedmarkup recordedcredits'
                + ' --where="chanid=' + str(chanid)
                + " and starttime='" + str(starttime) + "'" + '"'
                + ' --no-create-db --no-create-info > ' + sql_file + ' 2>&1'
            )
            #print('... dumpcmd=' + dumpcmd)
            result = os.system(dumpcmd)
            if result != 0:
                print('... First mysqldump command failed with return code ' + str(result))
            else:
                # Second dump: recordedprogram is keyed on the programme
                # start time (progstart), not the recording start time.
                dumpcmd = (
                    'mysqldump -h' + be.host
                    + ' -u' + be.db.dbconfig.username
                    + ' -p' + be.db.dbconfig.password
                    + ' -P' + str(be.db.dbconfig.port)
                    + ' ' + be.db.dbconfig.database
                    + ' recordedprogram'
                    + ' --where="chanid=' + str(chanid)
                    + " and starttime='" + str(progstart) + "'" + '"'
                    + ' --no-create-db --no-create-info >> ' + sql_file + ' 2>&1'
                )
                #print('... dumpcmd=' + dumpcmd)
                result = os.system(dumpcmd)
                if result != 0:
                    print('... Second mysqldump command failed with return code ' + str(result))
                else:
                    # Count the recording only when both dumps succeeded;
                    # a failed dump's partial file is removed below.
                    exported_count += 1
            if result != 0:
                os.remove(sql_file)
                print('... SQL file ' + sql_file + ' deleted')
    print()
    print('Exportdir command finished')
    print('Exported ' + str(exported_count) + ' recordings')


##############################################################################
# Display the free space on the partitions of a storage group.
##############################################################################
def display_free_archive_space(storagegroup):
    """Print an aligned table of free recording space per storage group dir.

    Free space is reported after reserving MIN_FREE_SPACE, clamped at zero.
    Archive directories are shown with their ARCHIVE_PATH_PREFIX/SUFFIX
    stripped; other directories are shown in full.
    """
    free_list = []
    max_short_sg_name = 0
    max_free_space = 0
    for sg in storagegroup:
        if sg.dirname.startswith(ARCHIVE_PATH_PREFIX) and sg.dirname.endswith(ARCHIVE_PATH_SUFFIX):
            short_sg_name = sg.dirname[len(ARCHIVE_PATH_PREFIX):len(sg.dirname) - len(ARCHIVE_PATH_SUFFIX)]
        else:
            short_sg_name = sg.dirname
        free_space = get_fs_freespace(sg.dirname) - MIN_FREE_SPACE
        if free_space < 0:
            free_space = 0
        formatted_free = sizeof_fmt(free_space)
        free_list.append((short_sg_name, formatted_free))
        if len(short_sg_name) > max_short_sg_name:
            max_short_sg_name = len(short_sg_name)
        # Column width must be measured on the formatted string that is
        # actually printed (previously it used len(str(free_space)), the
        # raw byte count, which mis-aligned the output).
        if len(formatted_free) > max_free_space:
            max_free_space = len(formatted_free)
    for fl in free_list:
        print(f' {fl[0]:{max_short_sg_name}}:{fl[1]:>{max_free_space}}')
    print()


##############################################################################
# Pack an archive partition by moving recordings from the next archive
# partition until there is no free space left.
##############################################################################
def pack(be, source, dest):
    """Move archived recordings from the source partition to dest.

    Recording basenames for the source partition are copied from
    ARCHIVED_TABLE into a temporary MOVE_TABLE, then moved one at a time
    (oldest first, by the timestamp embedded in the basename) until dest
    runs out of space (keeping MIN_FREE_SPACE reserved) or a move is
    aborted. Returns 0 on success, 1 if a move failed/was aborted.
    """
    global abort_request
    retcode = 0
    print(f'Packing archived recording files from {source} to {dest}')
    # Drop any MOVE_TABLE left over from a previous interrupted run.
    try:
        be.dbc.execute("SELECT 1 from " + MOVE_TABLE)
    except MythTV.exceptions.MythDBError:
        pass
    else:
        be.dbc.execute("DROP TABLE " + MOVE_TABLE)
    basename_type = get_basename_type(be)
    # No exception handling done for this table creation - we want to exit on
    # any error.
    be.dbc.execute(
        "CREATE TABLE " + MOVE_TABLE + " ("
        + "basename " + basename_type + " PRIMARY KEY NOT NULL "
        + ")"
    )
    print('Created mythconverg database table ' + MOVE_TABLE + '.')
    # Table names cannot be parameterized; the location value can and is.
    be.dbc.execute(
        "INSERT INTO " + MOVE_TABLE
        + " SELECT basename FROM " + ARCHIVED_TABLE
        + " WHERE location=%s"
        + " ORDER BY basename",
        (source,)
    )
    dbc2 = be.db.cursor()
    dst_blksize = os.statvfs(dest).f_bsize
    # Order by the 14-digit timestamp after the '_' in the basename, i.e.
    # oldest recordings are moved first.
    be.dbc.execute(
        "SELECT basename FROM " + MOVE_TABLE
        + " ORDER BY substr(basename, instr(basename, '_') + 1, 14)"
    )
    for fetched in be.dbc:
        basename = fetched[0]
        src = os.path.join(source, basename)
        print('Moving ' + src + ' to ' + dest)
        # Required space is rounded up to whole filesystem blocks on dest.
        dst_free_required = round_up(os.path.getsize(src), dst_blksize)
        associated_files = glob.glob(src + "*png")
        for associated_file in associated_files:
            dst_free_required += round_up(os.path.getsize(associated_file), dst_blksize)
        if get_fs_freespace(dest) < dst_free_required + MIN_FREE_SPACE:
            print('...insufficient free space')
            break
        if not move_file(src, dest):
            dprint('pack: abort_request = True')
            request_abort()
            retcode = 1
            break
        for associated_file in associated_files:
            # Move file unconditionally (no abort possible), and
            # with its user and group ownership intact.
            dst = os.path.join(dest, os.path.basename(associated_file))
            shutil.copy2(associated_file, dst)
            st = os.stat(associated_file)
            os.chown(dst, st.st_uid, st.st_gid)
            os.remove(associated_file)
        dbc2.execute(
            "UPDATE " + ARCHIVED_TABLE + " SET location=%s WHERE basename=%s",
            (dest, basename))
        dbc2.execute(
            "DELETE from " + MOVE_TABLE + " WHERE basename=%s",
            (basename,))
    free_space = get_fs_freespace(dest) - MIN_FREE_SPACE
    if free_space < 0:
        free_space = 0
    print(f'Available free space on {dest} partition is now {sizeof_fmt(free_space)}')
    print("Dropping mythconverg database table " + MOVE_TABLE)
    be.dbc.execute("DROP TABLE IF EXISTS " + MOVE_TABLE)
    return retcode


##############################################################################
# Pack the archive partitons so that all the free space is on the highest
# numbered partition.
##############################################################################
def command_pack():
    """Pack all archive storage group partitions towards the lowest numbered.

    Each partition after the first is packed into its predecessor, so that
    all free space ends up on the highest numbered partition. Runs a scan
    first and monitors MythTV status so packing pauses while MythTV is busy.
    """
    global abort_request
    dprint_init()
    si = SingleInstance()
    if si.is_running:
        print("Error: " + sys.argv[0] + " is already running!")
        exit(5)
    autoskip = False  # autoskip is not supported for the pack command.
    print('Packing archive directories\n')
    lbe = Backend()
    lbe.init()
    archive_storagegroups_found = True
    storagegroup = get_storagegroup(lbe)
    if len(storagegroup) < 2:
        print("Pack aborted, needs at least two storage group directories")
    else:
        for sg in storagegroup:
            result = check_mounted(sg.dirname)
            if result == 'S':
                archive_storagegroups_found = False
                break
        if archive_storagegroups_found:
            command_scan()
            print('Free recording space in archive partitions before packing:')
            display_free_archive_space(storagegroup)
            dprint('pack: 3: abort_request=' + str(abort_request))
            # Start the status monitor and wait until it is ready before
            # moving any files.
            ready = threading.Event()
            status_thread = MythTVStatusMonitor(ready)
            status_thread.name = 'MythTVStatusMonitor'
            status_thread.start()
            ready.wait()
            dprint('pack: 4: abort_request=' + str(abort_request))
            try:
                previous_sg = storagegroup[0]
                for sg in storagegroup[1:]:
                    print(f'Packing {sg.dirname} to {previous_sg.dirname}')
                    retcode = pack(lbe, sg.dirname, previous_sg.dirname)
                    if retcode != 0:
                        print('Pack failed, error code %d' % retcode)
                        break
                    previous_sg = sg
                print('\nFree recording space in archive partitions after packing:')
                display_free_archive_space(storagegroup)
                print('Pack finished')
            except:
                traceback.print_exc()
            finally:
                status_thread.stop()
            if abort_request:
                print(time.strftime("%H:%M:%S Aborted!"))
            thread_count = threading.active_count()
            if thread_count > 1:
                print('threading.enumerate()=' + str(threading.enumerate()))
        else:
            print('One or more archive directories not found, pack aborted!')
    si.clean_up()


##############################################################################
# Used for debugging to test parts of the code.
##############################################################################
def command_test(dst_config_xml=''):
    """Debug command: print next recording time and EventCmd settings.

    Exercises the local backend and, when dst_config_xml names a remote
    config.xml, a remote backend too. Output only; nothing is modified.
    """
    print('Test command start')
    # Local backend
    lbe = Backend()
    lbe.init()
    if dst_config_xml != '':
        # Remote backend
        rbe = Backend()
        rbe.init(dst_config_xml)
    else:
        rbe = None
    print()
    print('Local backend:')
    next_recording_time = lbe.get_next_recording_time()
    if next_recording_time == Backend.NO_SCHEDULED_RECORDINGS:
        print('No scheduled recordings')
    else:
        print('next_recording_time=' + str(next_recording_time))
        print('UTC: ' + str(datetime.datetime.utcfromtimestamp(next_recording_time)))
        print('Local: ' + util.utc_to_local(str(datetime.datetime.utcfromtimestamp(next_recording_time)), False, False))
    if rbe is not None:
        print()
        print('Remote backend:')
        next_recording_time = rbe.get_next_recording_time()
        if next_recording_time == Backend.NO_SCHEDULED_RECORDINGS:
            print('No scheduled recordings')
        else:
            print('next_recording_time=' + str(next_recording_time))
            print('UTC: ' + str(datetime.datetime.utcfromtimestamp(next_recording_time)))
            print('Local: ' + util.utc_to_local(str(datetime.datetime.utcfromtimestamp(next_recording_time)), False, False))
    print()
    lbe.dbc.execute("SELECT * FROM settings WHERE value LIKE 'EventCmd%'")
    print('Local backend is ' + ('busy' if check_mythtv_inuseprograms(lbe.db) else 'idle'))
    print('Local EventCmd settings:')
    for fetched in lbe.dbc:
        print(fetched[0], fetched[1])
    if rbe is not None:
        print()
        print('Remote backend is ' + ('busy' if check_mythtv_inuseprograms(rbe.db) else 'idle'))
        rbe.dbc.execute("SELECT * FROM settings WHERE value LIKE 'EventCmd%'")
        print('Remote EventCmd settings from ' + rbe.remote_config.DBHostName + ':')
        for fetched in rbe.dbc:
            print(fetched[0], fetched[1])
    print()
    # Deliberately repeats the EventCmd dump a second time (debug command).
    lbe.dbc.execute("SELECT * FROM settings WHERE value LIKE 'EventCmd%'")
    print('Local EventCmd settings:')
    for fetched in lbe.dbc:
        print(fetched[0], fetched[1])
    if rbe is not None:
        print()
        rbe.dbc.execute("SELECT * FROM settings WHERE value LIKE 'EventCmd%'")
        print('Remote EventCmd settings from ' + rbe.remote_config.DBHostName + ':')
        for fetched in rbe.dbc:
            print(fetched[0], fetched[1])


##############################################################################
# Used for debugging to test parts of the code.
##############################################################################
def command_test2(filename):
    """Debug command: broadcast filename as a mythutil message and print
    the result (or the error output on failure)."""
    try:
        output = subprocess.check_output(
            ['mythutil', '--message', '--bcastaddr', '10.0.2.4',
             '--message_text', filename],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print('Exception')
        print(str(e))
        print('e.output:')
        print(e.output)
    else:
        print('Normal output:')
        print(output)


##############################################################################
# Main
##############################################################################
# Command dispatcher: sys.argv[1] selects the sub-command; remaining
# arguments are validated per command before being passed through.
args = len(sys.argv)
if args < 2:
    print('Error: Command required!')
    exit(1)
command = sys.argv[1].lower()
if command == 'clean':
    command_clean()
elif command == 'copy':
    if args != 4:
        print("Error: Copy command requires source and destination")
        exit(1)
    command_copy(sys.argv[2], sys.argv[3])
elif command == 'copydir':
    if args != 5 and args != 6:
        print("Error: Copy directory command requires source, destination, finished directory, and optionally a config.xml file for the destination MythTV system.")
        exit(1)
    if args == 5:
        command_copydir(sys.argv[2], sys.argv[3], sys.argv[4])
    else:
        command_copydir(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
elif command == 'fill':
    command_fill()
elif command == 'find':
    command_find()
elif command == 'event':
    if args < 3:
        print("Error: Event command requires event name")
        exit(1)
    command_event(sys.argv[2:])
elif command == 'expire':
    command_expire()
elif command == 'exportdir':
    if args > 3:
        print('Error: exportdir command can only have one optional argument, the directory to export!')
        exit(1)
    if args == 2:
        command_exportdir()
    else:
        command_exportdir(sys.argv[2])
elif command == 'pack':
    command_pack()
elif command == 'resume':
    # This command is normally issued to get mythsgu to check if it is safe
    # to resume a paused operation, and if it is, to do so. It is used on
    # MythTV 0.27 systems after using LiveTV, as the MythTV Event for "LiveTV
    # Stopped" was only added in 0.28. It can, however, be used at any time
    # if the user thinks that it might be safe to resume.
    # command_event expects a list of arguments (it indexes args[0]);
    # passing the bare string 'resume' would send the event 'r'.
    command_event(['resume'])
elif command == 'scan':
    command_scan()
elif command == 'test':
    if args == 3:
        command_test(sys.argv[2])
    else:
        command_test()
elif command == 'test2':
    if args >= 3:
        command_test2(sys.argv[2])
    else:
        print('Error: Test2 command requires arguments!')
        exit(1)
#elif command == 'test_server':
#    command_test_server()
elif command == '-v' or command == 'version':
    print(program_name + ' version ' + VERSION)
else:
    print('Error: Invalid command (' + command + ')')
    exit(1)
exit(0)