class LogInterface(object):
    """File-like object that redirects stdout/stderr writes to the logging
    module, which in turn outputs to either console, file or both."""

    def __init__(self, level=None):
        import logging
        # Fall back to the root logger's info() when no log method is given
        self.level = (level if level else logging.getLogger().info)

    def write(self, msg):
        """Log each non-empty line of *msg* through the configured level."""
        for line in msg.strip().split("\n"):
            self.level(line)


class GitAutoDeploy(object):
    # Singleton instance, HTTP server handle and merged configuration dict
    _instance = None
    _server = None
    _config = None

    def __new__(cls, *args, **kwargs):
        """Overload constructor to enable Singleton access"""
        if not cls._instance:
            cls._instance = super(GitAutoDeploy, cls).__new__(
                cls, *args, **kwargs)
        return cls._instance

    @staticmethod
    def debug_diagnosis(port):
        """Log which process (if any) is bound to *port*.

        Relies on /proc, so this only works on Linux."""
        import logging
        logger = logging.getLogger()

        pid = GitAutoDeploy.get_pid_on_port(port)
        if pid is False:
            logger.warning('I don\'t know the number of pid that is using my configured port')
            return

        logger.info('Process with PID %s is using port %s' % (pid, port))
        with open("/proc/%s/cmdline" % pid) as f:
            cmdline = f.readlines()
            # /proc/<pid>/cmdline separates arguments with NUL bytes
            logger.info('Process with PID %s was started using the command: %s' % (pid, cmdline[0].replace('\x00', ' ')))

    @staticmethod
    def get_pid_on_port(port):
        """Return the PID listening on *port*, or False when it cannot be
        determined.

        Works by matching the socket inode from /proc/net/tcp against the
        open file descriptors of every running process (Linux only)."""
        import os

        with open("/proc/net/tcp", 'r') as f:
            file_content = f.readlines()[1:]  # skip the header line

        pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]
        conf_port = str(port)
        mpid = False

        for line in file_content:
            if mpid is not False:
                break

            _, laddr, _, _, _, _, _, _, _, inode = line.split()[:10]
            # local address is "<hex-ip>:<hex-port>"; compare in decimal
            decport = str(int(laddr.split(':')[1], 16))

            if decport != conf_port:
                continue

            for pid in pids:
                try:
                    path = "/proc/%s/fd" % pid

                    if os.access(path, os.R_OK) is False:
                        continue

                    for fd in os.listdir(path):
                        # socket fds read back as "socket:[<inode>]"
                        cinode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
                        minode = cinode.split(":")

                        if len(minode) == 2 and minode[1][1:-1] == inode:
                            mpid = pid

                except Exception:
                    # Best effort: processes may exit or deny access while
                    # we scan; just move on to the next candidate
                    pass

        return mpid

    def clone_all_repos(self):
        """Iterates over all configured repositories and clones them to
        their configured paths."""
        import os
        import logging
        from wrappers import GitWrapper
        logger = logging.getLogger()

        # Iterate over all configured repositories
        for repo_config in self._config['repositories']:

            # Only clone repositories with a configured path
            if 'path' not in repo_config:
                logger.info("Repository %s will not be cloned (no path configured)" % repo_config['url'])
                continue

            # Skip repositories that are already cloned (path holds a .git)
            if os.path.isdir(repo_config['path']) and os.path.isdir(repo_config['path']+'/.git'):
                logger.info("Repository %s already present" % repo_config['url'])
                continue

            # Clone repository
            GitWrapper.clone(url=repo_config['url'], branch=repo_config['branch'], path=repo_config['path'])

            if os.path.isdir(repo_config['path']):
                logger.info("Repository %s successfully cloned" % repo_config['url'])
            else:
                logger.error("Unable to clone %s branch of repository %s" % (repo_config['branch'], repo_config['url']))

    def ssh_key_scan(self):
        """Add the host key of every configured SSH repository host to
        $HOME/.ssh/known_hosts using ssh-keyscan."""
        import re
        import logging
        from wrappers import ProcessWrapper
        logger = logging.getLogger()

        for repository in self._config['repositories']:
            url = repository['url']
            logger.info("Scanning repository: %s" % url)
            # Extract the host part of scp-like URLs, e.g. git@host:path
            m = re.match('.*@(.*?):', url)

            if m is not None:
                # FIX: use .get() so repositories without a 'port' key do
                # not raise KeyError; str() guards against an integer port
                port = repository.get('port')
                port = '' if port is None else ('-p' + str(port))
                ProcessWrapper().call(['ssh-keyscan -t ecdsa,rsa ' + port + ' ' + m.group(1) + ' >> ' + '$HOME/.ssh/known_hosts'], shell=True)
            else:
                logger.error('Could not find regexp match in path: %s' % url)

    def kill_conflicting_processes(self):
        """SIGKILL whatever process currently occupies the configured port.

        Returns True on success, False when no PID could be determined."""
        import os
        import signal  # FIX: was missing, making os.kill() raise NameError
        import logging
        logger = logging.getLogger()
        pid = GitAutoDeploy.get_pid_on_port(self._config['port'])

        if pid is False:
            logger.error('[KILLER MODE] I don\'t know the number of pid ' +
                         'that is using my configured port\n[KILLER MODE] ' +
                         'Maybe no one? Please, use --force option carefully')
            return False

        os.kill(pid, signal.SIGKILL)
        return True

    def create_pid_file(self):
        """Write the current process id to the configured pid file."""
        import os
        with open(self._config['pidfilepath'], 'w') as f:
            f.write(str(os.getpid()))

    def read_pid_file(self):
        """Return the lines of the configured pid file."""
        with open(self._config['pidfilepath'], 'r') as f:
            return f.readlines()

    def remove_pid_file(self):
        """Delete the configured pid file."""
        import os
        os.remove(self._config['pidfilepath'])

    def exit(self):
        """Log a farewell, clean up the pid file and terminate."""
        import sys
        import logging
        logger = logging.getLogger()
        logger.info('Goodbye')
        self.remove_pid_file()
        sys.exit(0)

    @staticmethod
    def create_daemon():
        """Detach from the controlling terminal using the classic double
        fork, so the server keeps running after the launching shell exits."""
        import os

        try:
            # Spawn first child. Returns 0 in the child and pid in the parent.
            pid = os.fork()
        except OSError as e:
            # FIX: "except OSError, e" is Python-2-only syntax; the "as"
            # form works on Python 2.6+ and Python 3 alike
            raise Exception("%s [%d]" % (e.strerror, e.errno))

        # First child
        if pid == 0:
            # Become session leader, detaching from the controlling tty
            os.setsid()

            try:
                # Spawn second child
                pid = os.fork()
            except OSError as e:
                raise Exception("%s [%d]" % (e.strerror, e.errno))

            if pid == 0:
                # Second child: clear the umask so files get the modes the
                # application explicitly requests
                os.umask(0)
            else:
                # Kill first child
                os._exit(0)
        else:
            # Kill parent of first child
            os._exit(0)

        return 0

    def run(self, config):
        """Start an instance of GAD based on the provided config object."""
        import sys
        from BaseHTTPServer import HTTPServer
        import socket
        import os
        import logging
        from lock import Lock
        from httpserver import WebhookRequestHandler

        # Attach config values to this instance
        self._config = config

        # Set up logging
        logger = logging.getLogger()
        logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")

        # Enable console output?
        if ('quiet' in self._config and self._config['quiet']) or ('daemon-mode' in self._config and self._config['daemon-mode']):
            logger.addHandler(logging.NullHandler())
        else:
            consoleHandler = logging.StreamHandler()
            consoleHandler.setFormatter(logFormatter)
            logger.addHandler(consoleHandler)

        # All logs are recording
        logger.setLevel(logging.NOTSET)

        if 'logfilepath' in self._config and self._config['logfilepath']:
            # Translate any ~ in the path into /home/<user>
            # FIX: the comment promised ~ expansion but the code never did it
            log_file_path = os.path.expanduser(self._config['logfilepath'])
            fileHandler = logging.FileHandler(log_file_path)
            fileHandler.setFormatter(logFormatter)
            logger.addHandler(fileHandler)

        if 'ssh-keygen' in self._config and self._config['ssh-keygen']:
            logger.info('Scanning repository hosts for ssh keys...')
            self.ssh_key_scan()

        if 'force' in self._config and self._config['force']:
            logger.info('Attempting to kill any other process currently occupying port %s' % self._config['port'])
            self.kill_conflicting_processes()

        # Clone all repos once initially
        self.clone_all_repos()

        # Set default stdout and stderr to our logging interface (that writes
        # to file and console depending on user preference)
        sys.stdout = LogInterface(logger.info)
        sys.stderr = LogInterface(logger.error)

        if 'daemon-mode' in self._config and self._config['daemon-mode']:
            logger.info('Starting Git Auto Deploy in daemon mode')
            GitAutoDeploy.create_daemon()
        else:
            logger.info('Git Auto Deploy started')

        self.create_pid_file()

        # Clear any existing lock files, with no regard to possible ongoing processes
        for repo_config in self._config['repositories']:

            # Do we have a physical repository?
            if 'path' in repo_config:
                Lock(os.path.join(repo_config['path'], 'status_running')).clear()
                Lock(os.path.join(repo_config['path'], 'status_waiting')).clear()

        try:
            # Hand the merged config to the request handler class, then
            # bind and serve until interrupted
            WebhookRequestHandler._config = self._config
            self._server = HTTPServer((self._config['host'],
                                       self._config['port']),
                                      WebhookRequestHandler)

            if 'ssl' in self._config and self._config['ssl']:
                import ssl
                logger.info("enabling ssl")
                self._server.socket = ssl.wrap_socket(self._server.socket,
                                                      certfile=os.path.expanduser(self._config['ssl-pem']),
                                                      server_side=True)

            sa = self._server.socket.getsockname()
            logger.info("Listening on %s port %s", sa[0], sa[1])
            self._server.serve_forever()

        except socket.error as e:
            # FIX: py2-only "except socket.error, e" rewritten with "as"
            logger.critical("Error on socket: %s" % e)
            GitAutoDeploy.debug_diagnosis(self._config['port'])
            sys.exit(1)

    def stop(self):
        """Close the server socket if the server was ever started."""
        if self._server is not None:
            self._server.socket.close()

    def signal_handler(self, signum, frame):
        """Dispatch on the received signal: reload on SIGHUP, otherwise
        log the reason and shut down cleanly."""
        import logging
        logger = logging.getLogger()
        self.stop()

        if signum == 1:
            # SIGHUP: restart with the current configuration
            self.run(self._config)
            return
        elif signum == 2:
            # SIGINT
            logger.info('Requested close by keyboard interrupt signal')
        elif signum == 6:
            # SIGABRT
            logger.info('Requested close by SIGABRT (process abort signal). Code 6.')

        self.exit()


def main():
    import signal
    from gitautodeploy import GitAutoDeploy
    # FIX: "from cli.config import *" inside a function is a SyntaxError on
    # Python 3 (and flagged on 2.6+); import the used names explicitly
    from cli.config import (get_config_defaults, get_config_from_environment,
                            get_config_from_argv, find_config_file,
                            get_config_from_file,
                            get_repo_config_from_environment, init_config)
    import sys
    import os

    app = GitAutoDeploy()

    # Reload on SIGHUP, shut down on SIGINT/SIGABRT, and ignore broken
    # pipes so client disconnects don't kill the daemon
    signal.signal(signal.SIGHUP, app.signal_handler)
    signal.signal(signal.SIGINT, app.signal_handler)
    signal.signal(signal.SIGABRT, app.signal_handler)
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)

    config = get_config_defaults()

    # Get config values from environment variables and command line arguments
    environment_config = get_config_from_environment()
    argv_config = get_config_from_argv(sys.argv[1:])

    # Merge config values (argv wins over environment, which wins defaults)
    config.update(environment_config)
    config.update(argv_config)

    # Config file path provided?
    if 'config' in config and config['config']:
        config_file_path = os.path.realpath(config['config'])

    else:
        # Directories to scan for config files
        target_directories = [
            os.getcwd(),  # cwd
            os.path.dirname(os.path.realpath(__file__))  # script path
        ]

        config_file_path = find_config_file(target_directories)

    # Config file path provided or found?
    if config_file_path:
        file_config = get_config_from_file(config_file_path)
        config.update(file_config)

    # Extend config data with any repository defined by environment variables
    repo_config = get_repo_config_from_environment()

    if repo_config:
        if not 'repositories' in config:
            config['repositories'] = []
        config['repositories'].append(repo_config)

    # Initialize config by expanding with missing values
    init_config(config)

    app.run(config)