sys.stdout.flush

Here are examples of the Python API sys.stdout.flush, taken from open source projects. In most of them, flushing is used to make buffered progress or status output appear immediately.

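sys.stdout is line-buffered at a terminal and block-buffered when redirected, so partial lines written with print(..., end='') or sys.stdout.write() may not appear until a newline is printed or the buffer fills. Calling sys.stdout.flush() pushes the buffered text out immediately, which is why the examples below flush after printing progress dots and status markers. A minimal illustrative sketch (Python 3, not taken from any of the projects listed here):

import sys
import time

for _ in range(10):
    print('.', end='')    # partial line: stays in the stdout buffer
    sys.stdout.flush()    # force the dot to appear right away
    time.sleep(0.1)
print(' done')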

Example 51

Project: internetarchive
Source File: files.py
    def download(self, file_path=None, verbose=None, silent=None, ignore_existing=None,
                 checksum=None, destdir=None, retries=None, ignore_errors=None):
        """Download the file into the current working directory.

        :type file_path: str
        :param file_path: Download file to the given file_path.

        :type verbose: bool
        :param verbose: (optional) Turn on verbose output.

        :type silent: bool
        :param silent: (optional) Suppress all output.

        :type ignore_existing: bool
        :param ignore_existing: Overwrite local files if they already
                                exist.

        :type checksum: bool
        :param checksum: (optional) Skip downloading file based on checksum.

        :type destdir: str
        :param destdir: (optional) The directory to download files to.

        :type retries: int
        :param retries: (optional) The number of times to retry on failed
                        requests.

        :type ignore_errors: bool
        :param ignore_errors: (optional) Don't fail if a single file fails to
                              download, continue to download other files.

        :rtype: bool
        :returns: True if file was successfully downloaded.
        """
        verbose = False if verbose is None else verbose
        silent = False if silent is None else silent
        ignore_existing = False if ignore_existing is None else ignore_existing
        checksum = False if checksum is None else checksum
        retries = 2 if not retries else retries
        ignore_errors = False if not ignore_errors else ignore_errors

        self.item.session._mount_http_adapter(max_retries=retries)
        file_path = self.name if not file_path else file_path

        if destdir:
            if not os.path.exists(destdir):
                os.mkdir(destdir)
            if os.path.isfile(destdir):
                raise IOError('{} is not a directory!'.format(destdir))
            file_path = os.path.join(destdir, file_path)

        if os.path.exists(file_path):
            if ignore_existing:
                msg = 'skipping {0}, file already exists.'.format(file_path)
                log.info(msg)
                if verbose:
                    print(' ' + msg)
                elif silent is False:
                    print('.', end='')
                    sys.stdout.flush()
                return
            elif checksum:
                md5_sum = utils.get_md5(open(file_path, 'rb'))
                if md5_sum == self.md5:
                    msg = ('skipping {0}, '
                           'file already exists based on checksum.'.format(file_path))
                    log.info(msg)
                    if verbose:
                        print(' ' + msg)
                    elif silent is False:
                        print('.', end='')
                        sys.stdout.flush()
                    return
            else:
                st = os.stat(file_path)
                if (st.st_mtime == self.mtime) and (st.st_size == self.size) \
                        or self.name.endswith('_files.xml') and st.st_size != 0:
                    msg = ('skipping {0}, file already exists '
                           'based on length and date.'.format(file_path))
                    log.info(msg)
                    if verbose:
                        print(' ' + msg)
                    elif silent is False:
                        print('.', end='')
                        sys.stdout.flush()
                    return

        parent_dir = os.path.dirname(file_path)
        if parent_dir != '' and not os.path.exists(parent_dir):
            os.makedirs(parent_dir)

        try:
            response = self.item.session.get(self.url, stream=True, timeout=12)
            response.raise_for_status()

            chunk_size = 2048
            with open(file_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    if chunk:
                        f.write(chunk)
                        f.flush()
        except (RetryError, HTTPError, ConnectTimeout,
                ConnectionError, socket.error, ReadTimeout) as exc:
            msg = ('error downloading file {0}, '
                   'exception raised: {1}'.format(file_path, exc))
            log.error(msg)
            if os.path.exists(file_path):
                os.remove(file_path)
            if verbose:
                print(' ' + msg)
            elif silent is False:
                print('e', end='')
                sys.stdout.flush()
            if ignore_errors is True:
                return False
            else:
                raise exc

        # Set mtime with mtime from files.xml.
        os.utime(file_path, (0, self.mtime))

        msg = 'downloaded {0}/{1} to {2}'.format(self.identifier,
                                                 self.name,
                                                 file_path)
        log.info(msg)
        if verbose:
            print(' ' + msg)
        elif silent is False:
            print('d', end='')
            sys.stdout.flush()
        return True

Example 53

Project: prettytensor
Source File: local_trainer.py
  def run_model(self,
                op_list,
                num_steps,
                feed_vars=(),
                feed_data=None,
                print_every=100,
                allow_initialize=True):
    """Runs `op_list` for `num_steps`.

    Args:
      op_list: A list of ops to run.
      num_steps: Number of steps to run this for.  If feeds are used, this is a
        maximum.
      feed_vars: The variables to feed.
      feed_data: An iterator that feeds data tuples.
      print_every: Print a log line and checkpoint every so many steps.
      allow_initialize: If True, the model will be initialized if any variable
        is uninitialized, if False the model will not be initialized.
    Returns:
      The final run result as a list.
    Raises:
      ValueError: If feed_data doesn't match feed_vars.
    """
    feed_data = feed_data or itertools.repeat(())

    ops = [bookkeeper.global_step()]
    ops.extend(op_list)

    sess = tf.get_default_session()
    self.prepare_model(sess, allow_initialize=allow_initialize)
    results = []
    try:
      for i, data in zip(xrange(num_steps), feed_data):
        log_this_time = print_every and i % print_every == 0
        if len(data) != len(feed_vars):
          raise ValueError(
              'feed_data and feed_vars must be the same length: %d vs %d' % (
                  len(data), len(feed_vars)))
        if self._coord.should_stop():
          print('Coordinator stopped')
          sys.stdout.flush()
          self.stop_queues()
          break
        if len(feed_vars) != len(data):
          raise ValueError('Feed vars must be the same length as data.')

        if log_this_time and self._summary_writer:
          results = sess.run(ops + [self._summaries],
                             dict(zip(feed_vars, data)))
          self._summary_writer.add_summary(results[-1], results[0])
          results = results[:-1]
        else:
          results = sess.run(ops, dict(zip(feed_vars, data)))
        if log_this_time:
          self._log_and_save(sess, results)

      # Print the last line if it wasn't just printed
      if print_every and not log_this_time:
        self._log_and_save(sess, results)
    except tf.errors.OutOfRangeError as ex:
      print('Done training -- epoch limit reached %s' % ex)
      sys.stdout.flush()
      self.stop_queues()
    except BaseException as ex:
      print('Exception -- stopping threads: %s' % ex, file=sys.stderr)
      sys.stdout.flush()
      self.stop_queues()
      raise
    return results

Example 54

Project: AI_Reader
Source File: build_imagenet_data.py
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               synsets, labels, humans, bboxes, num_shards):
  """Processes and saves list of images as TFRecord in 1 thread.

  Args:
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying ranges of each batches to
      analyze in parallel.
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    synsets: list of strings; each string is a unique WordNet ID
    labels: list of integer; each integer identifies the ground truth
    humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list may contain zero or more entries, one per bounding box annotation
      for the image.
    num_shards: integer number of shards for this data set.
  """
  # Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128, and the num_threads = 2, then the first
  # thread would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)

  shard_ranges = np.linspace(ranges[thread_index][0],
                             ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

  counter = 0
  for s in xrange(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_directory, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)

    shard_counter = 0
    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in files_in_shard:
      filename = filenames[i]
      label = labels[i]
      synset = synsets[i]
      human = humans[i]
      bbox = bboxes[i]

      image_buffer, height, width = _process_image(filename, coder)

      example = _convert_to_example(filename, image_buffer, label,
                                    synset, human, bbox,
                                    height, width)
      writer.write(example.SerializeToString())
      shard_counter += 1
      counter += 1

      if not counter % 1000:
        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
              (datetime.now(), thread_index, counter, num_files_in_thread))
        sys.stdout.flush()

    print('%s [thread %d]: Wrote %d images to %s' %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  print('%s [thread %d]: Wrote %d images to %d shards.' %
        (datetime.now(), thread_index, counter, num_files_in_thread))
  sys.stdout.flush()

Example 55

Project: pokedex
Source File: load.py
def _get_verbose_prints(verbose):
    """If `verbose` is true, returns three functions: one for printing a
    starting message, one for printing an interim status update, and one for
    printing a success or failure message when finished.

    If `verbose` is false, returns no-op functions.
    """

    if not verbose:
        # Return dummies
        def dummy(*args, **kwargs):
            pass

        return dummy, dummy, dummy

    ### Okay, verbose == True; print stuff

    def print_start(thing):
        # Truncate to 66 characters, leaving 10 characters for a success
        # or failure message
        truncated_thing = thing[:66]

        # Also, space-pad to keep the cursor in a known column
        num_spaces = 66 - len(truncated_thing)

        print("%s...%s" % (truncated_thing, ' ' * num_spaces), end='')
        sys.stdout.flush()

    if sys.stdout.isatty():
        # stdout is a terminal; stupid backspace tricks are OK.
        # Don't use print, because it always adds magical spaces, which
        # makes backspace accounting harder

        backspaces = [0]
        def print_status(msg):
            # Overwrite any status text with spaces before printing
            sys.stdout.write('\b' * backspaces[0])
            sys.stdout.write(' ' * backspaces[0])
            sys.stdout.write('\b' * backspaces[0])
            sys.stdout.write(msg)
            sys.stdout.flush()
            backspaces[0] = len(msg)

        def print_done(msg='ok'):
            # Overwrite any status text with spaces before printing
            sys.stdout.write('\b' * backspaces[0])
            sys.stdout.write(' ' * backspaces[0])
            sys.stdout.write('\b' * backspaces[0])
            sys.stdout.write(msg + "\n")
            sys.stdout.flush()
            backspaces[0] = 0

    else:
        # stdout is a file (or something); don't bother with status at all
        def print_status(msg):
            pass

        def print_done(msg='ok'):
            print(msg)

    return print_start, print_status, print_done
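
A brief usage sketch for the three callables returned above; the table name and progress figures here are hypothetical, chosen only to show the intended call pattern:

print_start, print_status, print_done = _get_verbose_prints(verbose=True)

print_start('loading table pokemon_species')    # prints the padded name, no newline
for done in (100, 250, 500):                    # hypothetical progress values
    print_status('%d/500' % done)               # overwrites the previous status in place
print_done('ok')                                # replaces the status and ends the line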

Example 56

Project: reseg
Source File: helper_dataset.py
def preprocess_dataset(train, valid, test,
                       input_to_float,
                       preprocess_type,
                       patch_size, max_patches):

    if input_to_float and preprocess_type is None:
        train_norm = train[0].astype(floatX) / 255.
        train = (train_norm, train[1])
        valid_norm = valid[0].astype(floatX) / 255.
        valid = (valid_norm, valid[1])
        test_norm = test[0].astype(floatX) / 255.
        test = (test_norm, test[1])

    if preprocess_type is None:
        return train, valid, test

    # whiten, LCN, GCN, Local Mean Subtract, or normalize
    if len(train[0]) > 0:
        train_pre = []
        print ""
        print "Preprocessing {} images of the train set with {} {} ".format(
            len(train[0]), preprocess_type, patch_size),
        print ""
        i = 0
        print "Progress: {0:.3g} %".format(i * 100 / len(train[0])),
        for i, x in enumerate(train[0]):
            img = np.expand_dims(x, axis=0)
            x_pre = preprocess(img, preprocess_type,
                               patch_size,
                               max_patches)
            train_pre.append(x_pre[0])
            print "\rProgress: {0:.3g} %".format(i * 100 / len(train[0])),
            sys.stdout.flush()

        if input_to_float:
            train_pre = np.array(train_pre).astype(floatX) / 255.
        train = (np.array(train_pre), np.array(train[1]))

    if len(valid[0]) > 0:
        valid_pre = []
        print ""
        print "Preprocessing {} images of the valid set with {} {} ".format(
            len(valid[0]), preprocess_type, patch_size),
        print ""
        i = 0
        print "Progress: {0:.3g} %".format(i * 100 / len(valid[0])),
        for i, x in enumerate(valid[0]):
            img = np.expand_dims(x, axis=0)
            x_pre = preprocess(img, preprocess_type,
                               patch_size,
                               max_patches)
            valid_pre.append(x_pre[0])
            print "\rProgress: {0:.3g} %".format(i * 100 / len(valid[0])),
            sys.stdout.flush()

        if input_to_float:
            valid_pre = np.array(valid_pre).astype(floatX) / 255.
        valid = (np.array(valid_pre), np.array(valid[1]))

    if len(test[0]) > 0:
        test_pre = []
        print ""
        print "Preprocessing {} images of the test set with {} {} ".format(
            len(test[0]), preprocess_type, patch_size),
        print ""
        i = 0
        print "Progress: {0:.3g} %".format(i * 100 / len(test[0])),
        for i, x in enumerate(test[0]):
            img = np.expand_dims(x, axis=0)
            x_pre = preprocess(img, preprocess_type,
                               patch_size,
                               max_patches)
            test_pre.append(x_pre[0])
            print "\rProgress: {0:.3g} %".format(i * 100 / len(test[0])),
            sys.stdout.flush()

        if input_to_float:
            test_pre = np.array(test_pre).astype(floatX) / 255.
        test = (np.array(test_pre), np.array(test[1]))

    return train, valid, test

Example 57

Project: AI_Reader
Source File: build_image_data.py
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               texts, labels, num_shards):
  """Processes and saves list of images as TFRecord in 1 thread.

  Args:
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying ranges of each batches to
      analyze in parallel.
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    texts: list of strings; each string is human readable, e.g. 'dog'
    labels: list of integer; each integer identifies the ground truth
    num_shards: integer number of shards for this data set.
  """
  # Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128, and the num_threads = 2, then the first
  # thread would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)

  shard_ranges = np.linspace(ranges[thread_index][0],
                             ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

  counter = 0
  for s in xrange(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_directory, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)

    shard_counter = 0
    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in files_in_shard:
      filename = filenames[i]
      label = labels[i]
      text = texts[i]

      image_buffer, height, width = _process_image(filename, coder)

      example = _convert_to_example(filename, image_buffer, label,
                                    text, height, width)
      writer.write(example.SerializeToString())
      shard_counter += 1
      counter += 1

      if not counter % 1000:
        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
              (datetime.now(), thread_index, counter, num_files_in_thread))
        sys.stdout.flush()

    print('%s [thread %d]: Wrote %d images to %s' %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  print('%s [thread %d]: Wrote %d images to %d shards.' %
        (datetime.now(), thread_index, counter, num_files_in_thread))
  sys.stdout.flush()

Example 58

Project: crunch.io-dashboard
Source File: models.py
    def launch(self,msg=''):
        """
        In order to launch a ClusterInstance the following requirements must be
        satisfied:
            * A snapshot of the home volume must exist and be shared with the
              launching user (this should have been created at the
              ClusterTemplate creation time)
            * The user has AWS credentials including an ssh keypair.
            * A valid ClusterTemplate has been created.
        """
        from boto.ec2.connection import EC2Connection
        import scwrapper
        import random
        import time
        import datetime
        # FIXME: Add timestamps to logs.

        # Assigning Cluster Parameters
        aws_key_id     = self.cluster_template.user_profile.awscredential.aws_key_id
        aws_secret_key = self.cluster_template.user_profile.awscredential.aws_secret_key
        is_demo        = self.cluster_template.is_demo

        if ( not aws_key_id ) or ( not aws_secret_key ):
            raise Exception('AwsCredentialError')

        # Randomly selecting a us-east-1{a,b,c,d} availability zone
        # Maybe someday Amazon will give us capacity ideas
        availability_zone = 'us-east-1' + random.choice(('a','b','c','d'))

        # The availability zone needs to be saved for later actions on this
        # ClusterInstance
        self.availability_zone = availability_zone
        self.save()

        # Get latest_snapshot_id
        # TODO: When we support multiple disks, do this for all snapshots
        home_disk = self.cluster_template.disk_set.filter(name='Home')[0]
        latest_snapshot_id = home_disk.latest_snapshot_id
        size = int(home_disk.size)

        if is_demo:
            time.sleep(10)
            home_disk.home_volume_id = 'vol-aaaa1111'
            home_disk.save()
            self.cluster_template.status = 'running'
            self.cluster_template.save()
            print "DEMO: Launching cluster %s in availability zone %s" % \
                    ('demo-cluster', self.availability_zone)
            sys.stdout.flush()
        else:
            # create volume from snapshot in availability_zone
            print "Creating EBS volume from snapshot: %s" % latest_snapshot_id
            sys.stdout.flush()
            conn = EC2Connection(str(aws_key_id), str(aws_secret_key))
            volume = conn.create_volume( size, availability_zone, latest_snapshot_id)
            home_volume_id = volume.id

            # The home_volume_id needs to be saved for later.
            home_disk.home_volume_id = home_volume_id
            home_disk.save()

            star_cluster = scwrapper.Cluster(
                    self.cluster_template,
                    self.availability_zone,
                    )
            print "Launching cluster %s in availability zone %s" % \
                    (star_cluster.cluster_name, self.availability_zone)
            sys.stdout.flush()
            star_cluster.launch()
            sys.stdout.flush()
            self.cluster_template.status = 'running'
            self.cluster_template.save()

            print "Cluster started, saving nodes"
            sys.stdout.flush()
            
            # A new object must be created to get the updated node information.
            running_cluster = scwrapper.Cluster(
                    self.cluster_template,
                    self.availability_zone,
                    )

            # Create the Ec2Instance (Node) objects
            for node in running_cluster.sc.nodes:
                print "Saving node: %s, %s, %s" % ( 
                        node.alias,
                        node.ip_address,
                        node.id
                        )
                sys.stdout.flush()
                instance = Ec2Instance(
                    cluster_instance = self,
                    instance_type    = Ec2InstanceType.objects.filter(api_name = node.instance_type)[0],
                    alias            = node.alias,
                    arch             = node.arch,
                    instance_id      = node.id,
                    image_id         = node.image_id,
                    launch_time = datetime.datetime.strptime(
                        node.launch_time,
                        "%Y-%m-%dT%H:%M:%S.000Z"
                        ),
                    placement          = node.placement,
                    ip_address         = node.ip_address,
                    dns_name           = node.dns_name,
                    private_ip_address = node.private_ip_address,
                    public_dns_name    = node.public_dns_name,
                    state              = node.state
                )
                instance.save()
                print "Saved node: %s" % node.alias
                sys.stdout.flush()

            print "Launching of cluster completed."
            sys.stdout.flush()
 
        return "finished"

Example 59

Project: text2image
Source File: alignDraw.py
    def train(self, lr, epochs, save=False, validateAfter=0):
        self._build_train_function()
        sys.stdout.flush()

        if save == True:
            curr_time = datetime.datetime.now()
            weights_f_name = ("./attention-vae-%s-%s-%s-%s-%s-%s.h5" % (curr_time.year, curr_time.month, curr_time.day, curr_time.hour, curr_time.minute, curr_time.second))
            print weights_f_name

        all_outputs = np.array([0.0,0.0,0.0])
        iter_outputs = np.array([0.0,0.0,0.0])
        curr_iter = 0
        print_after = 100
        seen_examples = 0
        total_seen_examples = 0
        prev_outputs = np.array([float("inf"),float("inf"),float("inf")])
        prev_val_results = np.array([float("inf"),float("inf"),float("inf")])

        for epoch in xrange(0, epochs):
            a = datetime.datetime.now()
            
            self.train_iter.reset()
            while True:
                index_cap, index_im, cap_len = self.train_iter.next()
                if type(index_cap) == int:
                    break
                [kl, logpxz, log_likelihood, c_ts, read_attent_params, write_attent_params] = self._train_function(index_im, index_cap, cap_len, lr, self.runSteps)
                kl_total = kl * index_im.shape[0]
                logpxz_total = logpxz * index_im.shape[0]
                log_likelihood_total = log_likelihood * index_im.shape[0]
                all_outputs[0] = all_outputs[0] + kl_total
                all_outputs[1] = all_outputs[1] + logpxz_total
                all_outputs[2] = all_outputs[2] + log_likelihood_total
                iter_outputs[0] = iter_outputs[0] + kl_total
                iter_outputs[1] = iter_outputs[1] + logpxz_total
                iter_outputs[2] = iter_outputs[2] + log_likelihood_total
                seen_examples = seen_examples + index_im.shape[0]
                total_seen_examples = total_seen_examples + index_im.shape[0]

                if curr_iter % print_after == 0 and curr_iter != 0:
                    print 'Iteration %d ; Processed %d entries' % (curr_iter, total_seen_examples)
                    iter_outputs = iter_outputs / seen_examples
                    print float(iter_outputs[0]), float(iter_outputs[1]), float(iter_outputs[2])
                    print '\n'
                    iter_outputs = np.array([0.0,0.0,0.0])
                    seen_examples = 0
                    sys.stdout.flush()

                if curr_iter % (print_after*10) == 0 and curr_iter != 0:
                    self.save_weights(weights_f_name, c_ts, read_attent_params, write_attent_params)
                    print 'Done Saving Weights'
                    print '\n'
                    sys.stdout.flush()
                
                curr_iter = curr_iter + 1
            b = datetime.datetime.now()
            print("Epoch %d took %s" % (epoch, (b-a)))

            if save == True:
                self.save_weights(weights_f_name, c_ts, read_attent_params, write_attent_params)
                print 'Done Saving Weights'

            all_outputs = all_outputs / (self.input_shape[0] * 5) # 5 captions per image
            print 'Train Results'
            print float(all_outputs[0]), float(all_outputs[1]), float(all_outputs[2])

            if validateAfter != 0:
                if epoch % validateAfter == 0:
                    print 'Validation Results'
                    val_results = self.validate()
                    print float(val_results[0]), float(val_results[1]), float(val_results[2])
                    print '\n'

            if float(val_results[-1]) > float(prev_val_results[-1]):
                print("Learning Rate Decreased")
                lr = lr * 0.1
            elif self.reduceLRAfter != 0:
                if epoch == self.reduceLRAfter:
                    print ("Learning Rate Manually Decreased")
                    lr = lr * 0.1
            else:
                prev_val_results = np.copy(val_results)

            print '\n'
            all_outputs = np.array([0.0,0.0,0.0])
            sys.stdout.flush()

Example 60

Project: nrvr-commander
Source File: download.py
    @classmethod
    def fromUrl(cls, url,
                force=False,
                dontDownload=False,
                ticker=True):
        """Download file or use previously downloaded file.
        
        As implemented uses urllib2.
        
        dontDownload
            whether you don't want to start a download, for some reason.
        
        Return file path."""
        urlFilename = cls.basename(url)
        downloadDir = ScriptUser.loggedIn.userHomeRelative("Downloads")
        downloadPath = os.path.join(downloadDir, urlFilename)
        semaphorePath = downloadPath + cls.semaphoreExtenstion
        #
        if os.path.exists(downloadPath) and not force:
            if not os.path.exists(semaphorePath):
                # file exists and not download in progress,
                # assume it is good
                return downloadPath
            else:
                # file exists and download in progress,
                # presumably from another script running in another process or thread,
                # wait for it to complete
                printed = False
                ticked = False
                # check the essential condition, initially and then repeatedly
                while os.path.exists(semaphorePath):
                    if not printed:
                        # first time only printing
                        print "waiting for " + semaphorePath + " to go away on completion"
                        sys.stdout.flush()
                        printed = True
                    if ticker:
                        if not ticked:
                            # first time only printing
                            sys.stdout.write("[")
                        sys.stdout.write(".")
                        sys.stdout.flush()
                        ticked = True
                    time.sleep(5)
                if ticked:
                    # final printing
                    sys.stdout.write("]\n")
                    sys.stdout.flush()
        elif not dontDownload: # it is normal to download
            if not os.path.exists(downloadDir): # possibly on an international version OS
                try:
                    os.makedirs(downloadDir)
                except OSError:
                    if os.path.exists(downloadDir): # concurrently made
                        pass
                    else: # failure
                        raise
            #
            # try downloading
            pid = os.getpid()
            try:
                with open(semaphorePath, "w") as semaphoreFile:
                    # create semaphore file
                    semaphoreFile.write("pid=" + str(pid))
                #
                print "looking for " + url
                # open connection to server
                urlFileLikeObject = urllib2.urlopen(url)
                with open(downloadPath, "wb") as downloadFile:
                    print "starting to download " + url
                    if ticker:
                        sys.stdout.write("[")
                    # was shutil.copyfileobj(urlFileLikeObject, downloadFile)
                    try:
                        while True:
                            data = urlFileLikeObject.read(1000000)
                            if not data:
                                break
                            downloadFile.write(data)
                            if ticker:
                                sys.stdout.write(".")
                                sys.stdout.flush()
                    finally:
                        if ticker:
                            sys.stdout.write("]\n")
                            sys.stdout.flush()
            except: # apparently a problem
                if os.path.exists(downloadPath):
                    # don't let a bad file sit around
                    try:
                        os.remove(downloadPath)
                    except:
                        pass
                print "problem downloading " + url
                raise
            else:
                print "done downloading " + url
            finally:
                try:
                    # close connection to server
                    urlFileLikeObject.close()
                except:
                    pass
                try:
                    # delete semaphore file
                    os.remove(semaphorePath)
                except:
                    pass
        if os.path.exists(downloadPath):
            # file exists now, assume it is good
            return downloadPath
        else:
            # apparently download has failed
            raise IOError("file not found " + downloadPath)

Example 61

Project: CCLib
Source File: cc2510.py
	def writeCODE(self, offset, data, erase=False, verify=False, showProgress=False):
		"""
		Fully automated function for writing the Flash memory.

		WARNING: This requires DMA operations to be unpaused ( use: self.pauseDMA(False) )
		"""

		# Prepare DMA-0 for DEBUG -> RAM (using DBG_BW trigger)
		self.configDMAChannel( 0, 0x6260, 0x0000, 0x1F, tlen=self.bulkBlockSize, srcInc=0, dstInc=1, priority=1, interrupt=True )
		# Prepare DMA-1 for RAM -> FLASH (using the FLASH trigger)
		self.configDMAChannel( 1, 0x0000, 0x6273, 0x12, tlen=self.bulkBlockSize, srcInc=1, dstInc=0, priority=2, interrupt=True )

		# Reset flags
		self.clearFlashStatus()
		self.clearDMAIRQ(0)
		self.clearDMAIRQ(1)
		self.disarmDMAChannel(0)
		self.disarmDMAChannel(1)
		flashRetries = 0

		# Split in 2048-byte chunks
		iOfs = 0
		while (iOfs < len(data)):

			# Check if we should show progress
			if showProgress:
				print "\r    Progress %0.0f%%... " % (iOfs*100/len(data)),
				sys.stdout.flush()

			# Get next page
			iLen = min( len(data) - iOfs, self.bulkBlockSize )

			# Update DMA configuration if we have less than bulk-block size data 
			if (iLen < self.bulkBlockSize):
				self.configDMAChannel( 0, 0x6260, 0x0000, 0x1F, tlen=iLen, srcInc=0, dstInc=1, priority=1, interrupt=True )
				self.configDMAChannel( 1, 0x0000, 0x6273, 0x12, tlen=iLen, srcInc=1, dstInc=0, priority=2, interrupt=True )

			# Upload to RAM through DMA-0
			self.armDMAChannel(0)
			self.brustWrite( data[iOfs:iOfs+iLen] )

			# Wait until DMA-0 raises interrupt
			while not self.isDMAIRQ(0):
				time.sleep(0.010)

			# Clear DMA IRQ flag
			self.clearDMAIRQ(0)

			# Calculate the page where this data belong to
			fAddr = offset + iOfs
			fPage = int( fAddr / self.flashPageSize )

			# Calculate FLASH address High/Low bytes
			# for writing (addressable as 32-bit words)
			fWordOffset = int(fAddr / 4)
			cHigh = (fWordOffset >> 8) & 0xFF
			cLow = fWordOffset & 0xFF
			self.writeXDATA( 0x6271, [cLow, cHigh] )

			# Debug
			#print "[@%04x: p=%i, ofs=%04x, %02x:%02x]" % (fAddr, fPage, fWordOffset, cHigh, cLow),
			#sys.stdout.flush()

			# Check if we should erase page first
			if erase:
				# Select the page to erase using FADDRH[7:1]
				#
				# NOTE: Specific to (CC2530, CC2531, CC2540, and CC2541),
				#       the CC2533 uses FADDRH[6:0]
				#
				cHigh = (fPage << 1)
				cLow = 0
				self.writeXDATA( 0x6271, [cLow, cHigh] )
				# Set the erase bit
				self.setFlashErase()
				# Wait until flash is not busy any more
				while self.isFlashBusy():
					time.sleep(0.010)

			# Upload to FLASH through DMA-1
			self.armDMAChannel(1)
			self.setFlashWrite()

			# Wait until DMA-1 raises interrupt
			while not self.isDMAIRQ(1):
				# Also check for errors
				if self.isFlashAbort():
					self.disarmDMAChannel(1)
					raise IOError("Flash page 0x%02x is locked!" % fPage)
				time.sleep(0.010)

			# Clear DMA IRQ flag
			self.clearDMAIRQ(1)

			# Check if we should verify
			if verify:
				verifyBytes = self.readCODE(fAddr, iLen)
				for i in range(0, iLen):
					if verifyBytes[i] != data[iOfs+i]:
						if flashRetries < 3:
							print "\n[Flash Error at @0x%04x, will retry]" % (fAddr+i)
							flashRetries += 1
							continue
						else:
							raise IOError("Flash verification error on offset 0x%04x" % (fAddr+i))
			flashRetries = 0

			# Forward to next page
			iOfs += iLen

		if showProgress:
			print "\r    Progress 100%... OK"

Example 62

Project: CCLib
Source File: cc254x.py
	def writeCODE(self, offset, data, erase=False, verify=False, showProgress=False):
		"""
		Fully automated function for writing the Flash memory.

		WARNING: This requires DMA operations to be unpaused ( use: self.pauseDMA(False) )
		"""

		# Prepare DMA-0 for DEBUG -> RAM (using DBG_BW trigger)
		self.configDMAChannel( 0, 0x6260, 0x0000, 0x1F, tlen=self.bulkBlockSize, srcInc=0, dstInc=1, priority=1, interrupt=True )
		# Prepare DMA-1 for RAM -> FLASH (using the FLASH trigger)
		self.configDMAChannel( 1, 0x0000, 0x6273, 0x12, tlen=self.bulkBlockSize, srcInc=1, dstInc=0, priority=2, interrupt=True )

		# Reset flags
		self.clearFlashStatus()
		self.clearDMAIRQ(0)
		self.clearDMAIRQ(1)
		self.disarmDMAChannel(0)
		self.disarmDMAChannel(1)
		flashRetries = 0

		# Split in 2048-byte chunks
		iOfs = 0
		while (iOfs < len(data)):

			# Check if we should show progress
			if showProgress:
				print "\r    Progress %0.0f%%... " % (iOfs*100/len(data)),
				sys.stdout.flush()

			# Get next page
			iLen = min( len(data) - iOfs, self.bulkBlockSize )

			# Update DMA configuration if we have less than bulk-block size data 
			if (iLen < self.bulkBlockSize):
				self.configDMAChannel( 0, 0x6260, 0x0000, 0x1F, tlen=iLen, srcInc=0, dstInc=1, priority=1, interrupt=True )
				self.configDMAChannel( 1, 0x0000, 0x6273, 0x12, tlen=iLen, srcInc=1, dstInc=0, priority=2, interrupt=True )

			# Upload to RAM through DMA-0
			self.armDMAChannel(0)
			self.brustWrite( data[iOfs:iOfs+iLen] )

			# Wait until DMA-0 raises interrupt
			while not self.isDMAIRQ(0):
				time.sleep(0.010)

			# Clear DMA IRQ flag
			self.clearDMAIRQ(0)

			# Calculate the page where this data belong to
			fAddr = offset + iOfs
			fPage = int( fAddr / self.flashPageSize )

			# Calculate FLASH address High/Low bytes
			# for writing (addressable as 32-bit words)
			fWordOffset = int(fAddr / 4)
			cHigh = (fWordOffset >> 8) & 0xFF
			cLow = fWordOffset & 0xFF
			self.writeXDATA( 0x6271, [cLow, cHigh] )

			# Debug
			#print "[@%04x: p=%i, ofs=%04x, %02x:%02x]" % (fAddr, fPage, fWordOffset, cHigh, cLow),
			#sys.stdout.flush()

			# Check if we should erase page first
			if erase:
				# Select the page to erase using FADDRH[7:1]
				#
				# NOTE: Specific to (CC2530, CC2531, CC2540, and CC2541),
				#       the CC2533 uses FADDRH[6:0]
				#
				cHigh = (fPage << 1)
				cLow = 0
				self.writeXDATA( 0x6271, [cLow, cHigh] )
				# Set the erase bit
				self.setFlashErase()
				# Wait until flash is not busy any more
				while self.isFlashBusy():
					time.sleep(0.010)

			# Upload to FLASH through DMA-1
			self.armDMAChannel(1)
			self.setFlashWrite()

			# Wait until DMA-1 raises interrupt
			while not self.isDMAIRQ(1):
				# Also check for errors
				if self.isFlashAbort():
					self.disarmDMAChannel(1)
					raise IOError("Flash page 0x%02x is locked!" % fPage)
				time.sleep(0.010)

			# Clear DMA IRQ flag
			self.clearDMAIRQ(1)

			# Check if we should verify
			if verify:
				verifyBytes = self.readCODE(fAddr, iLen)
				for i in range(0, iLen):
					if verifyBytes[i] != data[iOfs+i]:
						if flashRetries < 3:
							print "\n[Flash Error at @0x%04x, will retry]" % (fAddr+i)
							flashRetries += 1
							continue
						else:
							raise IOError("Flash verification error on offset 0x%04x" % (fAddr+i))
			flashRetries = 0

			# Forward to next page
			iOfs += iLen

		if showProgress:
			print "\r    Progress 100%... OK"

Example 63

Project: threatshell
Source File: logo.py
def logo():

    logos = []

    logos.append(r"""
            ,----,
          ,/   .`|
        ,`   .'  :  ,---,                                     ___              .--.--.     ,---,                ,--,    ,--,
      ;    ;     /,--.' |                                   ,--.'|_           /  /    '. ,--.' |              ,--.'|  ,--.'|
    .'___,/    ,' |  |  :      __  ,-.                      |  | :,'         |  :  /`. / |  |  :              |  | :  |  | :
    |    :     |  :  :  :    ,' ,'/ /|                      :  : ' :         ;  |  |--`  :  :  :              :  : '  :  : '
    ;    |.';  ;  :  |  |,--.'  | |' | ,---.     ,--.--.  .;__,'  /          |  :  ;_    :  |  |,--.   ,---.  |  ' |  |  ' |
    `----'  |  |  |  :  '   ||  |   ,'/     \   /       \ |  |   |            \  \    `. |  :  '   |  /     \ '  | |  '  | |
        '   :  ;  |  |   /' :'  :  / /    /  | .--.  .-. |:__,'| :             `----.   \|  |   /' : /    /  ||  | :  |  | :
        |   |  '  '  :  | | ||  | ' .    ' / |  \__\/: . .  '  : |__           __ \  \  |'  :  | | |.    ' / |'  : |__'  : |__
        '   :  |  |  |  ' | :;  : | '   ;   /|  ," .--.; |  |  | '.'|         /  /`--'  /|  |  ' | :'   ;   /||  | '.'|  | '.'|
        ;   |.'   |  :  :_:,'|  , ; '   |  / | /  /  ,.  |  ;  :    ;        '--'.     / |  :  :_:,''   |  / |;  :    ;  :    ;
        '---'     |  | ,'     ---'  |   :    |;  :   .'   \ |  ,   /           `--'---'  |  | ,'    |   :    ||  ,   /|  ,   /
                  `--''              \   \  / |  ,     .-./  ---`-'                      `--''       \   \  /  ---`-'  ---`-'
                                      `----'   `--`---'                                               `----'

    """)

    logos.append(r"""
     _____  _     ____  _____ ____  _____    ____  _     _____ _     _
    /__ __\/ \ /|/  __\/  __//  _ \/__ __\  / ___\/ \ /|/  __// \   / \
      / \  | |_|||  \/||  \  | / \|  / \    |    \| |_|||  \  | |   | |
      | |  | | |||    /|  /_ | |-||  | |    \___ || | |||  /_ | |_/\| |_/\
      \_/  \_/ \|\_/\_\\____\\_/ \|  \_/    \____/\_/ \|\____\\____/\____/

    """)

    logos.append(r"""
     /$$$$$$$$ /$$                                       /$$            /$$$$$$  /$$                 /$$ /$$
    |__  $$__/| $$                                      | $$           /$$__  $$| $$                | $$| $$
       | $$   | $$$$$$$   /$$$$$$   /$$$$$$   /$$$$$$  /$$$$$$        | $$  \__/| $$$$$$$   /$$$$$$ | $$| $$
       | $$   | $$__  $$ /$$__  $$ /$$__  $$ |____  $$|_  $$_/        |  $$$$$$ | $$__  $$ /$$__  $$| $$| $$
       | $$   | $$  \ $$| $$  \__/| $$$$$$$$  /$$$$$$$  | $$           \____  $$| $$  \ $$| $$$$$$$$| $$| $$
       | $$   | $$  | $$| $$      | $$_____/ /$$__  $$  | $$ /$$       /$$  \ $$| $$  | $$| $$_____/| $$| $$
       | $$   | $$  | $$| $$      |  $$$$$$$|  $$$$$$$  |  $$$$/      |  $$$$$$/| $$  | $$|  $$$$$$$| $$| $$
       |__/   |__/  |__/|__/       \_______/ \_______/   \___/         \______/ |__/  |__/ \_______/|__/|__/

    """)

    logos.append(r"""
      _______ _                    _      _____ _          _ _
     |__   __| |                  | |    / ____| |        | | |
        | |  | |__  _ __ ___  __ _| |_  | (___ | |__   ___| | |
        | |  | '_ \| '__/ _ \/ _` | __|  \___ \| '_ \ / _ \ | |
        | |  | | | | | |  __/ (_| | |_   ____) | | | |  __/ | |
        |_|  |_| |_|_|  \___|\__,_|\__| |_____/|_| |_|\___|_|_|

    """)

    logos.append(r"""
     ____  _   _  ____  ____    __   ____    ___  _   _  ____  __    __
    (_  _)( )_( )(  _ \( ___)  /__\ (_  _)  / __)( )_( )( ___)(  )  (  )
      )(   ) _ (  )   / )__)  /(__)\  )(    \__ \ ) _ (  )__)  )(__  )(__
     (__) (_) (_)(_)\_)(____)(__)(__)(__)   (___/(_) (_)(____)(____)(____)

     """)

    logos.append(r"""
    .------..------..------..------..------..------.     .------..------..------..------..------.
    |T.--. ||H.--. ||R.--. ||E.--. ||A.--. ||T.--. |.-.  |S.--. ||H.--. ||E.--. ||L.--. ||L.--. |
    | :/\: || :/\: || :(): || (\/) || (\/) || :/\: ((5)) | :/\: || :/\: || (\/) || :/\: || :/\: |
    | (__) || (__) || ()() || :\/: || :\/: || (__) |'-.-.| :\/: || (__) || :\/: || (__) || (__) |
    | '--'T|| '--'H|| '--'R|| '--'E|| '--'A|| '--'T| ((1)) '--'S|| '--'H|| '--'E|| '--'L|| '--'L|
    `------'`------'`------'`------'`------'`------'  '-'`------'`------'`------'`------'`------'
    """)

    logos.append(r"""
      _____    _   _    ____    U _____ u    _       _____        ____     _   _  U _____ u  _       _
     |_ " _|  |'| |'|U |  _"\ u \| ___"|/U  /"\  u  |_ " _|      / __"| u |'| |'| \| ___"|/ |"|     |"|
       | |   /| |_| |\\| |_) |/  |  _|"   \/ _ \/     | |       <\___ \/ /| |_| |\ |  _|" U | | u U | | u
      /| |\  U|  _  |u |  _ <    | |___   / ___ \    /| |\       u___) | U|  _  |u | |___  \| |/__ \| |/__
     u |_|U   |_| |_|  |_| \_\   |_____| /_/   \_\  u |_|U       |____/>> |_| |_|  |_____|  |_____| |_____|
     _// \\_  //   \\  //   \\_  <<   >>  \\    >>  _// \\_       )(  (__)//   \\  <<   >>  //  \\  //  \\
    (__) (__)(_") ("_)(__)  (__)(__) (__)(__)  (__)(__) (__)     (__)    (_") ("_)(__) (__)(_")("_)(_")("_)

    """)

    logos.append(r"""
                                         (
      *   )    )                     )   )\ )    )       (   (
    ` )  /( ( /(  (      (     )  ( /(  (()/( ( /(    (  )\  )\
     ( )(_)))\()) )(    ))\ ( /(  )\())  /(_)))\())  ))\((_)((_)
    (_(_())((_)\ (()\  /((_))(_))(_))/  (_)) ((_)\  /((_)_   _
    |_   _|| |(_) ((_)(_)) ((_)_ | |_   / __|| |(_)(_)) | | | |
      | |  | ' \ | '_|/ -_)/ _` ||  _|  \__ \| ' \ / -_)| | | |
      |_|  |_||_||_|  \___|\__,_| \__|  |___/|_||_|\___||_| |_|

    """)

    logos.append(r"""
    ___________.__                          __      _________.__           .__  .__
    \__    ___/|  |_________   ____ _____ _/  |_   /   _____/|  |__   ____ |  | |  |
      |    |   |  |  \_  __ \_/ __ \\__  \\   __\  \_____  \ |  |  \_/ __ \|  | |  |
      |    |   |   Y  \  | \/\  ___/ / __ \|  |    /        \|   Y  \  ___/|  |_|  |__
      |____|   |___|  /__|    \___  >____  /__|   /_______  /|___|  /\___  >____/____/
                    \/            \/     \/               \/      \/     \/
    """)

    logos.append(r"""
     ___________  __    __    _______    _______       __  ___________       ________  __    __    _______  ___      ___
    ("     _   ")/" |  | "\  /"      \  /"     "|     /""\("     _   ")     /"       )/" |  | "\  /"     "||"  |    |"  |
     )__/  \\__/(:  (__)  :)|:        |(: ______)    /    \)__/  \\__/     (:   \___/(:  (__)  :)(: ______)||  |    ||  |
        \\_ /    \/      \/ |_____/   ) \/    |     /' /\  \  \\_ /         \___  \   \/      \/  \/    |  |:  |    |:  |
        |.  |    //  __  \\  //      /  // ___)_   //  __'  \ |.  |          __/  \\  //  __  \\  // ___)_  \  |___  \  |___
        \:  |   (:  (  )  :)|:  __   \ (:      "| /   /  \\  \\:  |         /" \   :)(:  (  )  :)(:      "|( \_|:  \( \_|:  \
         \__|    \__|  |__/ |__|  \___) \_______)(___/    \___)\__|        (_______/  \__|  |__/  \_______) \_______)\_______)

    """)

    logos.append(r"""
    ████████╗██╗  ██╗██████╗ ███████╗ █████╗ ████████╗    ███████╗██╗  ██╗███████╗██╗     ██╗
    ╚══██╔══╝██║  ██║██╔══██╗██╔════╝██╔══██╗╚══██╔══╝    ██╔════╝██║  ██║██╔════╝██║     ██║
       ██║   ███████║██████╔╝█████╗  ███████║   ██║       ███████╗███████║█████╗  ██║     ██║
       ██║   ██╔══██║██╔══██╗██╔══╝  ██╔══██║   ██║       ╚════██║██╔══██║██╔══╝  ██║     ██║
       ██║   ██║  ██║██║  ██║███████╗██║  ██║   ██║       ███████║██║  ██║███████╗███████╗███████╗
       ╚═╝   ╚═╝  ╚═╝╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝   ╚═╝       ╚══════╝╚═╝  ╚═╝╚══════╝╚══════╝╚══════╝

    """.decode("utf-8"))

    logos.append(r"""
    ▄▄▄█████▓ ██░ ██  ██▀███  ▓█████ ▄▄▄     ▄▄▄█████▓     ██████  ██░ ██ ▓█████  ██▓     ██▓
    ▓  ██▒ ▓▒▓██░ ██▒▓██ ▒ ██▒▓█   ▀▒████▄   ▓  ██▒ ▓▒   ▒██    ▒ ▓██░ ██▒▓█   ▀ ▓██▒    ▓██▒
    ▒ ▓██░ ▒░▒██▀▀██░▓██ ░▄█ ▒▒███  ▒██  ▀█▄ ▒ ▓██░ ▒░   ░ ▓██▄   ▒██▀▀██░▒███   ▒██░    ▒██░
    ░ ▓██▓ ░ ░▓█ ░██ ▒██▀▀█▄  ▒▓█  ▄░██▄▄▄▄██░ ▓██▓ ░      ▒   ██▒░▓█ ░██ ▒▓█  ▄ ▒██░    ▒██░
      ▒██▒ ░ ░▓█▒░██▓░██▓ ▒██▒░▒████▒▓█   ▓██▒ ▒██▒ ░    ▒██████▒▒░▓█▒░██▓░▒████▒░██████▒░██████▒
      ▒ ░░    ▒ ░░▒░▒░ ▒▓ ░▒▓░░░ ▒░ ░▒▒   ▓▒█░ ▒ ░░      ▒ ▒▓▒ ▒ ░ ▒ ░░▒░▒░░ ▒░ ░░ ▒░▓  ░░ ▒░▓  ░
        ░     ▒ ░▒░ ░  ░▒ ░ ▒░ ░ ░  ░ ▒   ▒▒ ░   ░       ░ ░▒  ░ ░ ▒ ░▒░ ░ ░ ░  ░░ ░ ▒  ░░ ░ ▒  ░
      ░       ░  ░░ ░  ░░   ░    ░    ░   ▒    ░         ░  ░  ░   ░  ░░ ░   ░     ░ ░     ░ ░
              ░  ░  ░   ░        ░  ░     ░  ░                 ░   ░  ░  ░   ░  ░    ░  ░    ░  ░

    """.decode("utf-8"))

    logos.append(r"""
        ███        ▄█    █▄       ▄████████    ▄████████    ▄████████     ███             ▄████████    ▄█    █▄       ▄████████  ▄█        ▄█
    ▀█████████▄   ███    ███     ███    ███   ███    ███   ███    ███ ▀█████████▄        ███    ███   ███    ███     ███    ███ ███       ███
       ▀███▀▀██   ███    ███     ███    ███   ███    █▀    ███    ███    ▀███▀▀██        ███    █▀    ███    ███     ███    █▀  ███       ███
        ███   ▀  ▄███▄▄▄▄███▄▄  ▄███▄▄▄▄██▀  ▄███▄▄▄       ███    ███     ███   ▀        ███         ▄███▄▄▄▄███▄▄  ▄███▄▄▄     ███       ███
        ███     ▀▀███▀▀▀▀███▀  ▀▀███▀▀▀▀▀   ▀▀███▀▀▀     ▀███████████     ███          ▀███████████ ▀▀███▀▀▀▀███▀  ▀▀███▀▀▀     ███       ███
        ███       ███    ███   ▀███████████   ███    █▄    ███    ███     ███                   ███   ███    ███     ███    █▄  ███       ███
        ███       ███    ███     ███    ███   ███    ███   ███    ███     ███             ▄█    ███   ███    ███     ███    ███ ███▌    ▄ ███▌    ▄
       ▄████▀     ███    █▀      ███    ███   ██████████   ███    █▀     ▄████▀         ▄████████▀    ███    █▀      ██████████ █████▄▄██ █████▄▄██
                                 ███    ███                                                                                     ▀         ▀
    """.decode("utf-8"))

    logos.append(r"""
     ▄▄▄▄▄▄▄▄▄▄▄  ▄         ▄  ▄▄▄▄▄▄▄▄▄▄▄  ▄▄▄▄▄▄▄▄▄▄▄  ▄▄▄▄▄▄▄▄▄▄▄  ▄▄▄▄▄▄▄▄▄▄▄       ▄▄▄▄▄▄▄▄▄▄▄  ▄         ▄  ▄▄▄▄▄▄▄▄▄▄▄  ▄            ▄
    ▐░░░░░░░░░░░▌▐░▌       ▐░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌     ▐░░░░░░░░░░░▌▐░▌       ▐░▌▐░░░░░░░░░░░▌▐░▌          ▐░▌
     ▀▀▀▀█░█▀▀▀▀ ▐░▌       ▐░▌▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌ ▀▀▀▀█░█▀▀▀▀      ▐░█▀▀▀▀▀▀▀▀▀ ▐░▌       ▐░▌▐░█▀▀▀▀▀▀▀▀▀ ▐░▌          ▐░▌
         ▐░▌     ▐░▌       ▐░▌▐░▌       ▐░▌▐░▌          ▐░▌       ▐░▌     ▐░▌          ▐░▌          ▐░▌       ▐░▌▐░▌          ▐░▌          ▐░▌
         ▐░▌     ▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄█░▌     ▐░▌          ▐░█▄▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄▄▄ ▐░▌          ▐░▌
         ▐░▌     ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌     ▐░▌          ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░▌          ▐░▌
         ▐░▌     ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀█░█▀▀ ▐░█▀▀▀▀▀▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌     ▐░▌           ▀▀▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀▀▀ ▐░▌          ▐░▌
         ▐░▌     ▐░▌       ▐░▌▐░▌     ▐░▌  ▐░▌          ▐░▌       ▐░▌     ▐░▌                    ▐░▌▐░▌       ▐░▌▐░▌          ▐░▌          ▐░▌
         ▐░▌     ▐░▌       ▐░▌▐░▌      ▐░▌ ▐░█▄▄▄▄▄▄▄▄▄ ▐░▌       ▐░▌     ▐░▌           ▄▄▄▄▄▄▄▄▄█░▌▐░▌       ▐░▌▐░█▄▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄▄▄
         ▐░▌     ▐░▌       ▐░▌▐░▌       ▐░▌▐░░░░░░░░░░░▌▐░▌       ▐░▌     ▐░▌          ▐░░░░░░░░░░░▌▐░▌       ▐░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌
          ▀       ▀         ▀  ▀         ▀  ▀▀▀▀▀▀▀▀▀▀▀  ▀         ▀       ▀            ▀▀▀▀▀▀▀▀▀▀▀  ▀         ▀  ▀▀▀▀▀▀▀▀▀▀▀  ▀▀▀▀▀▀▀▀▀▀▀  ▀▀▀▀▀▀▀▀▀▀▀

    """.decode("utf-8"))

    logos.append(r"""
     ********** **                                 **          ******** **               **  **
    /////**/// /**                                /**         **////// /**              /** /**
        /**    /**      ******  *****   ******   ******      /**       /**       *****  /** /**
        /**    /****** //**//* **///** //////** ///**/       /*********/******  **///** /** /**
        /**    /**///** /** / /*******  *******   /**        ////////**/**///**/******* /** /**
        /**    /**  /** /**   /**////  **////**   /**               /**/**  /**/**////  /** /**
        /**    /**  /**/***   //******//********  //**        ******** /**  /**//****** *** ***
        //     //   // ///     //////  ////////    //        ////////  //   //  ////// /// ///

    """)

    logos.append(r"""
    '########:'##::::'##:'########::'########::::'###::::'########:::::'######::'##::::'##:'########:'##:::::::'##:::::::
    ... ##..:: ##:::: ##: ##.... ##: ##.....::::'## ##:::... ##..:::::'##... ##: ##:::: ##: ##.....:: ##::::::: ##:::::::
    ::: ##:::: ##:::: ##: ##:::: ##: ##::::::::'##:. ##::::: ##::::::: ##:::..:: ##:::: ##: ##::::::: ##::::::: ##:::::::
    ::: ##:::: #########: ########:: ######:::'##:::. ##:::: ##:::::::. ######:: #########: ######::: ##::::::: ##:::::::
    ::: ##:::: ##.... ##: ##.. ##::: ##...:::: #########:::: ##::::::::..... ##: ##.... ##: ##...:::: ##::::::: ##:::::::
    ::: ##:::: ##:::: ##: ##::. ##:: ##::::::: ##.... ##:::: ##:::::::'##::: ##: ##:::: ##: ##::::::: ##::::::: ##:::::::
    ::: ##:::: ##:::: ##: ##:::. ##: ########: ##:::: ##:::: ##:::::::. ######:: ##:::: ##: ########: ########: ########:
    :::..:::::..:::::..::..:::::..::........::..:::::..:::::..:::::::::......:::..:::::..::........::........::........::

    """)

    logos.append(r"""

     _|_|_|_|_|  _|                                        _|            _|_|_|  _|                  _|  _|
         _|      _|_|_|    _|  _|_|    _|_|      _|_|_|  _|_|_|_|      _|        _|_|_|      _|_|    _|  _|
         _|      _|    _|  _|_|      _|_|_|_|  _|    _|    _|            _|_|    _|    _|  _|_|_|_|  _|  _|
         _|      _|    _|  _|        _|        _|    _|    _|                _|  _|    _|  _|        _|  _|
         _|      _|    _|  _|          _|_|_|    _|_|_|      _|_|      _|_|_|    _|    _|    _|_|_|  _|  _|


    """)

    logos.append(r"""
                                              ,;                                       .                    ,;
               .    .      j.               f#i                                       ;W.    .            f#i            i              i
      GEEEEEEELDi   Dt     EW,            .E#t             .. GEEEEEEEL              f#EDi   Dt         .E#t            LE             LE
      ,;;L#K;;.E#i  E#i    E##j          i#W,             ;W, ,;;L#K;;.            .E#f E#i  E#i       i#W,            L#E            L#E
         t#E   E#t  E#t    E###D.       L#D.             j##,    t#E              iWW;  E#t  E#t      L#D.            G#W.           G#W.
         t#E   E#t  E#t    E#jG#W;    :K#Wfff;          G###,    t#E             L##LffiE#t  E#t    :K#Wfff;         D#K.           D#K.
         t#E   E########f. E#t t##f   i##WLLLLt       :E####,    t#E            tLLG##L E########f. i##WLLLLt       E#K.           E#K.
         t#E   E#j..K#j... E#t  :K#E:  .E#L          ;W#DG##,    t#E              ,W#i  E#j..K#j...  .E#L         .E#E.          .E#E.
         t#E   E#t  E#t    E#KDDDD###i   f#E:       j###DW##,    t#E             j#E.   E#t  E#t       f#E:      .K#E           .K#E
         t#E   E#t  E#t    E#f,t#Wi,,,    ,WW;     G##i,,G##,    t#E           .D#j     E#t  E#t        ,WW;    .K#D           .K#D
         t#E   f#t  f#t    E#t  ;#W:       .D#;  :K#K:   L##,    t#E          ,WK,      f#t  f#t         .D#;  .W#G           .W#G
          fE    ii   ii    DWi   ,KK:        tt ;##D.    L##,     fE          EG.        ii   ii           tt :W##########Wt :W##########Wt
           :                                    ,,,      .,,       :          ,                               :,,,,,,,,,,,,,.:,,,,,,,,,,,,,.

    """)

    logos.append(r"""

    ooooo 8                            o    .oPYo. 8             8 8
      8   8                            8    8      8             8 8
      8   8oPYo. oPYo. .oPYo. .oPYo.  o8P   `Yooo. 8oPYo. .oPYo. 8 8
      8   8    8 8  `' 8oooo8 .oooo8   8        `8 8    8 8oooo8 8 8
      8   8    8 8     8.     8    8   8         8 8    8 8.     8 8
      8   8    8 8     `Yooo' `YooP8   8    `YooP' 8    8 `Yooo' 8 8
    ::..::..:::....:::::.....::.....:::..::::.....:..:::..:.....:....
    :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    """)

    logos.append(r"""
         _____                                                _____
     ___|__   |__  __   _  _____   ______  ____     __     __|___  |__  __   _  ______  ____    ____
    |_    _|     ||  |_| ||     | |   ___||    \  _|  |_  |   ___|    ||  |_| ||   ___||    |  |    |
     |    |      ||   _  ||     \ |   ___||     \|_    _|  `-.`-.     ||   _  ||   ___||    |_ |    |_
     |____|    __||__| |_||__|\__\|______||__|\__\ |__|   |______|  __||__| |_||______||______||______|
        |_____|                                              |_____|

    """)

    print color(random.choice(logos), random.randrange(31, 37))
    print "~ %s ~\n" % red("Huntin' Yo")
    print " Threat Shell v%s" % yellow(TS_VERSION)
    print
    sys.stdout.flush()
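
This banner routine collects a set of ASCII-art logos, prints a randomly chosen one with Python 2 print statements, and then flushes stdout so the banner is fully written before the interactive shell starts prompting. A minimal Python 3 sketch of the same idea; the banner strings, version text, and function name below are placeholders, not taken from the Threat Shell source:

    import random
    import sys

    BANNERS = [
        "=== placeholder banner one ===",
        "=== placeholder banner two ===",
    ]

    def show_banner():
        # stdout is block-buffered when redirected to a file or pipe, so
        # flush to make the banner visible before anything else happens.
        print(random.choice(BANNERS))
        print("version 0.0.0")
        sys.stdout.flush()

    if __name__ == "__main__":
        show_banner()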

Example 64

Project: pywikibot-core
Source File: script_tests.py
View license
    def __new__(cls, name, bases, dct):
        """Create the new class."""
        def test_execution(script_name, args=[]):
            is_autorun = '-help' not in args and script_name in auto_run_script_list

            def test_skip_script(self):
                raise unittest.SkipTest(
                    'Skipping execution of auto-run scripts (set '
                    'PYWIKIBOT2_TEST_AUTORUN=1 to enable) "{0}"'.format(script_name))

            def testScript(self):
                cmd = [script_name]

                if args:
                    cmd += args

                data_in = script_input.get(script_name)

                timeout = 0
                if is_autorun:
                    timeout = 5

                if self._results and script_name in self._results:
                    error = self._results[script_name]
                    if isinstance(error, StringTypes):
                        stdout = None
                    else:
                        stdout, error = error
                else:
                    stdout = None
                    error = None

                test_overrides = {}
                if not hasattr(self, 'net') or not self.net:
                    test_overrides['pywikibot.Site'] = 'None'

                result = execute_pwb(cmd, data_in, timeout=timeout, error=error,
                                     overrides=test_overrides)

                stderr = result['stderr'].splitlines()
                stderr_sleep = [l for l in stderr
                                if l.startswith('Sleeping for ')]
                stderr_other = [l for l in stderr
                                if not l.startswith('Sleeping for ')]
                if stderr_sleep:
                    print(u'\n'.join(stderr_sleep))

                if result['exit_code'] == -9:
                    print(' killed', end='  ')

                if error:
                    self.assertIn(error, result['stderr'])

                    exit_codes = [0, 1, 2, -9]
                elif not is_autorun:
                    if stderr_other == []:
                        stderr_other = None
                    if stderr_other is not None:
                        self.assertIn('Use -help for further information.',
                                      stderr_other)
                        self.assertNotIn('-help', args)
                    else:
                        self.assertIn('Global arguments available for all',
                                      result['stdout'])

                    exit_codes = [0]
                else:
                    # auto-run
                    exit_codes = [0, -9]

                    if (not result['stdout'] and not result['stderr']):
                        print(' auto-run script unresponsive after %d seconds'
                              % timeout, end=' ')
                    elif 'SIMULATION: edit action blocked' in result['stderr']:
                        print(' auto-run script simulated edit blocked',
                              end='  ')
                    else:
                        print(' auto-run script stderr within %d seconds: %r'
                              % (timeout, result['stderr']), end='  ')

                self.assertNotIn('Traceback (most recent call last)',
                                 result['stderr'])
                self.assertNotIn('deprecated', result['stderr'].lower())

                # If stdout doesn't include the global help..
                if 'Global arguments available for all' not in result['stdout']:
                    # Specifically look for deprecated
                    self.assertNotIn('deprecated', result['stdout'].lower())
                    if result['stdout'] == '':
                        result['stdout'] = None
                    # But also complain if there is any stdout
                    if stdout is not None and result['stdout'] is not None:
                        self.assertIn(stdout, result['stdout'])
                    else:
                        self.assertIsNone(result['stdout'])

                self.assertIn(result['exit_code'], exit_codes)

                sys.stdout.flush()

            if not enable_autorun_tests and is_autorun:
                return test_skip_script
            return testScript

        argument = '-' + dct['_argument']

        for script_name in script_list:
            # force login to be the first, alphabetically, so the login
            # message does not unexpectedly occur during execution of
            # another script.
            # unrunnable script tests are disabled by default in load_tests()

            if script_name == 'login':
                test_name = 'test__login'
            else:
                test_name = 'test_' + script_name

            cls.add_method(dct, test_name,
                           test_execution(script_name, [argument]),
                           'Test running %s %s.' % (script_name, argument))

            if script_name in dct['_expected_failures']:
                dct[test_name] = unittest.expectedFailure(dct[test_name])
            elif script_name in dct['_allowed_failures']:
                dct[test_name] = allowed_failure(dct[test_name])

            # Disable test by default in nosetests
            if script_name in unrunnable_script_list:
                # flag them as an expectedFailure due to py.test (T135594)
                dct[test_name] = unittest.expectedFailure(dct[test_name])
                dct[test_name].__test__ = False

        return super(TestScriptMeta, cls).__new__(cls, name, bases, dct)
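
The test method above prints progress fragments with end=' ' (no trailing newline) and calls sys.stdout.flush() at the end of testScript so the partial line reaches the test runner's output instead of sitting in the buffer. A self-contained sketch of that pattern, assuming nothing beyond the standard unittest module (the test name and message are illustrative):

    import sys
    import unittest

    class FlushingTest(unittest.TestCase):
        def test_partial_line(self):
            # A fragment with no newline can sit in the buffer until the
            # process exits; flushing makes it visible right away.
            print(' slow step running...', end=' ')
            sys.stdout.flush()
            self.assertEqual(1 + 1, 2)

    if __name__ == '__main__':
        unittest.main()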

Example 65

Project: writemdict
Source File: testSalsa20.py
View license
def test( module, module_name ):
    print("===== Testing", module_name, "version", module._version, "=====")
    from sys import stdout

    passed = True

    if 1:  # Test these if the module has them:
        if "rot32" in module.__dict__:
            passed &= test_rot32( module.rot32, module_name+".rot32" )
            # Compare to slow version:
            passed &= test_rot32( rot32long, "rot32long" )
        print()

        if "add32" in module.__dict__:
            passed &= test_add32( module.add32, module_name+".add32" )

    if 1 and passed:
        test_salsa20core( module, module_name )

    if 1 and passed:
        rounds  = 8                     # may be 8, 12, or 20
    
        if 0:
            message = loadfmfile('testdata.txt')
        else:
            message = b'Kilroy was here!  ...there, and everywhere.'
        key     = b'myKey67890123456'    # 16 or 32 bytes, exactly
        nonce   = b'aNonce'              # do better in real life
        IV      = (nonce+b'*'*8)[:8]     # force to exactly 64 bits

        print("Testing decrypt(encrypt(short_message))==short_message...")
        # encrypt
        s20 = salsa20_test_classes[module_name]( key, IV, rounds )
        ciphertxt = s20.encryptBytes(message)
        
        # decrypt
        s20 = salsa20_test_classes[module_name]( key, IV, rounds )
        plaintxt  = s20.encryptBytes(ciphertxt)
    
        if message == plaintxt:
            print('    *** good ***')
        else:
            print('    *** bad ***')
            passed = False


    if 1 and passed:    # one known good 8-round test vector 
        print("Testing known 64-byte message and key...")
        rounds = 8      # must be 8 for this test
        message = b'\x00'*64
        key = binascii.unhexlify('00000000000000000000000000000002')
        IV  = binascii.unhexlify('0000000000000000')
        out64 = bytestring("""
               06C80B8CEC60F0C2E73EB6ED5DCB1B9C
               39B210F1AB76FEDF1A6B7AE370DA0F20
               0CEBCAD6EF6E57AC80E4375C035FA44D
               3AE4DC2C2507757DAF37B14F36643489""")
        s20 = salsa20_test_classes[module_name]( key, IV, rounds )
        s20.setIV(IV)
        ciphertxt = s20.encryptBytes(message)
        s20 = salsa20_test_classes[module_name]( key, IV, rounds )
        plaintxt  = s20.encryptBytes(ciphertxt)
        if (message == plaintxt and
            ciphertxt[:64] == out64):
            print('    *** vector 1 good ***')
        else:
            print('    *** vector 1 bad ***')
            passed = False
        
    if 1 and passed:    # one known good 8-round test vector 
        print("Testing known key and 64k message...")
        rounds = 8      # must be 8 for this test
        message = b'\x00'*65536
        key = binascii.unhexlify('0053A6F94C9FF24598EB3E91E4378ADD')
        IV  = binascii.unhexlify('0D74DB42A91077DE')
        out64 = bytestring("""
               75FCAE3A3961BDC7D2513662C24ADECE
               995545599FF129006E7A6EE57B7F33A2
               6D1B27C51EA15E8F956693472DC23132
               FCD90FB0E352D26AF4DCE5427193CA26""")
        out65536 = bytestring("""
               EA75A566C431A10CED804CCD45172AD1
               EC4930E9869372B8EDDF303098A8910C
               EE123BF849C51A33554BA1445E6B6268
               4921F36B77EADC9681A2BB9DDFEC2FC8""")
        s20 = salsa20_test_classes[module_name]( key, IV, rounds )
        ciphertxt = s20.encryptBytes(message)
        s20 = salsa20_test_classes[module_name]( key, IV, rounds )
        plaintxt  = s20.encryptBytes(ciphertxt)
        if (message == plaintxt and
            ciphertxt[:64] == out64 and
            ciphertxt[65472:] == out65536):
            print('    *** vector 2 good ***')
        else:
            print('    *** vector 2 bad ***')
            passed = False

    if 1 and passed:    # some rough speed tests
        from time import time
        from math import ceil

        print("Speed tests...")
        names = {}
        speeds = {}
        message_lens = [ 64, 2**16 ]
        #                    64-byte message     65536-byte message
        # Salsa20/4000: 12345678.9 bytes/sec   12345678.9 bytes/sec
        namefmt = "%13s"
        print(namefmt % " ", end=" ")
        msg_len_fmt =  "%7d-byte message  "
        speed_fmt =    "%10.1f bytes/sec  "
        for msg_len in message_lens:
            print(msg_len_fmt % msg_len,end=" ")
        print()

        for nRounds in [ 8, 20, 4000 ]:
            names[ nRounds ] = "Salsa20/" + repr(nRounds) + ":"
            print(namefmt % names[ nRounds ], end=" ")
            speeds[ nRounds ] = {}
            if nRounds <= 20: lens = message_lens
            else:             lens = message_lens[ 0 : -1 ]
            for msg_len in lens:
                message = b'\x00' * msg_len
                key = binascii.unhexlify('00000000000000000000000000000002')
                IV  = binascii.unhexlify('0000000000000000')
                s20 = salsa20_test_classes[module_name]( key, IV, 20 )
                s20.force_nRounds( nRounds )
                nreps = 1
                duration = 4.0
                while duration < 5: # sec.
                    # Aim for 6 seconds:
                    nreps = int( ceil( nreps * min( 4, 6.0/duration ) ) )
                    start = time()
                    for i in range( nreps ):
                        ciphertxt = s20.encryptBytes(message)
                    duration = time() - start
                speeds[ nRounds ][ msg_len ] = msg_len * nreps / duration
                print(speed_fmt % speeds[ nRounds ][ msg_len ], end=" ")
                stdout.flush()
            print()

    return passed
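
The speed tests above print one timing per message length with end=" " and call stdout.flush() after each column so the numbers appear as soon as they are measured, not when the whole row is done. A rough standalone sketch of that reporting loop; the benchmarked function and the sizes are invented for illustration, not taken from the Salsa20 code:

    import sys
    import time

    def benchmark(func, sizes):
        # One timing per column; flush after each so it shows up immediately.
        for n in sizes:
            start = time.time()
            func(n)
            print('%10.3f s' % (time.time() - start), end=' ')
            sys.stdout.flush()
        print()

    if __name__ == '__main__':
        benchmark(lambda n: sum(range(n)), [10**5, 10**6, 10**7])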

Example 66

Project: tensorlayer
Source File: tutorial_translate.py
View license
def main_train():
    """Step 1 : Download Training and Testing data.
    Compared with the Word2vec example, the dataset in this example is large,
    so we use TensorFlow's gfile functions to speed up the pre-processing.
    """
    print()
    print("Prepare raw data")
    train_path, dev_path = tl.files.load_wmt_en_fr_dataset(data_dir=data_dir)
    print("Training data : %s" % train_path)   # wmt/giga-fren.release2
    print("Testing data : %s" % dev_path)     # wmt/newstest2013

    """Step 2 : Create Vocabularies for both Training and Testing data.
    """
    print()
    print("Create vocabularies")
    fr_vocab_path = os.path.join(data_dir, "vocab%d.fr" % fr_vocab_size)
    en_vocab_path = os.path.join(data_dir, "vocab%d.en" % en_vocab_size)
    print("Vocabulary of French : %s" % fr_vocab_path)    # wmt/vocab40000.fr
    print("Vocabulary of English : %s" % en_vocab_path)   # wmt/vocab40000.en
    tl.nlp.create_vocabulary(fr_vocab_path, train_path + ".fr",
                fr_vocab_size, tokenizer=None, normalize_digits=normalize_digits,
                _DIGIT_RE=_DIGIT_RE, _START_VOCAB=_START_VOCAB)
    tl.nlp.create_vocabulary(en_vocab_path, train_path + ".en",
                en_vocab_size, tokenizer=None, normalize_digits=normalize_digits,
                _DIGIT_RE=_DIGIT_RE, _START_VOCAB=_START_VOCAB)

    """ Step 3 : Tokenize Training and Testing data.
    """
    print()
    print("Tokenize data")
    # Create tokenized file for the training data by using the vocabulary file.
    # normalize_digits=True means set all digits to zero, so as to reduce
    # vocabulary size.
    fr_train_ids_path = train_path + (".ids%d.fr" % fr_vocab_size)
    en_train_ids_path = train_path + (".ids%d.en" % en_vocab_size)
    print("Tokenized Training data of French : %s" % fr_train_ids_path)    # wmt/giga-fren.release2.ids40000.fr
    print("Tokenized Training data of English : %s" % en_train_ids_path)   # wmt/giga-fren.release2.ids40000.fr
    tl.nlp.data_to_token_ids(train_path + ".fr", fr_train_ids_path, fr_vocab_path,
                                tokenizer=None, normalize_digits=normalize_digits,
                                UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE)
    tl.nlp.data_to_token_ids(train_path + ".en", en_train_ids_path, en_vocab_path,
                                tokenizer=None, normalize_digits=normalize_digits,
                                UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE)

    # we should also create tokenized file for the development (testing) data.
    fr_dev_ids_path = dev_path + (".ids%d.fr" % fr_vocab_size)
    en_dev_ids_path = dev_path + (".ids%d.en" % en_vocab_size)
    print("Tokenized Testing data of French : %s" % fr_dev_ids_path)    # wmt/newstest2013.ids40000.fr
    print("Tokenized Testing data of English : %s" % en_dev_ids_path)   # wmt/newstest2013.ids40000.en
    tl.nlp.data_to_token_ids(dev_path + ".fr", fr_dev_ids_path, fr_vocab_path,
                                tokenizer=None, normalize_digits=normalize_digits,
                                UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE)
    tl.nlp.data_to_token_ids(dev_path + ".en", en_dev_ids_path, en_vocab_path,
                                tokenizer=None, normalize_digits=normalize_digits,
                                UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE)

    # You can get the word_to_id dictionary and id_to_word list as follow.
    # vocab, rev_vocab = tl.nlp.initialize_vocabulary(en_vocab_path)
    # print(vocab)
    # {b'cat': 1, b'dog': 0, b'bird': 2}
    # print(rev_vocab)
    # [b'dog', b'cat', b'bird']

    en_train = en_train_ids_path
    fr_train = fr_train_ids_path
    en_dev = en_dev_ids_path
    fr_dev = fr_dev_ids_path

    """Step 4 : Load both tokenized Training and Testing data into buckets
    and compute their size.

    Bucketing is a method to efficiently handle sentences of different length.
    When translating English to French, we will have English sentences of
    different lengths I on input, and French sentences of different
    lengths O on output. We should in principle create a seq2seq model
    for every pair (I, O+1) of lengths of an English and French sentence.

    We find the closest bucket for each pair, then pad every sentence with
    a special PAD symbol at the end if the bucket is bigger than the
    sentence.

    We use a number of buckets and pad to the closest one for efficiency.

    If the input is an English sentence with 3 tokens, and the corresponding
    output is a French sentence with 6 tokens, then they will be put in the
    first bucket and padded to length 5 for encoder inputs (English sentence),
    and length 10 for decoder inputs.
    If we have an English sentence with 8 tokens and the corresponding French
    sentence has 18 tokens, then they will be fit into (20, 25) bucket.

    Given the pair [["I", "go", "."], ["Je", "vais", "."]] in tokenized format,
    the encoder inputs become [PAD PAD "." "go" "I"] and the decoder inputs
    become [GO "Je" "vais" "." EOS PAD PAD PAD PAD PAD].
    see ``get_batch()``
    """
    print()
    print ("Read development (test) data into buckets")
    dev_set = read_data(en_dev, fr_dev, buckets, EOS_ID)

    if plot_data:
        # Visualize the development (testing) data
        print('dev data:', buckets[0], dev_set[0][0])    # (5, 10), [[13388, 4, 949], [23113, 8, 910, 2]]
        vocab_en, rev_vocab_en = tl.nlp.initialize_vocabulary(en_vocab_path)
        context = tl.nlp.word_ids_to_words(dev_set[0][0][0], rev_vocab_en)
        word_ids = tl.nlp.words_to_word_ids(context, vocab_en)
        print('en word_ids:', word_ids) # [13388, 4, 949]
        print('en context:', context)   # [b'Preventing', b'the', b'disease']
        vocab_fr, rev_vocab_fr = tl.nlp.initialize_vocabulary(fr_vocab_path)
        context = tl.nlp.word_ids_to_words(dev_set[0][0][1], rev_vocab_fr)
        word_ids = tl.nlp.words_to_word_ids(context, vocab_fr)
        print('fr word_ids:', word_ids) # [23113, 8, 910, 2]
        print('fr context:', context)   # [b'Pr\xc3\xa9venir', b'la', b'maladie', b'_EOS']

    print()
    print ("Read training data into buckets (limit: %d)" % max_train_data_size)
    train_set = read_data(en_train, fr_train, buckets, EOS_ID, max_train_data_size)
    if plot_data:
        # Visualize the training data
        print('train data:', buckets[0], train_set[0][0])   # (5, 10) [[1368, 3344], [1089, 14, 261, 2]]
        context = tl.nlp.word_ids_to_words(train_set[0][0][0], rev_vocab_en)
        word_ids = tl.nlp.words_to_word_ids(context, vocab_en)
        print('en word_ids:', word_ids) # [1368, 3344]
        print('en context:', context)   # [b'Site', b'map']
        context = tl.nlp.word_ids_to_words(train_set[0][0][1], rev_vocab_fr)
        word_ids = tl.nlp.words_to_word_ids(context, vocab_fr)
        print('fr word_ids:', word_ids) # [1089, 14, 261, 2]
        print('fr context:', context)   # [b'Plan', b'du', b'site', b'_EOS']
        print()

    train_bucket_sizes = [len(train_set[b]) for b in xrange(len(buckets))]
    train_total_size = float(sum(train_bucket_sizes))
    print('the num of training data in each buckets: %s' % train_bucket_sizes)    # [239121, 1344322, 5239557, 10445326]
    print('the num of training data: %d' % train_total_size)        # 17268326.0

    # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
    # to select a bucket. The length of [scale[i], scale[i+1]] is proportional
    # to the size of the i-th training bucket, as used later.
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                           for i in xrange(len(train_bucket_sizes))]
    print('train_buckets_scale:',train_buckets_scale)   # [0.013847375825543252, 0.09169638099257565, 0.3951164693091849, 1.0]


    """Step 6 : Create model
    """
    print()
    print("Create Embedding Attention Seq2seq Model")
    with tf.variable_scope("model", reuse=None):
        model = tl.layers.EmbeddingAttentionSeq2seqWrapper(
                          en_vocab_size,
                          fr_vocab_size,
                          buckets,
                          size,
                          num_layers,
                          max_gradient_norm,
                          batch_size,
                          learning_rate,
                          learning_rate_decay_factor,
                          forward_only=False)    # is_train = True

    sess.run(tf.initialize_all_variables())
    # model.print_params()
    tl.layers.print_all_variables()

    if resume:
        print("Load existing model" + "!"*10)
        if is_npz:
            # instead of using TensorFlow saver, we can use TensorLayer to restore a model
            load_params = tl.files.load_npz(name=model_file_name+'.npz')
            tl.files.assign_params(sess, load_params, model)
        else:
            saver = tf.train.Saver()
            saver.restore(sess, model_file_name+'.ckpt')

    """Step 7 : Training
    """
    print()
    step_time, loss = 0.0, 0.0
    current_step = 0
    previous_losses = []
    while True:
        # Choose a bucket according to data distribution. We pick a random number
        # in [0, 1] and use the corresponding interval in train_buckets_scale.
        random_number_01 = np.random.random_sample()
        bucket_id = min([i for i in xrange(len(train_buckets_scale))
                       if train_buckets_scale[i] > random_number_01])

        # Get a batch and make a step.
        # randomly pick ``batch_size`` training examples from a random bucket_id
        # the data format is described in readthedocs tutorial
        start_time = time.time()
        encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id, PAD_ID, GO_ID, EOS_ID, UNK_ID)

        _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                   target_weights, bucket_id, False)
        step_time += (time.time() - start_time) / steps_per_checkpoint
        loss += step_loss / steps_per_checkpoint
        current_step += 1

        # Once in a while, we save checkpoint, print statistics, and run evals.
        if current_step % steps_per_checkpoint == 0:
            # Print statistics for the previous epoch.
            perplexity = math.exp(loss) if loss < 300 else float('inf')
            print ("global step %d learning rate %.4f step-time %.2f perplexity "
                "%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
                            step_time, perplexity))
            # Decrease learning rate if no improvement was seen over last 3 times.
            if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
                sess.run(model.learning_rate_decay_op)
            previous_losses.append(loss)

            # Save model
            if is_npz:
                tl.files.save_npz(model.all_params, name=model_file_name+'.npz')
            else:
                print('Model is saved to: %s' % model_file_name+'.ckpt')
                checkpoint_path = os.path.join(train_dir, model_file_name+'.ckpt')
                model.saver.save(sess, checkpoint_path, global_step=model.global_step)

            step_time, loss = 0.0, 0.0
            # Run evals on development set and print their perplexity.
            for bucket_id in xrange(len(buckets)):
                if len(dev_set[bucket_id]) == 0:
                    print("  eval: empty bucket %d" % (bucket_id))
                    continue
                encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        dev_set, bucket_id, PAD_ID, GO_ID, EOS_ID, UNK_ID)
                _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                               target_weights, bucket_id, True)
                eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
                print("  eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
            sys.stdout.flush()
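
The training loop above flushes stdout once per checkpoint, after the perplexity report and the evaluation prints, so a log file that stdout is redirected to stays current during a run that can last for days. A minimal sketch of that per-checkpoint flush, with a random number standing in for the real training step and loss:

    import random
    import sys

    def train(steps=1000, steps_per_checkpoint=100):
        loss = 0.0
        for step in range(1, steps + 1):
            loss += random.random()        # stand-in for one real training step
            if step % steps_per_checkpoint == 0:
                print('step %d  mean loss %.4f' % (step, loss / steps_per_checkpoint))
                loss = 0.0
                # Long jobs usually run with stdout redirected to a log file;
                # flushing per checkpoint keeps that log up to date.
                sys.stdout.flush()

    if __name__ == '__main__':
        train()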

Example 67

Project: imagrium
Source File: test_re.py
View license
def run_re_tests():
    from test.re_tests import benchmarks, tests, SUCCEED, FAIL, SYNTAX_ERROR
    if verbose:
        print 'Running re_tests test suite'
    else:
        # To save time, only run the first and last 10 tests
        #tests = tests[:10] + tests[-10:]
        pass

    for t in tests:
        sys.stdout.flush()
        pattern = s = outcome = repl = expected = None
        if len(t) == 5:
            pattern, s, outcome, repl, expected = t
        elif len(t) == 3:
            pattern, s, outcome = t
        else:
            raise ValueError, ('Test tuples should have 3 or 5 fields', t)

        try:
            obj = re.compile(pattern)
        except re.error:
            if outcome == SYNTAX_ERROR: pass  # Expected a syntax error
            else:
                print '=== Syntax error:', t
        except KeyboardInterrupt: raise KeyboardInterrupt
        except:
            print '*** Unexpected error ***', t
            if verbose:
                traceback.print_exc(file=sys.stdout)
        else:
            try:
                result = obj.search(s)
            except re.error, msg:
                print '=== Unexpected exception', t, repr(msg)
            if outcome == SYNTAX_ERROR:
                # This should have been a syntax error; forget it.
                pass
            elif outcome == FAIL:
                if result is None: pass   # No match, as expected
                else: print '=== Succeeded incorrectly', t
            elif outcome == SUCCEED:
                if result is not None:
                    # Matched, as expected, so now we compute the
                    # result string and compare it to our expected result.
                    start, end = result.span(0)
                    vardict={'found': result.group(0),
                             'groups': result.group(),
                             'flags': result.re.flags}
                    for i in range(1, 100):
                        try:
                            gi = result.group(i)
                            # Special hack because else the string concat fails:
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict['g%d' % i] = gi
                    for i in result.re.groupindex.keys():
                        try:
                            gi = result.group(i)
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict[i] = gi
                    repl = eval(repl, vardict)
                    if repl != expected:
                        print '=== grouping error', t,
                        print repr(repl) + ' should be ' + repr(expected)
                else:
                    print '=== Failed incorrectly', t

                # Try the match on a unicode string, and check that it
                # still succeeds.
                try:
                    result = obj.search(unicode(s, "latin-1"))
                    if result is None:
                        print '=== Fails on unicode match', t
                except NameError:
                    continue # 1.5.2
                except TypeError:
                    continue # unicode test case

                # Try the match on a unicode pattern, and check that it
                # still succeeds.
                obj=re.compile(unicode(pattern, "latin-1"))
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode pattern match', t

                # Try the match with the search area limited to the extent
                # of the match and see if it still succeeds.  \B will
                # break (because it won't match at the end or start of a
                # string), so we'll ignore patterns that feature it.

                if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
                               and result is not None:
                    obj = re.compile(pattern)
                    result = obj.search(s, result.start(0), result.end(0) + 1)
                    if result is None:
                        print '=== Failed on range-limited match', t

                # Try the match with IGNORECASE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.IGNORECASE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on case-insensitive match', t

                # Try the match with LOCALE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.LOCALE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on locale-sensitive match', t

                # Try the match with UNICODE locale enabled, and check
                # that it still succeeds.
                obj = re.compile(pattern, re.UNICODE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode-sensitive match', t
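
run_re_tests above calls sys.stdout.flush() at the top of every iteration, so whatever was printed for earlier test tuples is already written out if the current pattern crashes the interpreter. A small sketch of flushing before potentially failing work; the case names and callables are made up for illustration:

    import sys

    def run_cases(cases):
        for name, case in cases:
            # Flush before the risky work so everything printed so far is
            # already on disk if this iteration crashes or hangs.
            sys.stdout.flush()
            try:
                print(name, '->', case())
            except Exception as exc:
                print(name, 'failed:', exc)

    if __name__ == '__main__':
        run_cases([('add', lambda: 1 + 1), ('div', lambda: 1 / 0)])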

Example 68

Project: pyeq2
Source File: generateOutput.py
View license
def PoorlyDefined_A():
    specificBaseName = inspect.stack()[0][3]
    htmlTitle = 'Data With A Poorly Defined Region'
    htmlText = '''
This can be mitigated by taking additional<br>
data in the region that is poorly defined.'''
    thumbnailAnimationName = specificBaseName + '_small.gif'
    fullsizeAnimationName = specificBaseName + '_large.gif'
    
    print specificBaseName

    Ymax = 1000.0
    Ymin = -1000.0
    
    equation = pyeq2.Models_2D.Polynomial.Linear('SSQABS')
    dimensionality = equation.GetDimensionality()
    equation.dataCache.DependentDataContainsZeroFlag = True # do not need relative error calculations, this flag turns them off
        
    xbase = numpy.array([0.0, 60.0, 120.0])
    equation.dataCache.allDataCacheDictionary['Weights'] = []
    
    #numpy.random.seed(3)
    #randomArray = (numpy.random.random_sample(len(xbase)+1) - 0.5) * 3.0
    randomArray = numpy.array([-50.0, 50.0, 51.0, -50.0])

    for i in range(0, 360):
        extraZerosString = ''
        if i < 100:
            extraZerosString = '0'
        if i < 10:
            extraZerosString = '00'
        
        numpy.random.seed(3)
        if not i % largeAnimationModulus:
            print i,;sys.stdout.flush()
            
            equation = pyeq2.Models_2D.Polynomial.Quadratic('SSQABS')
            xdata = xbase  * (0.45 * numpy.sin(numpy.radians(i)) + 0.5)
            xdata = numpy.append(xdata, numpy.array([120.0]))
            equation.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([xdata, numpy.ones(len(xdata))])
            IndependentDataArray = equation.dataCache.allDataCacheDictionary['IndependentData']
            equation.dataCache.allDataCacheDictionary['DependentData'] =  xdata + randomArray
            DependentDataArray = equation.dataCache.allDataCacheDictionary['DependentData']
            equation.dataCache.allDataCacheDictionary['Weights'] =  []
            equation.Solve()
            equation.CalculateModelErrors(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
            equation.CalculateCoefficientAndFitStatistics()

            fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_large.png'
            SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
            
            # small animation, create fewer frames
            if not (i % smallAnimationModulus):
                graphWidth = smallAnimationGraphWidth
                graphHeight = smallAnimationGraphHeight
                fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_small.png'
                SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
                
    print
    # convert all PNG files to GIF for gifsicle to use
    commandString = 'mogrify -format gif *png'
    print "Calling " + commandString
    os.popen(commandString)
    
    # make small GIF animation
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(smallAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*small.gif > ' + specificBaseName + '_small.gif'
    print "Calling " + commandString
    os.popen(commandString)

    # make large GIF animation        
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(largeAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*large.gif > ' + specificBaseName + '_large.gif'
    print "Calling " + commandString
    os.popen(commandString)

    # remove unused files, saving the ones in this list
    stillImageFileNameList = [specificBaseName + '_ci022_large.png',
                              specificBaseName + '_ci090_large.png',
                              specificBaseName + '_ci270_large.png']
    currentDir = os.listdir('.')
    for filename in currentDir:
        if (-1 != filename.find('_ci')) and (-1 != filename.find('small')):
            os.remove(filename)
        if (-1 != filename.find('_ci')) and (-1 != filename.find('large.gif')):
            os.remove(filename)
        if (-1 != filename.find(specificBaseName)) and (-1 != filename.find('_large.png')) and (filename not in stillImageFileNameList):
            os.remove(filename)

    return [htmlTitle, htmlText, specificBaseName, stillImageFileNameList]

Example 69

Project: pyeq2
Source File: generateOutput.py
View license
def RandomData_A():
    specificBaseName = inspect.stack()[0][3]
    htmlTitle = 'Fitting Random Data'
    htmlText = '''
This illustrates the effect of fitting completely<br>
random data that has no relationship of any kind.'''
    thumbnailAnimationName = specificBaseName + '_small.gif'
    fullsizeAnimationName = specificBaseName + '_large.gif'
    
    print specificBaseName

    Ymax = 1.0
    Ymin = 0.0
    
    equation = pyeq2.Models_2D.Polynomial.Linear('SSQABS')
    dimensionality = equation.GetDimensionality()
    equation.dataCache.DependentDataContainsZeroFlag = True # do not need relative error calculations, this flag turns them off
        
    numpy.random.seed(3) # yield repeatable results
    randomArrayX = numpy.random.random_sample(195)
    randomArrayY = numpy.random.random_sample(195)
    equation.dataCache.allDataCacheDictionary['Weights'] = []
    
    # ensure an X data range for the graph, as on
    # all other plots the X data range is constant
    randomArrayX[0] = 0.001
    randomArrayX[1] = 0.999
    
    for i in range(0, 360):
        extraZerosString = ''
        if i < 100:
            extraZerosString = '0'
        if i < 10:
            extraZerosString = '00'
        
        if not i % largeAnimationModulus:
            print i,;sys.stdout.flush()
            index = i
            if i > 180:
                index = 180 - (i%180)
            equation = pyeq2.Models_2D.Polynomial.Linear('SSQABS')
            xdata = randomArrayX[:15 + index/2]
            ydata = randomArrayY[:15 + index/2]
            equation.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([xdata, numpy.ones(len(xdata))])
            IndependentDataArray = equation.dataCache.allDataCacheDictionary['IndependentData']
            equation.dataCache.allDataCacheDictionary['DependentData'] =  ydata
            DependentDataArray = equation.dataCache.allDataCacheDictionary['DependentData']
            equation.dataCache.allDataCacheDictionary['Weights'] =  []
            equation.Solve()
            equation.CalculateModelErrors(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
            equation.CalculateCoefficientAndFitStatistics()
            
            fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_large.png'
            SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
            
            # small animation, create fewer frames
            if not (i % smallAnimationModulus):
                graphWidth = smallAnimationGraphWidth
                graphHeight = smallAnimationGraphHeight
                fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_small.png'
                SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
                
    print
    # convert all PNG files to GIF for gifsicle to use
    commandString = 'mogrify -format gif *png'
    print "Calling " + commandString
    os.popen(commandString)
    
    # make small GIF animation
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(smallAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*small.gif > ' + specificBaseName + '_small.gif'
    print "Calling " + commandString
    os.popen(commandString)

    # make large GIF animation        
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(largeAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*large.gif > ' + specificBaseName + '_large.gif'
    print "Calling " + commandString
    os.popen(commandString)

    # remove unused files, saving the ones in this list
    stillImageFileNameList = [specificBaseName + '_ci000_large.png',
                              specificBaseName + '_ci180_large.png']
    currentDir = os.listdir('.')
    for filename in currentDir:
        if (-1 != filename.find('_ci')) and (-1 != filename.find('small')):
            os.remove(filename)
        if (-1 != filename.find('_ci')) and (-1 != filename.find('large.gif')):
            os.remove(filename)
        if (-1 != filename.find(specificBaseName)) and (-1 != filename.find('_large.png')) and (filename not in stillImageFileNameList):
            os.remove(filename)

    return [htmlTitle, htmlText, specificBaseName, stillImageFileNameList]

Example 70

Project: pyeq3
Source File: generateOutput.py
View license
def RandomData_A():
    specificBaseName = inspect.stack()[0][3]
    htmlTitle = 'Fitting Random Data'
    htmlText = '''
This illustrates the effect of fitting completely<br>
random data that has no relationship of any kind.'''
    thumbnailAnimationName = specificBaseName + '_small.gif'
    fullsizeAnimationName = specificBaseName + '_large.gif'
    
    print(specificBaseName)

    Ymax = 1.0
    Ymin = 0.0
    
    equation = pyeq3.Models_2D.Polynomial.Linear('SSQABS')
    dimensionality = equation.GetDimensionality()
    equation.dataCache.DependentDataContainsZeroFlag = True # do not need relative error calculations, this flag turns them off
        
    numpy.random.seed(3) # yield repeatable results
    randomArrayX = numpy.random.random_sample(195)
    randomArrayY = numpy.random.random_sample(195)
    equation.dataCache.allDataCacheDictionary['Weights'] = []
    
    # ensure an X data range for the graph, as on
    # all other plots the X data range is constant
    randomArrayX[0] = 0.001
    randomArrayX[1] = 0.999
    
    for i in range(0, 360):
        extraZerosString = ''
        if i < 100:
            extraZerosString = '0'
        if i < 10:
            extraZerosString = '00'
        
        if not i % largeAnimationModulus:
            print(i, end=' ');sys.stdout.flush()
            index = i
            if i > 180:
                index = 180 - (i%180)
            equation = pyeq3.Models_2D.Polynomial.Linear('SSQABS')
            xdata = randomArrayX[:15 + index/2]
            ydata = randomArrayY[:15 + index/2]
            equation.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([xdata, numpy.ones(len(xdata))])
            IndependentDataArray = equation.dataCache.allDataCacheDictionary['IndependentData']
            equation.dataCache.allDataCacheDictionary['DependentData'] =  ydata
            DependentDataArray = equation.dataCache.allDataCacheDictionary['DependentData']
            equation.dataCache.allDataCacheDictionary['Weights'] =  []
            equation.Solve()
            equation.CalculateModelErrors(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
            equation.CalculateCoefficientAndFitStatistics()
            
            fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_large.png'
            SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
            
            # small animation, create fewer frames
            if not (i % smallAnimationModulus):
                graphWidth = smallAnimationGraphWidth
                graphHeight = smallAnimationGraphHeight
                fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_small.png'
                SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
                
    print()
    # convert all PNG files to GIF for gifsicle to use
    commandString = 'mogrify -format gif *png'
    print("Calling " + commandString)
    p = os.popen(commandString)
    p.close()
    
    # make small GIF animation
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(smallAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*small.gif > ' + specificBaseName + '_small.gif'
    print("Calling " + commandString)
    p = os.popen(commandString)
    p.close()

    # make large GIF animation        
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(largeAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*large.gif > ' + specificBaseName + '_large.gif'
    print("Calling " + commandString)
    p = os.popen(commandString)
    p.close()

    # remove unused files, saving the ones in this list
    stillImageFileNameList = [specificBaseName + '_ci000_large.png',
                              specificBaseName + '_ci180_large.png']
    currentDir = os.listdir('.')
    for filename in currentDir:
        if (-1 != filename.find('_ci')) and (-1 != filename.find('small')):
            os.remove(filename)
        if (-1 != filename.find('_ci')) and (-1 != filename.find('large.gif')):
            os.remove(filename)
        if (-1 != filename.find(specificBaseName)) and (-1 != filename.find('_large.png')) and (filename not in stillImageFileNameList):
            os.remove(filename)

    return [htmlTitle, htmlText, specificBaseName, stillImageFileNameList]
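
Examples 68-70 show the same animation scripts in their Python 2 form ("print i,;sys.stdout.flush()") and Python 3 form ("print(i, end=' ');sys.stdout.flush()"). When only Python 3.3 or later needs to be supported, print() can flush by itself, which folds the two statements into one; a sketch, with the frame loop and sleep standing in for the real rendering work:

    import time

    def render_frames(n_frames=12):
        for i in range(n_frames):
            # flush=True (Python 3.3+) replaces a separate sys.stdout.flush()
            # after each progress number printed on the same line.
            print(i, end=' ', flush=True)
            time.sleep(0.05)              # stand-in for producing one frame
        print()

    if __name__ == '__main__':
        render_frames()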

Example 71

Project: pyNastran
Source File: test_bdf.py
View license
def run_bdf(folder, bdf_filename, debug=False, xref=True, check=True, punch=False,
            cid=None, mesh_form='combined', is_folder=False, print_stats=False,
            sum_load=False, size=8, precision='single',
            quiet=False,
            reject=False, dynamic_vars=None):
    """
    Runs a single BDF

    Parameters
    ----------
    folder : str
        the folder where the bdf_filename is
    bdf_filename : str
        the bdf file to analyze
    debug : bool, optional
        run with debug logging (default=False)
    xref : bool / str, optional
        True : cross reference the model
        False  : don't cross reference the model
        'safe' : do safe cross referencing
    check : bool, optional
        validate cards for things like mass, area, etc.
    punch : bool, optional
        this is a PUNCH file (no executive/case control decks)
    cid : int / None, optional
        convert the model grids to an alternate coordinate system (default=None; no conversion)
    mesh_form : str, optional, {'combined', 'separate'}
        'combined' : interspersed=True
        'separate' : interspersed=False
    is_folder : bool, optional
        attach the test path and the folder to the bdf_filename
    print_stats : bool, optional
        get a nicely formatted message of all the cards in the model
    sum_load : bool, optional
        Sum the static loads (doesn't work for frequency-based loads)
    size : int, optional, {8, 16}
        The field width of the model
    is_double : bool, optional
        Is this a double precision model?
            True : size = 16
            False : size = {8, 16}
    reject : bool, optional
        True : all the cards are rejected
        False : the model is read
    nastran : str, optional
        the path to nastran (default=''; no analysis)
    post : int, optional
        the PARAM,POST,value to run
    dynamic_vars : dict[str] = int / float / str / None
        support OpenMDAO syntax  %myvar; max variable length=7
    quiet : bool; default=False
        suppresses print messages
    dumplines: bool; default=False
        writes pyNastran_dump.bdf
    dictsort : bool; default=False
        writes pyNastran_dict.bdf
    dev : bool; default=False
        True : crashes if an Exception occurs
        False : doesn't crash; useful for running many tests
    """
    if not quiet:
        print('debug = %s' % debug)
    if dynamic_vars is None:
        dynamic_vars = {}

    # TODO: why do we need this?
    bdf_model = str(bdf_filename)
    if not quiet:
        print("bdf_model = %s" % bdf_model)
    if is_folder:
        bdf_model = os.path.join(test_path, folder, bdf_filename)

    assert os.path.exists(bdf_model), '%r doesnt exist' % bdf_model

    print("before read bdf, Memory usage: %s (Mb) " % memory_usage_psutil())
    #print('before read bdf, Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    fem1 = BDF(debug=debug, log=None)
    if dynamic_vars:
        fem1.set_dynamic_syntax(dynamic_vars)

    fem1.log.info('starting fem1')
    sys.stdout.flush()
    fem2 = None
    diff_cards = []
    try:
        out_model = run_fem1(fem1, bdf_model, mesh_form, xref, punch, sum_load, size, precision, cid)
        fem2 = run_fem2(bdf_model, out_model, xref, punch, sum_load, size, precision, reject,
                        debug=debug, log=None)
        diff_cards = compare(fem1, fem2, xref=xref, check=check, print_stats=print_stats)

    except KeyboardInterrupt:
        sys.exit('KeyboardInterrupt...sys.exit()')
    #except IOError:
        #pass
    #except AttributeError:  # only temporarily uncomment this when running lots of tests
        #pass
    #except SyntaxError:  # only temporarily uncomment this when running lots of tests
        #pass
    #except AssertionError:  # only temporarily uncomment this when running lots of tests
        #pass
    except SystemExit:
        sys.exit('sys.exit...')
    except:
        #exc_type, exc_value, exc_traceback = sys.exc_info()
        #print("\n")
        traceback.print_exc(file=sys.stdout)
        #print msg
        print("-" * 80)
        raise

    print("-" * 80)
    return (fem1, fem2, diff_cards)
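
The BDF test runner above prints the model path, logs "starting fem1", and flushes stdout just before the model reading and comparison work that may raise or be interrupted. A stripped-down sketch of flushing status lines ahead of a heavy step; the file name and messages are placeholders, not pyNastran's:

    import sys

    def run_model(path):
        print('bdf_model = %s' % path)
        print('starting read')
        # Push everything printed so far out of the buffer before the long
        # step below, which may crash or be killed and lose buffered output.
        sys.stdout.flush()
        # ... the expensive model-reading work would go here ...
        return path

    if __name__ == '__main__':
        run_model('model.bdf')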

Example 72

Project: pyeq3
Source File: generateOutput.py
View license
def MissingOffset_A():
    specificBaseName = inspect.stack()[0][3]
    htmlTitle = 'Equation Missing An Offset'
    htmlText = '''
This illustrates the effect of fitting data with<br>
an offset to an equation that does not have one.<br>
<br>
This can be caused by experimental equipment<br>
introducing bias (such as a DC offset) during<br>
data acquisition.  Fitting the data to an<br>
equation with an offset will reveal the bias.'''
    thumbnailAnimationName = specificBaseName + '_small.gif'
    fullsizeAnimationName = specificBaseName + '_large.gif'
    
    print(specificBaseName)

    Ymax = 14.0
    Ymin = -4.0
    
    equation = pyeq3.Models_2D.Exponential.SimpleExponential('SSQABS')
    dimensionality = equation.GetDimensionality()
    equation.dataCache.DependentDataContainsZeroFlag = True # do not need relative error calculations, this flag turns them off
        
    fixedArray = numpy.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    
    for i in range(0, 360):
        extraZerosString = ''
        if i < 100:
            extraZerosString = '0'
        if i < 10:
            extraZerosString = '00'
        
        equation.dataCache.allDataCacheDictionary['IndependentData'] = copy.copy(rawData[0])
        IndependentDataArray = equation.dataCache.allDataCacheDictionary['IndependentData']
        equation.dataCache.allDataCacheDictionary['DependentData'] = copy.copy(rawData[1])
        equation.dataCache.allDataCacheDictionary['Weights'] = []
        
        if not i % largeAnimationModulus:
            print(i, end=' ');sys.stdout.flush()
            offset = numpy.fabs(i-180) / 90.0
            equation.dataCache.allDataCacheDictionary['IndependentData'] =  numpy.array([fixedArray, numpy.ones(len(fixedArray))])
            IndependentDataArray = equation.dataCache.allDataCacheDictionary['IndependentData']
            equation.dataCache.allDataCacheDictionary['DependentData'] =  numpy.power(2.0, fixedArray + offset)
            DependentDataArray = equation.dataCache.allDataCacheDictionary['DependentData']
            equation.dataCache.allDataCacheDictionary['Weights'] =  []
            equation.Solve()
            equation.CalculateModelErrors(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
            equation.CalculateCoefficientAndFitStatistics()
            
            fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_large.png'
            SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
            
            # small animation, create fewer frames
            if not (i % smallAnimationModulus):
                graphWidth = smallAnimationGraphWidth
                graphHeight = smallAnimationGraphHeight
                fileName = specificBaseName + '_ci' + extraZerosString + str(i) + '_small.png'
                SaveModelScatterConfidence(fileName, equation, Ymax, Ymin)
                
    print()
    # convert all PNG files to GIF for gifsicle to use
    commandString = 'mogrify -format gif *png'
    print("Calling " + commandString)
    p = os.popen(commandString)
    p.close()
    
    # make small GIF animation
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(smallAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*small.gif > ' + specificBaseName + '_small.gif'
    print("Calling " + commandString)
    p = os.popen(commandString)
    p.close()

    # make large GIF animation        
    commandString = 'gifsicle --loop --colors 256 --delay ' + str(largeAnimationDelayBetweenFrames) + " " + specificBaseName + '_ci*large.gif > ' + specificBaseName + '_large.gif'
    print("Calling " + commandString)
    p = os.popen(commandString)
    p.close()

    # remove unused files, saving the ones in this list
    stillImageFileNameList = [specificBaseName + '_ci180_large.png',
                              specificBaseName + '_ci270_large.png',
                              specificBaseName + '_ci000_large.png']
    currentDir = os.listdir('.')
    for filename in currentDir:
        if (-1 != filename.find('_ci')) and (-1 != filename.find('small')):
            os.remove(filename)
        if (-1 != filename.find('_ci')) and (-1 != filename.find('large.gif')):
            os.remove(filename)
        if (-1 != filename.find(specificBaseName)) and (-1 != filename.find('_large.png')) and (filename not in stillImageFileNameList):
            os.remove(filename)

    return [htmlTitle, htmlText, specificBaseName, stillImageFileNameList]
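
Like the other generateOutput examples, this function prints "Calling <command>" and then launches mogrify and gifsicle through os.popen without flushing first, so in a redirected log the child's output can land before the announcement. A sketch of keeping that ordering explicit with subprocess; the echo command is a placeholder, not part of the original script:

    import subprocess
    import sys

    def call(command):
        # print() output is buffered inside Python, while the child process
        # writes straight to the shared file descriptor; flush first so both
        # appear in order when stdout is redirected to a file.
        print('Calling ' + ' '.join(command))
        sys.stdout.flush()
        subprocess.run(command, check=False)

    if __name__ == '__main__':
        call(['echo', 'hello'])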

Example 73

Project: pyNastran
Source File: op2.py
View license
    def transform_gpforce_to_global(self, nids_all, nids_transform, i_transform, coords, xyz_cid0=None):
        """
        Transforms the ``data`` of GPFORCE results into the
        global coordinate system for those nodes with different output
        coordinate systems. Takes indices and transformation matrices
        for nodes whose output is in a coordinate system other than the
        global one.

        Used in combination with ``BDF.get_displacement_index_transforms``
        and/or ``BDF.get_displacement_index``.

        Parameters
        ----------
        nids_transform : dict{int cid : int ndarray nds}
            Dictionary from coordinate id to corresponding node ids.

        i_transform : dict{int cid : int ndarray}
            Dictionary from coordinate id to the indices of the nodes in
            ``BDF.point_ids`` whose output (``CD``) is in that
            coordinate system.

        coords : dict{int cid : Coord()}
            Dictionary from coordinate id to the coordinate object.
            Required whether CD is rectangular or curvilinear.
        """
        disp_like_dicts = [
            # TODO: causes test_op2_solid_shell_bar_01_gpforce_xyz to fail
            #       even though it should be uncommented
            self.grid_point_forces,
        ]
        for disp_like_dict in disp_like_dicts:
            if not disp_like_dict:
                continue
            self.log.debug('-----------')
            for subcase, result in iteritems(disp_like_dict):
                self.log.debug('result.name = %s' % result.class_name)
                data = result.data

                # inode_xyz :
                #    the indices of the nodes in the model grid point list
                for cid, inode_xyz in iteritems(i_transform):
                    self.log.debug('cid = %s' % cid)
                    if cid in [-1, 0]:
                        continue
                    coord = coords[cid]
                    coord_type = coord.type
                    cid_transform = coord.beta()

                    is_global_cid = False
                    if np.diagonal(cid_transform).sum() == 3.:
                        is_global_cid = True
                    nids = np.array(nids_transform[cid])

                    #from pyNastran.op2.tables.ogf_gridPointForces.ogf_objects import RealGridPointForcesArray
                    #result = RealGridPointForcesArray()
                    if result.is_unique: # TODO: doesn't support preload
                        #self.node_element = zeros((self.ntimes, self.ntotal, 2), dtype='int32')
                        nids_all_gp = result.node_element[0, :, 0]

                        self.log.debug('nids_all_gp = %s' % list(nids_all_gp))
                        self.log.debug('nids = %s' % list(nids))

                        # the indices of the grid points that we're transforming
                        inode_gp = np.where(np.in1d(nids_all_gp, nids))[0]
                        self.log.debug('inode_gp = %s' % list(inode_gp))
                        nids_gp = nids_all_gp[inode_gp]
                        self.log.debug('nids_gp = %s' % list(nids_gp))
                        assert np.array_equal(np.unique(nids_gp), np.unique(nids)), 'nids_gp=%s nids=%s' % (nids_gp, nids)
                        self.log.debug('---------')
                        # the transformation index to go from xyz to the grid point forces
                        inode_gp_xyz = np.searchsorted(nids_all, nids_all_gp)
                        self.log.debug('nids_all = %s' % list(nids_all))
                        self.log.debug('nids_all_gp = %s' % list(nids_all_gp))
                        self.log.debug('inode_xyz = %s' % list(inode_xyz))
                        self.log.debug('inode_gp_xyz = %s' % list(inode_gp_xyz))
                        sys.stdout.flush()
                        assert len(inode_gp_xyz) == len(nids_all_gp), len(nids_all_gp)

                    else:
                        raise NotImplementedError(result)

                    if coord_type in ['CORD2R', 'CORD1R']:
                        if is_global_cid:
                            continue
                        #print('coord\n', coord)
                        #print(cid_transform)
                        #print('inode = %s' % inode)
                        #print('rectangular')
                        translation = data[:, inode_gp, :3]
                        rotation = data[:, inode_gp, 3:]
                        data[:, inode_gp, :3] = translation.dot(cid_transform)
                        data[:, inode_gp, 3:] = rotation.dot(cid_transform)
                    elif coord_type in ['CORD2C', 'CORD1C']:
                        #print('cylindrical')
                        if xyz_cid0 is None:
                            msg = ('xyz_cid is required for cylindrical '
                                   'coordinate transforms')
                            raise RuntimeError(msg)
                        xyzi = xyz_cid0[inode_xyz, :]
                        rtz_cid = coord.xyz_to_coord_array(xyzi)
                        theta_xyz = rtz_cid[inode_xyz, 1]
                        theta = rtz_cid[inode_gp_xyz, 1]
                        self.log.debug('coord\n%s' % coord)
                        self.log.debug(cid_transform)
                        #print('theta_xyz = %s' % list(theta_xyz))
                        #print('theta     = %s' % list(theta))

                        for itime in range(data.shape[0]):
                            #inode = np.where(np.in1d(nids_all, 2))[0]
                            #print('start', data[itime, inode_gp, :3])
                            translation = data[itime, inode_gp, :3]
                            rotation = data[itime, inode_gp, 3:]
                            if 0:
                                # horrible
                                translation = coord.coord_to_xyz_array(data[itime, inode_gp, :3])
                                rotation = coord.coord_to_xyz_array(data[itime, inode_gp, 3:])
                                data[itime, inode_gp, :3] = translation.dot(cid_transform)
                                data[itime, inode_gp, 3:] = rotation.dot(cid_transform)
                            elif 0:
                                # expectedly horrible
                                translation[:, 1] += theta
                                rotation[:, 1] += theta
                                translation = coord.coord_to_xyz_array(data[itime, inode_gp, :3])
                                rotation = coord.coord_to_xyz_array(data[itime, inode_gp, 3:])
                                data[itime, inode_gp, :3] = translation.dot(cid_transform)
                                data[itime, inode_gp, 3:] = rotation.dot(cid_transform)
                            elif 0:
                                # actually not bad...
                                translation = coord.xyz_to_coord_array(translation)
                                rotation = coord.xyz_to_coord_array(rotation)
                                translation[:, 1] += theta
                                rotation[:, 1] += theta
                                translation = coord.coord_to_xyz_array(translation)
                                rotation = coord.coord_to_xyz_array(rotation)
                                data[itime, inode_gp, :3] = translation.dot(cid_transform)
                                data[itime, inode_gp, 3:] = rotation.dot(cid_transform)
                            elif 0:
                                # not that bad, worse than previous
                                translation = coord.xyz_to_coord_array(translation)
                                rotation = coord.xyz_to_coord_array(rotation)
                                translation[:, 1] += theta
                                rotation[:, 1] += theta
                                translation = coord.coord_to_xyz_array(translation)
                                rotation = coord.coord_to_xyz_array(rotation)
                                data[itime, inode_gp, :3] = translation.dot(cid_transform.T)
                                data[itime, inode_gp, 3:] = rotation.dot(cid_transform.T)
                            elif 1:
                                # doesn't work...actually pretty close
                                data[itime, inode_gp, :3] = translation.dot(cid_transform)
                                data[itime, inode_gp, 3:] = rotation.dot(cid_transform)
                            elif 0:
                                # very, very close
                                data[itime, inode_gp, :3] = translation.dot(cid_transform.T)
                                data[itime, inode_gp, 3:] = rotation.dot(cid_transform.T)
                            elif 0:
                                # is this just the same as one of the previous?
                                data[itime, inode_gp, :3] = cid_transform.T.dot(translation.T).T
                                data[itime, inode_gp, 3:] = cid_transform.T.dot(rotation.T).T
                            else:
                                raise RuntimeError('no option selected...')

                            #if is_global_cid:
                                #data[itime, inode, :3] = translation
                                #data[itime, inode, 3:] = rotation
                                #continue
                            #data[itime, inode_gp, :3] = translation.dot(cid_transform)
                            #data[itime, inode_gp, 3:] = rotation.dot(cid_transform)
                            #print('end', data[itime, inode_gp, :3])

                    elif coord_type in ['CORD2S', 'CORD1S']:
                        self.log.debug('spherical')
                        if xyz_cid0 is None:
                            msg = ('xyz_cid is required for spherical '
                                   'coordinate transforms')
                            raise RuntimeError(msg)
                        xyzi = xyz_cid0[inode, :]
                        rtp_cid = coord.xyz_to_coord_array(xyzi)
                        theta = rtp_cid[:, 1]
                        phi = rtp_cid[:, 2]
                        for itime in range(data.shape[0]):
                            translation = data[itime, inode, :3]
                            rotation = data[itime, inode, 3:]
                            #if 0:
                                #translation[:, 1] += theta
                                #translation[:, 2] += phi
                                #rotation[:, 1] += theta
                                #rotation[:, 2] += phi
                            translation = coord.coord_to_xyz_array(data[itime, inode, :3])
                            rotation = coord.coord_to_xyz_array(data[itime, inode, 3:])
                            #if is_global_cid:
                                #data[itime, inode, :3] = translation
                                #data[itime, inode, 3:] = rotation
                                #continue
                            data[itime, inode, :3] = translation.dot(cid_transform)
                            data[itime, inode, 3:] = rotation.dot(cid_transform)
                    else:
                        raise RuntimeError(coord)
        self.log.debug('-----------')
        return

Example 74

Project: caringcaribou
Source File: xcp.py
View license
def xcp_memory_dump(args):
    """
    Performs a memory dump to file or stdout via XCP.

    :param args: A namespace containing src, dst, start, length and f
    """
    send_arb_id = int_from_str_base(args.src)
    rcv_arb_id = int_from_str_base(args.dst)
    start_address = int_from_str_base(args.start)
    length = int_from_str_base(args.length)
    dump_file = args.f
    # FIXME max size is 0xfc for test board
    max_segment_size = 0x7

    global byte_counter, bytes_left, dump_complete, segment_counter, timeout_start
    # Timeout timer
    dump_complete = False
    # Counters for data length
    byte_counter = 0
    segment_counter = 0

    def handle_upload_reply(msg):
        global byte_counter, bytes_left, dump_complete, timeout_start, segment_counter
        if msg.arbitration_id != rcv_arb_id:
            return
        if msg.data[0] == 0xfe:
            decode_xcp_error(msg)
            return
        if msg.data[0] == 0xff:
            # Reset timeout timer
            timeout_start = datetime.now()
            # Calculate end index of data to handle
            end_index = min(8, bytes_left + 1)

            if dump_file:
                with open(dump_file, "ab") as outfile:
                    outfile.write(bytearray(msg.data[1:end_index]))
            else:
                print(" ".join(["{0:02x}".format(i) for i in msg.data[1:end_index]]))
            # Update counters
            byte_counter += 7
            bytes_left -= 7  # FIXME Hmm
            if bytes_left < 1:
                if dump_file:
                    print "\rDumping segment {0} ({1} b, 0 b left)".format(segment_counter, length)
                print("Dump complete!")
                dump_complete = True
            elif byte_counter > max_segment_size-1:
                # Dump another segment
                segment_counter += 1
                if dump_file:
                    # Print progress
                    print "\rDumping segment {0} ({1} b, {2} b left)".format(
                        segment_counter, ((segment_counter+1)*max_segment_size + byte_counter), bytes_left),
                    stdout.flush()

                byte_counter = 0
                can_wrap.send_single_message_with_callback([0xf5, min(max_segment_size, bytes_left)],
                                                           handle_upload_reply)

    def handle_set_mta_reply(msg):
        if msg.arbitration_id != rcv_arb_id:
            return
        if msg.data[0] == 0xfe:
            decode_xcp_error(msg)
            return
        if msg.data[0] == 0xff:
            print("Set MTA acked")
            print("Dumping data:")
            # Initiate dumping
            if dump_file:
                print "\rDumping segment 0",
            can_wrap.send_single_message_with_callback([0xf5, min(max_segment_size, bytes_left)], handle_upload_reply)
        else:
            print("Unexpected reply: {0}\n".format(msg))

    def handle_connect_reply(msg):
        if msg.arbitration_id != rcv_arb_id:
            return
        if msg.data[0] == 0xfe:
            decode_xcp_error(msg)
            return
        if msg.data[0] == 0xff:
            print "Connected: Using",
            # Check connect reply to see whether to reverse byte order for MTA
            msb_format = msg.data[2] & 1
            if msb_format:
                print("Motorola format (MSB lower)")
            else:
                print("Intel format (LSB lower)")
                r.reverse()
            can_wrap.send_single_message_with_callback(
                [0xf6, 0x00, 0x00, 0x00, r[0], r[1], r[2], r[3]],
                handle_set_mta_reply)
        else:
            print("Unexpected connect reply: {0}\n".format(msg))

    # Calculate address bytes (4 bytes, least significant first)
    r = []
    n = start_address
    bytes_left = length
    # Calculate start address (r is automatically reversed after connect if needed)
    n &= 0xFFFFFFFF
    for i in range(4):
        r.append(n & 0xFF)
        n >>= 8
    # Make sure dump_file can be opened if specified (clearing it if it already exists)
    if dump_file:
        try:
            with open(dump_file, "w") as tmp:
                pass
        except IOError as e:
            print("Error when opening dump file:\n\n{0}".format(e))
            return
    # Initialize
    with CanActions(arb_id=send_arb_id) as can_wrap:
        print("Attempting XCP memory dump")
        # Connect and prepare for dump
        can_wrap.send_single_message_with_callback([0xff], handle_connect_reply)
        # Idle timeout handling
        timeout_start = datetime.now()
        while not dump_complete and datetime.now() - timeout_start < timedelta(seconds=3):
            pass
        if not dump_complete:
            print("\nERROR: Dump ended due to idle timeout")

Example 75

Project: pyomo
Source File: pyro_mip_server.py
View license
    def process(self, data):
        self._worker_task_return_queue = self._current_task_client
        data = pyutilib.misc.Bunch(**data)

        if hasattr(data, 'action') and \
           data.action == 'Pyomo_pyro_mip_server_shutdown':
            print("Received shutdown request")
            self._worker_shutdown = True
            return

        time_start = time.time()
        with pyutilib.services.TempfileManager.push():
            #
            # Construct the solver on this end, based on the input
            # type stored in "data.opt".  This is slightly more
            # complicated for asl-based solvers, whose real executable
            # name is stored in data.solver_options["solver"].
            #
            with SolverFactory(data.opt) as opt:

                if opt is None:
                    self._worker_error = True
                    return TaskProcessingError("Problem constructing solver `"
                                               +data.opt+"'")

                # here is where we should set any options required by
                # the solver, available as specific attributes of the
                # input data object.
                solver_options = data.solver_options
                del data.solver_options
                for key,value in solver_options.items():
                    setattr(opt.options,key,value)

                problem_filename_suffix = os.path.split(data.filename)[1]
                temp_problem_filename = \
                    pyutilib.services.TempfileManager.\
                    create_tempfile(suffix="."+problem_filename_suffix)

                with open(temp_problem_filename, 'w') as f:
                    f.write(data.file)

                if data.warmstart_filename is not None:
                    warmstart_filename_suffix = \
                        os.path.split(data.warmstart_filename)[1]
                    temp_warmstart_filename = \
                        pyutilib.services.TempfileManager.\
                        create_tempfile(suffix="."+warmstart_filename_suffix)
                    with open(temp_warmstart_filename, 'w') as f:
                        f.write(data.warmstart_file)
                    assert opt.warm_start_capable()
                    assert (('warmstart' in data.kwds) and \
                            data.kwds['warmstart'])
                    data.kwds['warmstart_file'] = temp_warmstart_filename

                now = datetime.datetime.now()
                if self._verbose:
                    print(str(now) + ": Applying solver="+data.opt
                          +" to solve problem="+temp_problem_filename)
                    sys.stdout.flush()
                results = opt.solve(temp_problem_filename,
                                    **data.kwds)
                assert results._smap_id is None
                # NOTE: This results object contains solutions,
                # because no model is provided (just a model file).
                # Also, the results._smap_id value is None.

        results.pyomo_solve_time = time.time()-time_start

        now = datetime.datetime.now()
        if self._verbose:
            print(str(now) + ": Solve completed - number of solutions="
                  +str(len(results.solution)))
            sys.stdout.flush()

        # PYTHON3 / PYRO4 Fix
        # The default serializer in Pyro4 is not pickle and does not
        # support user defined types (e.g., the results object).
        # Therefore, we pickle the results object before sending it
        # over the wire so the user does not need to change the Pyro
        # serializer.
        results = pickle.dumps(results, protocol=pickle.HIGHEST_PROTOCOL)

        if using_pyro4:
            #
            # The standard bytes object returned by pickle.dumps must be
            # converted to base64 to avoid errors sending over the
            # wire with Pyro4. Also, the base64 bytes must be wrapped
            # in a str object to avoid a different set of Pyro4 errors
            # related to its default serializer (Serpent)
            if six.PY3:
                results = str(base64.encodebytes(results))
            else:
                results = base64.encodestring(results)

        return results
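
The comment block above explains why the results object is pickled and then base64-encoded before crossing the Pyro4 boundary. A small sketch of that round trip under the same assumptions (a text-based default serializer such as Serpent, an arbitrary picklable results object), using the Python 3 spellings encodebytes/decodebytes; the function names are illustrative and the decoding side in pyomo may differ:

import base64
import pickle

def encode_results(results):
    """Pickle an arbitrary object and wrap it so a text-based serializer accepts it."""
    payload = pickle.dumps(results, protocol=pickle.HIGHEST_PROTOCOL)
    return base64.encodebytes(payload).decode('ascii')

def decode_results(encoded):
    """Reverse encode_results on the receiving end."""
    return pickle.loads(base64.decodebytes(encoded.encode('ascii')))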

Example 76

Project: pyNastran
Source File: test_op2.py
View license
def run_op2(op2_filename, make_geom=False, write_bdf=False,
            write_f06=True, write_op2=False, write_xlsx=False,
            is_mag_phase=False, is_sort2=False,
            delete_f06=False,
            subcases=None, exclude=None, short_stats=False,
            compare=True, debug=False, binary_debug=False,
            quiet=False, check_memory=False, stop_on_failure=True, dev=False):
    """
    Runs an OP2

    Parameters
    ----------
    op2_filename : str
        path of file to test
    make_geom : bool; default=False
        should the GEOMx, EPT, MPT, DYNAMIC, DIT, etc. tables be read
    write_bdf : bool; default=False
        should a BDF be written based on the geometry tables
    write_f06 : bool; default=True
        should an F06 be written based on the results
    write_op2 : bool; default=False
        should an OP2 be written based on the results
    write_xlsx : bool; default=False
        should an XLSX be written based on the results
    is_mag_phase : bool; default=False
        False : write real/imag results
        True : write mag/phase results
        For static results, does nothing
    is_sort2 : bool; default=False
        False : writes "transient" data in SORT1
        True : writes "transient" data in SORT2
    delete_f06 : bool; default=False
        deletes the F06 (assumes write_f06 is True)
    subcases : List[int, ...]; default=None
        limits subcases to specified values; default=None -> no limiting
    exclude : List[str, ...]; default=None
        limits result types; (remove what's listed)
    short_stats : bool; default=False
        print a short version of the op2 stats
    compare : bool
        True : compares vectorized result to slow vectorized result
        False : doesn't run slow vectorized result
    debug : bool; default=False
        dunno???
    binary_debug : bool; default=False
        creates a very cryptic developer debug file showing exactly what was parsed
    quiet : bool; default=False
        dunno???
    stop_on_failure : bool; default=True
        is this used???
    """
    op2 = None
    op2_nv = None
    if subcases is None:
        subcases = []
    if exclude is None:
        exclude = []
    assert '.op2' in op2_filename.lower(), 'op2_filename=%s is not an OP2' % op2_filename
    is_passed = False

    fname_base = os.path.splitext(op2_filename)[0]
    bdf_filename = fname_base + '.test_op2.bdf'

    if isinstance(subcases, string_types):
        if '_' in subcases:
            subcases = [int(i) for i in subcases.split('_')]
        else:
            subcases = [int(subcases)]
    if not quiet:
        print('subcases = %s' % subcases)

    debug_file = None
    model = os.path.splitext(op2_filename)[0]
    if binary_debug or write_op2:
        debug_file = model + '.debug.out'
    #print('debug_file = %r' % debug_file, os.getcwd())

    if make_geom and not is_geom:
        raise RuntimeError('make_geom=%s is not supported' % make_geom)
    if make_geom:
        op2 = OP2Geom(debug=debug)
        op2_nv = OP2Geom(debug=debug, debug_file=debug_file)
        op2_bdf = OP2Geom(debug=debug)
        op2_bdf.set_error_storage(nparse_errors=0, stop_on_parsing_error=True,
                                  nxref_errors=0, stop_on_xref_error=True)
    else:
        op2 = OP2(debug=debug)
        op2_nv = OP2(debug=debug, debug_file=debug_file) # have to double write this until
    op2_nv.use_vector = False

    op2.set_subcases(subcases)
    op2_nv.set_subcases(subcases)
    op2.remove_results(exclude)
    op2_nv.remove_results(exclude)

    if is_memory and check_memory:
        if is_linux: # linux
            kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        else: # windows
            kb = get_memory_usage() / 1024
        mb = kb / 1024.
        print("Memory usage start: %s (KB); %.2f (MB)" % (kb, mb))

    try:
        #op2.read_bdf(op2.bdf_filename, includeDir=None, xref=False)
        if compare:
            op2_nv.read_op2(op2_filename)
        op2.read_op2(op2_filename)

        #op2a.get_op2_stats()
        if quiet:
            op2.get_op2_stats()
            op2.object_attributes()
            op2.object_methods()
        else:
            print("---stats for %s---" % op2_filename)
            print(op2.get_op2_stats(short=short_stats))
            op2.print_subcase_key()

        if write_bdf:
            assert make_geom, 'make_geom=%s' % make_geom
            op2._nastran_format = 'msc'
            op2.executive_control_lines = ['CEND\n']
            op2.write_bdf(bdf_filename, size=8)
            op2.log.debug('bdf_filename = %s' % bdf_filename)
            try:
                op2_bdf.read_bdf(bdf_filename)
            except:
                if dev and len(op2_bdf.card_count) == 0:
                    pass
                else:
                    raise
            #os.remove(bdf_filename)
        if compare:
            assert op2 == op2_nv

        if is_memory and check_memory:
            if is_linux: # linux
                kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            else: # windows
                kb = get_memory_usage() / 1024
            mb = kb / 1024.
            print("Memory usage     end: %s (KB); %.2f (MB)" % (kb, mb))

        if write_f06:
            op2.write_f06(model + '.test_op2.f06', is_mag_phase=is_mag_phase,
                          is_sort1=not is_sort2, quiet=quiet, repr_check=True)
            if delete_f06:
                try:
                    os.remove(model + '.test_op2.f06')
                except:
                    pass

        # we put it down here so we don't blame the dataframe for real errors
        if is_pandas:
            op2.build_dataframe()
        #if compare:
            #op2_nv.build_dataframe()

        if write_op2:
            model = os.path.splitext(op2_filename)[0]
            op2.write_op2(model + '.test_op2.op2', is_mag_phase=is_mag_phase)
            if delete_f06:
                try:
                    os.remove(model + '.test_op2.op2')
                except:
                    pass

        if write_xlsx:
            model = os.path.splitext(op2_filename)[0]
            op2.write_xlsx(model + '.test_op2.xlsx', is_mag_phase=is_mag_phase)
            if delete_f06:
                try:
                    os.remove(model + '.test_op2.xlsx')
                except:
                    pass

        if is_memory and check_memory:
            op2 = None
            del op2_nv
            if is_linux: # linux
                kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            else: # windows
                kb = get_memory_usage() / 1024
            mb = kb / 1024.
            print("Memory usage cleanup: %s (KB); %.2f (MB)" % (kb, mb))


        #table_names_f06 = parse_table_names_from_F06(op2.f06FileName)
        #table_names_op2 = op2.getTableNamesFromOP2()
        #print("subcases = ", op2.subcases)

        #if table_names_f06 != table_names_op2:
            #msg = 'table_names_f06=%s table_names_op2=%s' % (table_names_f06, table_names_op2)
            #raise RuntimeError(msg)
        #op2.case_control_deck.sol = op2.sol
        #print(op2.case_control_deck.get_op2_data())
        #print(op2.case_control_deck.get_op2_data())
        is_passed = True
    except KeyboardInterrupt:
        sys.stdout.flush()
        print_exc(file=sys.stdout)
        sys.stderr.write('**file=%s\n' % op2_filename)
        sys.exit('keyboard stop...')
    #except SortCodeError: # inherits from Runtime; comment this
        #is_passed = True

    #except RuntimeError: # the op2 is bad, not my fault; comment this
        #is_passed = True
        #if stop_on_failure:
            #raise
        #else:
            #is_passed = True
    #except RuntimeError:
        #pass
    #except ValueError:
        #pass
    #except FortranMarkerError:
        #pass
    except IOError: # missing file; this block should be uncommented
        #if stop_on_failure:
            #raise
        if not dev:
            raise
        is_passed = True
    #except UnicodeDecodeError:  # this block should be commented
        #is_passed = True
    #except NotImplementedError:  # this block should be commented
        #is_passed = True
    except FatalError:  # this block should be commented
        #if stop_on_failure:
            #raise
        if not dev:
            raise
        is_passed = True
    #except KeyError:  # this block should be commented
        #is_passed = True
    #except DeviceCodeError:  # this block should be commented
        #is_passed = True
    #except AssertionError:  # this block should be commented
        #is_passed = True
    #except RuntimeError: #invalid analysis code; this block should be commented
        #is_passed = True
    except SystemExit:
        #print_exc(file=sys.stdout)
        #sys.exit('stopping on sys.exit')
        raise
    #except NameError:  # variable isnt defined
    #    if stop_on_failure:
    #        raise
    #    else:
    #        is_passed = True
    #except IndexError: # this block should be commented
        #is_passed = True
    #except SyntaxError: #Param Parse; this block should be commented
        #if stop_on_failure:
            #raise
        #is_passed = True
    except:
        #print(e)
        if stop_on_failure:
            raise
        else:
            print_exc(file=sys.stdout)
            is_passed = False

    return op2, is_passed
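
In the KeyboardInterrupt handler above, sys.stdout.flush() runs before the traceback is printed and before sys.stderr is written. When stdout is redirected to a file or pipe it is block-buffered while stderr is not, so without the flush the buffered progress output can land after the error text in a combined log. A minimal, self-contained sketch of that ordering pattern (the loop and function name are illustrative):

import sys
import time
import traceback

def long_run(filename):
    """Flush stdout before error output so piped logs stay in order on Ctrl-C."""
    try:
        for i in range(10 ** 6):
            if i % 100000 == 0:
                print('step %i' % i)
            time.sleep(0.0001)
    except KeyboardInterrupt:
        sys.stdout.flush()                    # push buffered progress output first
        traceback.print_exc(file=sys.stdout)  # keep the traceback next to the normal log
        sys.stderr.write('**file=%s\n' % filename)
        sys.exit('keyboard stop...')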

Example 77

Project: catkin_tools
Source File: controllers.py
View license
    def run(self):
        queued_jobs = []
        active_jobs = []
        completed_jobs = {}
        failed_jobs = []
        warned_jobs = []

        cumulative_times = dict()
        start_times = dict()
        active_stages = dict()

        start_time = self.pre_start_time or time.time()
        last_update_time = time.time()

        # If the status rate is too low, just disable it
        if self.active_status_rate < 1E-3:
            self.show_active_status = False
        else:
            update_duration = 1.0 / self.active_status_rate

        # Disable the wide log padding if the status is disabled
        if not self.show_active_status:
            disable_wide_log()

        while True:
            # Check if we should stop
            if not self.keep_running:
                wide_log(clr('[{}] An internal error occurred!').format(self.label))
                return

            # Write a continuously-updated status line
            if self.show_active_status:

                # Try to get an event from the queue (non-blocking)
                try:
                    event = self.event_queue.get(False)
                except Empty:
                    # Determine if the status should be shown based on the desired
                    # status rate
                    elapsed_time = time.time() - last_update_time
                    show_status_now = elapsed_time > update_duration

                    if show_status_now:
                        # Print live status (overwrites last line)
                        status_line = clr('[{} {} s] [{}/{} complete] [{}/{} jobs] [{} queued]').format(
                            self.label,
                            format_time_delta_short(time.time() - start_time),
                            len(completed_jobs),
                            len(self.jobs),
                            job_server.running_jobs(),
                            job_server.max_jobs(),
                            len(queued_jobs) + len(active_jobs) - len(active_stages)
                        )

                        # Show failed jobs
                        if len(failed_jobs) > 0:
                            status_line += clr(' [@!@{rf}{}@| @{rf}failed@|]').format(len(failed_jobs))

                        # Check load / mem
                        if not job_server.load_ok():
                            status_line += clr(' [@!@{rf}High Load@|]')
                        if not job_server.mem_ok():
                            status_line += clr(' [@!@{rf}Low Memory@|]')

                        # Add active jobs
                        if len(active_jobs) == 0:
                            status_line += clr(' @/@!@{kf}Waiting for jobs...@|')
                        else:
                            active_labels = []

                            for j, (s, t, p) in active_stages.items():
                                d = format_time_delta_short(cumulative_times[j] + time.time() - t)
                                if p == '':
                                    active_labels.append(clr('[{}:{} - {}]').format(j, s, d))
                                else:
                                    active_labels.append(clr('[{}:{} ({}%) - {}]').format(j, s, p, d))

                            status_line += ' ' + ' '.join(active_labels)

                        # Print the status line
                        # wide_log(status_line)
                        wide_log(status_line, rhs='', end='\r')
                        sys.stdout.flush()

                        # Store this update time
                        last_update_time = time.time()
                    else:
                        time.sleep(max(0.0, min(update_duration - elapsed_time, 0.01)))

                    # Only continue when no event was received
                    continue
            else:
                # Try to get an event from the queue (blocking)
                try:
                    event = self.event_queue.get(True)
                except Empty:
                    break

            # A `None` event is a signal to terminate
            if event is None:
                break

            # Handle the received events
            eid = event.event_id

            if 'JOB_STATUS' == eid:
                queued_jobs = event.data['queued']
                active_jobs = event.data['active']
                completed_jobs = event.data['completed']

                # Check if all jobs have finished in some way
                if all([len(event.data[t]) == 0 for t in ['pending', 'queued', 'active']]):
                    break

            elif 'STARTED_JOB' == eid:
                cumulative_times[event.data['job_id']] = 0.0
                wide_log(clr('Starting >>> {:<{}}').format(
                    event.data['job_id'],
                    self.max_jid_length))

            elif 'FINISHED_JOB' == eid:
                duration = format_time_delta(cumulative_times[event.data['job_id']])

                if event.data['succeeded']:
                    wide_log(clr('Finished <<< {:<{}} [ {} ]').format(
                        event.data['job_id'],
                        self.max_jid_length,
                        duration))
                else:
                    failed_jobs.append(event.data['job_id'])
                    wide_log(clr('Failed <<< {:<{}} [ {} ]').format(
                        event.data['job_id'],
                        self.max_jid_length,
                        duration))

            elif 'ABANDONED_JOB' == eid:
                # Create a human-readable reason string
                if 'DEP_FAILED' == event.data['reason']:
                    direct = event.data['dep_job_id'] == event.data['direct_dep_job_id']
                    if direct:
                        reason = clr('Depends on failed job {}').format(event.data['dep_job_id'])
                    else:
                        reason = clr('Depends on failed job {} via {}').format(
                            event.data['dep_job_id'],
                            event.data['direct_dep_job_id'])
                elif 'PEER_FAILED' == event.data['reason']:
                    reason = clr('Unrelated job failed')
                elif 'MISSING_DEPS' == event.data['reason']:
                    reason = clr('Depends on unknown jobs: {}').format(
                        ', '.join([clr('@!{}@|').format(jid) for jid in event.data['dep_ids']]))

                wide_log(clr('Abandoned <<< {:<{}} [ {} ]').format(
                    event.data['job_id'],
                    self.max_jid_length,
                    reason))

            elif 'STARTED_STAGE' == eid:
                active_stages[event.data['job_id']] = [event.data['stage_label'], event.time, '']
                start_times[event.data['job_id']] = event.time

                if self.show_stage_events:
                    wide_log(clr('Starting >> {}:{}').format(
                        event.data['job_id'],
                        event.data['stage_label']))

            elif 'STAGE_PROGRESS' == eid:
                active_stages[event.data['job_id']][2] = event.data['percent']

            elif 'SUBPROCESS' == eid:
                if self.show_stage_events:
                    wide_log(clr('Subprocess > {}:{} `{}`').format(
                        event.data['job_id'],
                        event.data['stage_label'],
                        event.data['stage_repro']))

            elif 'FINISHED_STAGE' == eid:
                # Get the stage duration
                duration = event.time - start_times[event.data['job_id']]
                cumulative_times[event.data['job_id']] += duration

                # This is no longer the active stage for this job
                del active_stages[event.data['job_id']]

                header_border = None
                header_title = None
                lines = []
                footer_title = None
                footer_border = None

                # Generate headers / borders for output
                if event.data['succeeded']:
                    footer_title = clr(
                        'Finished << {}:{}').format(
                            event.data['job_id'],
                            event.data['stage_label'])

                    if len(event.data['stderr']) > 0:
                        # Mark that this job warned about something
                        if event.data['job_id'] not in warned_jobs:
                            warned_jobs.append(event.data['job_id'])

                        # Output contains warnings
                        header_border = clr('@!@{yf}' + '_' * (terminal_width() - 1) + '@|')
                        header_title = clr(
                            'Warnings << {}:{} {}').format(
                                event.data['job_id'],
                                event.data['stage_label'],
                                event.data['logfile_filename'])
                        footer_border = clr('@{yf}' + '.' * (terminal_width() - 1) + '@|')
                    else:
                        # Normal output, no warnings
                        header_title = clr(
                            'Output << {}:{} {}').format(
                                event.data['job_id'],
                                event.data['stage_label'],
                                event.data['logfile_filename'])

                    # Don't print footer title
                    if not self.show_stage_events:
                        footer_title = None
                else:
                    # Output contains errors
                    header_border = clr('@!@{rf}' + '_' * (terminal_width() - 1) + '@|')
                    header_title = clr(
                        'Errors << {}:{} {}').format(
                            event.data['job_id'],
                            event.data['stage_label'],
                            event.data['logfile_filename'])
                    footer_border = clr('@{rf}' + '.' * (terminal_width() - 1) + '@|')

                    footer_title = clr(
                        'Failed << {}:{:<{}} [ Exited with code {} ]').format(
                            event.data['job_id'],
                            event.data['stage_label'],
                            max(0, self.max_jid_length - len(event.data['job_id'])),
                            event.data['retcode'])

                if self.show_buffered_stdout:
                    if len(event.data['interleaved']) > 0:
                        lines = [
                            l
                            for l in event.data['interleaved'].splitlines(True)
                            if (self.show_compact_io is False or len(l.strip()) > 0)
                        ]
                    else:
                        header_border = None
                        header_title = None
                        footer_border = None
                elif self.show_buffered_stderr:
                    if len(event.data['stderr']) > 0:
                        lines = [
                            l
                            for l in event.data['stderr'].splitlines(True)
                            if (self.show_compact_io is False or len(l.strip()) > 0)
                        ]
                    else:
                        header_border = None
                        header_title = None
                        footer_border = None

                if len(lines) > 0:
                    if self.show_repro_cmd:
                        if event.data['repro'] is not None:
                            lines.append(clr('@!@{kf}{}@|\n').format(event.data['repro']))

                    # Print the output
                    if header_border:
                        wide_log(header_border)
                    if header_title:
                        wide_log(header_title)
                    if len(lines) > 0:
                        wide_log(''.join(lines), end='\r')
                    if footer_border:
                        wide_log(footer_border)
                    if footer_title:
                        wide_log(footer_title)

            elif 'STDERR' == eid:
                if self.show_live_stderr and len(event.data['data']) > 0:
                    wide_log(self.format_interleaved_lines(event.data), end='\r')

            elif 'STDOUT' == eid:
                if self.show_live_stdout and len(event.data['data']) > 0:
                    wide_log(self.format_interleaved_lines(event.data), end='\r')

            elif 'MESSAGE' == eid:
                wide_log(event.data['msg'])

        # Print the full summary
        if self.show_full_summary:
            self.print_exec_summary(completed_jobs, warned_jobs, failed_jobs)

        # Print a compact summary
        if self.show_summary or self.show_full_summary:
            self.print_compact_summary(completed_jobs, warned_jobs, failed_jobs)

        # Print final runtime
        wide_log(clr('[{}] Runtime: {} total.').format(
            self.label,
            format_time_delta(time.time() - start_time)))
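
The status loop above polls the event queue without blocking, redraws a single status line no faster than active_status_rate allows, and flushes stdout after each '\r'-terminated write so the overwrite is visible immediately. A stripped-down sketch of that polling pattern (plain print instead of wide_log/clr; the function name and message are illustrative):

import queue
import sys
import time

def status_loop(event_queue, status_rate=10.0):
    """Drain an event queue while redrawing a status line at most status_rate times per second."""
    update_duration = 1.0 / status_rate
    start_time = time.time()
    last_update = start_time
    while True:
        try:
            event = event_queue.get(False)  # non-blocking get, as in the example
        except queue.Empty:
            elapsed = time.time() - last_update
            if elapsed > update_duration:
                # '\r' plus end='' overwrites the previous status; the flush makes it visible now
                print('\r[{0:6.1f} s] working...'.format(time.time() - start_time), end='')
                sys.stdout.flush()
                last_update = time.time()
            else:
                time.sleep(min(update_duration - elapsed, 0.01))
            continue
        if event is None:  # a None event signals termination, as in the example
            print()
            break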

Example 78

Project: datacommons
Source File: matching.py
View license
    @transaction.commit_on_success
    def handle(self, *args, **options):

        if not (self.subject and self.match and self.match_table_prefix):
            transaction.rollback()
            raise CommandError('You must define self.subject, self.match and self.match_table_prefix in the subclass.')

        count = self.subject.count()
        begin_at = int(options['begin_at_count'])
        table = options['table'] or self.generate_table_name()

        cursor = connections['default'].cursor()
        if options.get('table'):
            print 'Resuming with table {0}'.format(table)
        else:
            try:
                print 'Creating table {0}'.format(table)
                cursor.execute('create table {0} (id serial primary key, subject_id {1} not null, match_id {2}, confidence numeric not null, metadata_confidence numeric)'.format(table, self.subject_id_type, self.match_id_type))
                transaction.commit()
            except DatabaseError:
                transaction.rollback()
                raise CommandError('Database table exists. Wait a minute and try again.')

        try:
            for i, subject in enumerate(self.subject.all()[begin_at-1:]):
                log_msg = ''

                print u"{0}/{1}: {2}".format(begin_at+i, count, getattr(subject, self.subject_name_attr))
                #pre-process subject name
                subject_raw_name = self.preprocess_subject_name(getattr(subject, self.subject_name_attr))

                try:
                    subject_name = self.name_cleaver(subject_raw_name).parse()

                    if self.name_processing_failed(subject_name):
                        continue
                except:
                    continue
                finally:
                    transaction.rollback()

                # search match entities
                potential_matches = self.get_potential_matches_for_subject(subject_name, subject)
                log_msg += 'Potential matches: {0}; '.format(potential_matches.count())

                matches_we_like = self.cull_match_pool(subject_name, subject, potential_matches)

                confidence_levels = matches_we_like.keys()
                if len(confidence_levels):
                    confidence_levels.sort()

                    # this will insert all the matches at the highest confidence level
                    for match, metadata_confidence in matches_we_like[confidence_levels[-1]]:
                        self.insert_match(cursor, table, match, subject, confidence_levels[-1], metadata_confidence)

                        if options['do_post_insert']:
                            self.post_insert()

                        transaction.commit()
                        log_msg += 'Committed.'

                elif options['insert_non_matches']:
                    # a confidence of -1 means no matches were found
                    self.insert_match(cursor, table, None, subject, -1)
                    transaction.commit()
                    log_msg += 'Committed no-match.'

                print log_msg
                sys.stdout.flush()

                # make sure Django doesn't leak memory due to DB query logging
                django.db.reset_queries()


        except KeyboardInterrupt:
            transaction.commit()
            print '\nTo resume, run:'
            print './manage.py {0} -b {1} -t {2}{3}'.format(self.__module__.split('.')[-1], begin_at+i, table, ' -n' if options['insert_non_matches'] else '')

        print 'Done matching. Find your results in {0}.'.format(table)
        print ''
        print ''

        print 'Statistics:'
        print 'Total subjects: {}'.format(self.subject.count())
        print ''

        cursor.execute('select count(*) from (select subject_id from {} group by subject_id having count(*) > 1)x'.format(table))
        number_of_multiples = cursor.fetchone()[0]
        print 'Subject rows with more than one match: {}'.format(number_of_multiples)
        print ''

        cursor.execute('select avg(count)::integer from (select count(*) from {} group by subject_id having count(*) > 1)x'.format(table))
        avg_multi_match = cursor.fetchone()[0]
        print 'Average number of over-matches per subject: {}'.format(avg_multi_match)
        print ''

        cursor.execute('select confidence, count(*) from {} group by confidence order by confidence desc'.format(table))
        confidence_distrib = cursor.fetchall()

        print 'Confidence distribution:'
        for confidence in confidence_distrib:
            if confidence[0] > 0:
                print '  {}: {}'.format(*confidence)
            else:
                print ' {}: {}'.format(*confidence)

        # flush memcached to avoid bugs between script runs where MC gives back stale objects
        self.mc.flush_all()
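
This management command is Python 2, so it pairs each per-record progress print with an explicit sys.stdout.flush(). On Python 3.3 and later, print() accepts a flush keyword that does the same thing in one call; a small illustrative sketch (the record loop is a stand-in, not the matching logic above):

import time

def log_progress(records):
    """Emit one progress line per record and push it through block-buffered stdout."""
    total = len(records)
    for i, record in enumerate(records, start=1):
        # flush=True replaces the separate sys.stdout.flush() call
        print('{0}/{1}: {2}'.format(i, total, record), flush=True)
        time.sleep(0.1)  # stand-in for the real matching work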

Example 79

Project: deep-pwning
Source File: evaluator.py
View license
    def run(self, input_dict):
        x = input_dict["x"]
        y_ = input_dict["y_"]
        y_conv = input_dict["y_conv"]
        keep_prob = input_dict["keep_prob"]
        train_data = input_dict["train_data"]
        train_labels = input_dict["train_labels"]
        test_data = input_dict["test_data"]
        test_labels = input_dict["test_labels"]
        validation_data = input_dict["validation_data"]
        validation_labels = input_dict["validation_labels"]
        num_epochs = input_dict["num_epochs"]
        train_size = input_dict["train_size"]

        batch_size = self.config.getint('main', 'batch_size')
        checkpoint_path = self.config.get('main', 'checkpoint_path')
        num_classes = self.config.getint('main', 'num_classes')
        eval_frequency = self.config.getint('main', 'eval_frequency')

        utils.ensure_dir(os.path.dirname(checkpoint_path))
        start_time = time.time()

        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            print('Initialized!')

            if not self.cmd_args.restore_checkpoint:
                print('No checkpoint to load, training model from scratch...')

                if self.cmd_args.test:
                    iter_range = xrange(1)
                else:
                    iter_range = xrange(int(num_epochs * train_size) // batch_size)

                for step in iter_range:
                    offset = (step * batch_size) % (train_size - batch_size)
                    batch_data = train_data[offset:(offset + batch_size), ...]
                    batch_labels = train_labels[offset:(offset + batch_size)]

                    feed_dict = {
                        x: batch_data, 
                        y_: batch_labels,
                        keep_prob: 0.5
                    }

                    _, l, lr, predictions = sess.run(
                        [self.optimizer, self.loss, self.learning_rate, y_conv], feed_dict=feed_dict)

                    if step % eval_frequency == 0:
                        if not self.cmd_args.test:
                            path = self.saver.save(sess, checkpoint_path)
                            print("Saved model checkpoint to {}\n".format(path))
                        elapsed_time = time.time() - start_time
                        start_time = time.time()
                        print('Step %d (epoch %.2f), %.1f ms' %
                            (step, float(step) * batch_size / train_size,
                            1000 * elapsed_time / eval_frequency))
                        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
                        print('Minibatch error: %.1f%%' % utils.error_rate(predictions, 
                                                                           batch_labels, 
                                                                           self.onehot_labels))
                        print('Validation error: %.1f%%' % utils.error_rate(
                            self.eval_in_batches(y_conv, 
                                                 x, 
                                                 keep_prob, 
                                                 validation_data, 
                                                 sess, 
                                                 batch_size, 
                                                 num_classes), validation_labels, self.onehot_labels))
                        sys.stdout.flush()
        
            # Finally print the result!
            test_error = utils.error_rate(self.eval_in_batches(y_conv, 
                                                               x, 
                                                               keep_prob, 
                                                               test_data, 
                                                               sess,
                                                               batch_size,
                                                               num_classes), test_labels, self.onehot_labels)
            print('Test error: %.1f%%' % test_error)

Example 80

Project: cgat
Source File: liftover.py
View license
def readLiftOver(infile, chromosome,
                 chromosome_size=250000000,
                 report_step=1000000):
    """read a matrix. There probably is a routine for this in Numpy, which
    I haven't found yet.
    """

    if options.loglevel >= 2:
        print("## started reading mapping information")
        sys.stdout.flush()

    map_position = numpy.zeros((chromosome_size,), numpy.int)

    # signed character for chromosome, negative values for negative strands
    map_chromosome = numpy.zeros((chromosome_size,), numpy.int8)

    map_id2chromosome = ["", ]
    map_chromosome2id = {}
    n = 0

    for line in infile:

        n += 1

        if not (n % report_step):
            if options.loglevel >= 2:
                print("# iteration %i" % n)
                sys.stdout.flush()

        if line[:5] == "chain":
            (chr_x, size_x, strand_x, first_x, last_x,
             chr_y, size_y, strand_y, first_y, last_y,
             dontknow) = line[:-1].split(" ")[2:]

            if strand_x == "-":
                raise ValueError("what shall I do with negative strands?")

            x = int(first_x)

            # revert coordinates for negative strands (it seems that
            # the mapping file uses reverse coordinates, while liftover
            # output doesn't)
            # add 1 to coordinates, because 0 is flag for unmappable.
            if strand_y == "-":
                invert = True
                # no +1, because already one past current residue (due to open
                # bracket)
                y = int(size_y) - int(first_y)
            else:
                invert = False
                y = int(first_y) + 1

            if chr_x != chromosome:
                keep = False
            else:
                keep = True
                if options.loglevel >= 3:
                    print("# adding alignment", line[:-1])

            continue

        elif line.strip() == "":
            keep = False
            continue

        elif keep:

            data = list(map(int, line[:-1].split("\t")))

            if len(data) == 3:
                size, increment_x, increment_y = data
            else:
                size, increment_x, increment_y = data[0], 0, 0

            # add position
            if invert:
                map_position[x:x + size] = numpy.arange(y, y - size, -1)
            else:
                map_position[x:x + size] = numpy.arange(y, y + size, 1)

            if chr_y not in map_id2chromosome:
                map_chromosome2id[chr_y] = len(map_id2chromosome)
                map_id2chromosome.append(chr_y)

            id = map_chromosome2id[chr_y]
            if strand_y == "-":
                id = -id

            # add chromosome
            map_chromosome[x:x + size] = id

            x += increment_x + size
            if invert:
                y -= increment_y + size
            else:
                y += increment_y + size

            if y < 0:
                raise ValueError(
                    "illegal mapping: %i -> %i for %s %s:%s-%s(%s) "
                    "to %s %s: %s-%s(%s)" % (
                        x, y,
                        chr_x, strand_x, first_x, last_x, size_x,
                        chr_y, strand_y, first_y, last_y, size_y))

    return map_position, map_chromosome, map_chromosome2id, map_id2chromosome
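
The periodic "# iteration" prints above are flushed so they show up promptly even when the script's stdout is a pipe. An alternative (an assumption on my part, not something the cgat source does) is to run the whole child process unbuffered with python -u, which is equivalent to setting PYTHONUNBUFFERED=1; the worker script name below is hypothetical:

import subprocess
import sys

# Launch a long-running worker with unbuffered stdout so its progress lines arrive immediately.
proc = subprocess.Popen(
    [sys.executable, '-u', 'liftover_worker.py'],  # hypothetical worker script
    stdout=subprocess.PIPE,
    universal_newlines=True,
)
for line in proc.stdout:
    print(line, end='', flush=True)  # relay each progress line as soon as it arrives
proc.wait()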

Example 81

Project: chipsec
Source File: test_re.py
View license
def run_re_tests():
    from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
    if verbose:
        print 'Running re_tests test suite'
    else:
        # To save time, only run the first and last 10 tests
        #tests = tests[:10] + tests[-10:]
        pass

    for t in tests:
        sys.stdout.flush()
        pattern = s = outcome = repl = expected = None
        if len(t) == 5:
            pattern, s, outcome, repl, expected = t
        elif len(t) == 3:
            pattern, s, outcome = t
        else:
            raise ValueError, ('Test tuples should have 3 or 5 fields', t)

        try:
            obj = re.compile(pattern)
        except re.error:
            if outcome == SYNTAX_ERROR: pass  # Expected a syntax error
            else:
                print '=== Syntax error:', t
        except KeyboardInterrupt: raise KeyboardInterrupt
        except:
            print '*** Unexpected error ***', t
            if verbose:
                traceback.print_exc(file=sys.stdout)
        else:
            try:
                result = obj.search(s)
            except re.error, msg:
                print '=== Unexpected exception', t, repr(msg)
            if outcome == SYNTAX_ERROR:
                # This should have been a syntax error; forget it.
                pass
            elif outcome == FAIL:
                if result is None: pass   # No match, as expected
                else: print '=== Succeeded incorrectly', t
            elif outcome == SUCCEED:
                if result is not None:
                    # Matched, as expected, so now we compute the
                    # result string and compare it to our expected result.
                    start, end = result.span(0)
                    vardict={'found': result.group(0),
                             'groups': result.group(),
                             'flags': result.re.flags}
                    for i in range(1, 100):
                        try:
                            gi = result.group(i)
                            # Special hack because else the string concat fails:
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict['g%d' % i] = gi
                    for i in result.re.groupindex.keys():
                        try:
                            gi = result.group(i)
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict[i] = gi
                    repl = eval(repl, vardict)
                    if repl != expected:
                        print '=== grouping error', t,
                        print repr(repl) + ' should be ' + repr(expected)
                else:
                    print '=== Failed incorrectly', t

                # Try the match on a unicode string, and check that it
                # still succeeds.
                try:
                    result = obj.search(unicode(s, "latin-1"))
                    if result is None:
                        print '=== Fails on unicode match', t
                except NameError:
                    continue # 1.5.2
                except TypeError:
                    continue # unicode test case

                # Try the match on a unicode pattern, and check that it
                # still succeeds.
                obj=re.compile(unicode(pattern, "latin-1"))
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode pattern match', t

                # Try the match with the search area limited to the extent
                # of the match and see if it still succeeds.  \B will
                # break (because it won't match at the end or start of a
                # string), so we'll ignore patterns that feature it.

                if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
                               and result is not None:
                    obj = re.compile(pattern)
                    result = obj.search(s, result.start(0), result.end(0) + 1)
                    if result is None:
                        print '=== Failed on range-limited match', t

                # Try the match with IGNORECASE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.IGNORECASE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on case-insensitive match', t

                # Try the match with LOCALE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.LOCALE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on locale-sensitive match', t

                # Try the match with UNICODE locale enabled, and check
                # that it still succeeds.
                obj = re.compile(pattern, re.UNICODE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode-sensitive match', t
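
The loop above flushes stdout at the start of each iteration so everything printed so far has already left the buffer before the next pattern is compiled and run. A small, hypothetical harness showing the same idea (the test tuples are invented; this is not the chipsec suite):

import sys

def run_all(tests):
    # Flush before each test so the banner is already on screen even if the
    # test hard-crashes the process; flush again at the end for the summary.
    failures = 0
    for name, func in tests:
        print("running %s ..." % name)
        sys.stdout.flush()
        try:
            func()
        except Exception as exc:
            failures += 1
            print("FAIL %s: %r" % (name, exc))
    print("%d failure(s)" % failures)
    sys.stdout.flush()
    return failures

# run_all([("ok", lambda: None), ("boom", lambda: 1 / 0)])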

Example 82

Project: CumulusCI
Source File: run_apex_tests.py
View license
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS',False) in ['true','True']
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)
    
    if namespace:
        namespace = "'{0}'".format(namespace,)
    else:
        namespace = 'null'
    
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '{0}'".format(pattern))

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '{0}')".format(pattern,))
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(namespace,)
    if where_name:
        query += " AND ({0})".format(' OR '.join(where_name),)
    if where_exclude:
        query += " AND {0}".format(' AND '.join(where_exclude),)

    print "Running Query: {0}".format(query,)
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found {0} classes".format(res['totalSize'],)
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    trace_id = None
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        print 'Setting up trace flag to capture debug logs'

        # Get the User's id to set a TraceFlag
        res_user = sf.query("Select Id from User where Username = '{0}'".format(username,))
        user_id = res_user['records'][0]['Id']
        
        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                     .format(instance=sf.sf_instance,
                             object_name='TraceFlag',
                             sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])
    
        expiration = datetime.datetime.now() + datetime.timedelta(seconds=60*60*12)
        res = TraceFlag.create({
            'ApexCode': 'Info',
            'ApexProfiling': 'Debug',
            'Callout': 'Info',
            'Database': 'Info',
            'ExpirationDate': expiration.isoformat(),
            #'ScopeId': user_id,
            'System': 'Info',
            'TracedEntityId': user_id,
            'Validation': 'Info',
            'Visualforce': 'Info',
            'Workflow': 'Info',
        })
        trace_id = res['id']

        print 'Created TraceFlag for user'
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'".format(job_id,))
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'".format(job_id,))
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('{0}')".format("','".join([str(id) for id in classes_by_log_id.keys()]),)
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}".format(log_ids,))
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '{0}sobjects/ApexLog/{1}/Body'.format(sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)
            
            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flag
        TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: {0} ({1}s)'.format(class_name, duration)
        else:
            print 'Class: {0}'.format(class_name,)
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': decode_to_unicode(class_name),
                'Method': decode_to_unicode(result['MethodName']),
                'Message': decode_to_unicode(result['Message']),
                'Outcome': decode_to_unicode(result['Outcome']),
                'StackTrace': decode_to_unicode(result['StackTrace']),
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })
            
            # Output result for method
            if debug and json_output and result.get('stats') and 'duration' in result['stats']:
                # If debug is enabled and we're generating the json output, include duration with the test
                print u'   {0}: {1} ({2}s)'.format(
                    result['Outcome'], 
                    result['MethodName'], 
                    result['stats']['duration']
                )
            else:
                print u'   {Outcome}: {MethodName}'.format(**result)

            if debug and not json_output:
                print u'     DEBUG LOG INFO:'
                stats = result.get('stats',None)
                if not stats:
                    print u'       No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        try:
                            value = stats[stat]
                            output = u'       {0} / {1}'.format(value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except:
                            output = u'       {0}'.format(stats[stat],)
                            print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print u'   Message: {Message}'.format(**result)
                print u'   StackTrace: {StackTrace}'.format(**result)
            sys.stdout.flush()
    
    print u'-------------------------------------------------------------------------------'
    print u'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print u'-------------------------------------------------------------------------------'
    sys.stdout.flush()
    
    if counts['Fail'] or counts['CompileFail']:
        print u''
        print u'Failing Tests'
        print u'-------------'
        print u''
        sys.stdout.flush()

        counter = 0
        for result in test_results:
            if result['Outcome'] not in ['Fail','CompileFail']:
                continue
            counter += 1
            print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'], result['Method'], result['Outcome'])
            print u'  Message: {0}'.format(result['Message'],)
            print u'  StackTrace: {0}'.format(result['StackTrace'],)
            sys.stdout.flush()

    if json_output:
        f = codecs.open(json_output, encoding='utf-8', mode='w')
        f.write(json.dumps(test_results))
        f.close()

    if junit_output:
        f = codecs.open(junit_output, encoding='utf-8', mode='w')
        f.write('<testsuite tests="{0}">\n'.format(len(test_results)),)
        for result in test_results:
            testcase = '  <testcase classname="{0}" name="{1}"'.format(result['ClassName'], result['Method'])
            if 'Stats' in result and result['Stats'] and 'duration' in result['Stats']:
                testcase = '{0} time="{1}"'.format(testcase, result['Stats']['duration'])
            if result['Outcome'] in ['Fail','CompileFail']:
                testcase = '{0}>\n'.format(testcase,)
                testcase = '{0}    <failure type="{1}">{2}</failure>\n'.format(
                    testcase, 
                    cgi.escape(result['StackTrace']), 
                    cgi.escape(result['Message']),
                )
                testcase = '{0}  </testcase>\n'.format(testcase,)
            else:
                testcase = '{0} />\n'.format(testcase,)
            f.write(testcase)

        f.write('</testsuite>')
        f.close()
        

    return counts
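
The polling loop in this example prints one status line per interval and flushes it so the CI log shows progress in real time instead of dumping everything at the end. A generic sketch of that pattern; check is a hypothetical callback standing in for the Salesforce query, and the key names are assumptions:

import sys
import time

def poll_until_done(check, interval=10, timeout=600):
    # `check` is assumed to return counts like
    # {'Completed': 3, 'Processing': 1, 'Queued': 2}.
    deadline = time.time() + timeout
    while time.time() < deadline:
        counts = check()
        print('Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts)
        sys.stdout.flush()  # make the status visible before sleeping
        if not counts['Queued'] and not counts['Processing']:
            return counts
        time.sleep(interval)
    raise TimeoutError('job did not finish within %s seconds' % timeout)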

Example 83

Project: vmnetx
Source File: generate.py
View license
def copy_memory(in_path, out_path, xml=None, compression='xz', verbose=True,
        low_priority=False):
    def report(line, newline=True):
        if not verbose:
            return
        if newline:
            print line
        else:
            print line,
            sys.stdout.flush()

    # Open files, read header
    fin = open(in_path, 'r')
    fout = open(out_path, 'w')
    hdr = LibvirtQemuMemoryHeader(fin)

    # Determine input and output compression
    compress_in = hdr.compressed
    if compress_in not in MEMORY_DECOMPRESS_COMMANDS:
        raise MachineGenerationError('Cannot decode save format %d' %
                compress_in)
    if compression == 'xz':
        compress_out = hdr.COMPRESS_XZ
    elif compression == 'lzop':
        compress_out = hdr.COMPRESS_LZOP
    elif compression == None:
        compress_out = hdr.COMPRESS_RAW
    else:
        raise ValueError('Unknown compression: %s' % compression)
    if compress_out not in MEMORY_COMPRESS_COMMANDS:
        raise MachineGenerationError('Cannot encode save format %d' %
                compress_out)

    # Write header
    hdr.compressed = compress_out
    if xml is not None:
        hdr.xml = xml
    hdr.write(fout, extend=True)
    fout.flush()

    processes = []
    try:
        # Start compressor/decompressor if required
        if compress_in != compress_out:
            for command in (MEMORY_COMPRESS_COMMANDS[compress_out],
                    MEMORY_DECOMPRESS_COMMANDS[compress_in]):
                if not command:
                    continue
                if low_priority:
                    # Python < 3.3 doesn't have os.setpriority(), so we use
                    # the command-line utility
                    command = ['nice'] + list(command)
                pipe_r, pipe_w = os.pipe()
                proc = subprocess.Popen(command, stdin=pipe_r, stdout=fout,
                        close_fds=True)
                processes.append(proc)
                os.close(pipe_r)
                fout.close()
                fout = os.fdopen(pipe_w, 'w')

        # Copy body; report progress
        fin.seek(0, 2)
        total = fin.tell()
        hdr.seek_body(fin)
        if compress_in != compress_out and compress_out != hdr.COMPRESS_RAW:
            action = 'Copying and compressing'
        else:
            action = 'Copying'
        while True:
            buf = fin.read(1 << 20)
            if not buf:
                break
            fout.write(buf)
            report('\r%s memory image: %3d%%' % (action,
                    100 * fin.tell() / total), newline=False)
        report('')
    finally:
        # Clean up
        fin.close()
        fout.close()
        failed = False
        for proc in reversed(processes):
            proc.wait()
            failed = failed or proc.returncode
        if failed:
            raise IOError('Compressor/decompressor failed')
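
The report(..., newline=False) calls above rewrite a single progress line in place with a carriage return, which only works if stdout is flushed after every write because the partial line never ends in a newline. A standalone sketch of that idiom (paths and chunk size are placeholders):

import os
import sys

def copy_with_progress(src_path, dst_path, chunk=1 << 20):
    total = os.path.getsize(src_path)
    done = 0
    with open(src_path, 'rb') as fin, open(dst_path, 'wb') as fout:
        while True:
            buf = fin.read(chunk)
            if not buf:
                break
            fout.write(buf)
            done += len(buf)
            # '\r' returns to the start of the line; without the flush the
            # partial line would sit in the buffer and nothing would update.
            sys.stdout.write('\rcopying: %3d%%' % (100 * done // max(total, 1)))
            sys.stdout.flush()
    sys.stdout.write('\n')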

Example 84

Project: core
Source File: perflogserver.py
View license
def main():
    usagestr = "%prog [-h] [options] [args]\n\nLog server and optional CORE session metrics to stdout."
    parser = optparse.OptionParser(usage = usagestr)
    parser.set_defaults(interval=2, timestamp=False,
                        configfile = "/etc/core/perflogserver.conf",
                        alarm = True, session = None)
    parser.add_option("-i", "--interval", dest = "interval", type = int,
                      help = "seconds to wait between samples; default=%s" %
                      parser.defaults["interval"])
    parser.add_option("-t", "--timestamp", action = "store_true",
                      dest = "timestamp", 
                      help = "include timestamp on each line")
    parser.add_option("-c", "--configfile", dest = "configfile", 
		      type = "string",
                      help = "read threshold values from the specified file;\
  		      default=%s" % parser.defaults["configfile"])
    parser.add_option("-a", "--alarm", action = "store_true",
                      dest = "alarm", 
                      help = "generate alarms based threshold check on each cycle")
    parser.add_option("-s", "--session", dest = "session", type = int,
                      help = "CORE session id; default=%s" %
                      parser.defaults["session"])
    global options
    global ncpus
    global serverthresholds
    global logsession
    global cyclecount

    (options, args) = parser.parse_args()
    # print options

    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)

    ncpus = numcpus()

    # server threshold dictionary - a ServerMetrics instant with default values
    serverthresholds = ServerMetrics()
    # set to server threshold default values: serverloadavg1=3.5, 
    # serverloadavg5=3.5, serverloadavg15=3.5, serverusedmemory=80.0, 
    # serverusedcputime=80.0, processorusedcputime=90.0
    serverthresholds.setvalues([3.5, 3.5, 3.5, 80.0, 80.0, 90.0])
    if options.alarm is True:
        # read server threshold values from configuration file
        readserverthresholds(options.configfile)

    if options.session is not None:
        logsession = LogSession()
        # print logsession

    # mark host log baseline
    print "server: ", ", ".join(map(lambda(x):str(x), os.uname())), ",", ncpus, "CPU cores"
    print "start timestamp:", time.time(), ", baseline data: "
    print csvserverbaseline()
    print "server metrics: ", ", ".join(map(lambda(x):str(x), serverthresholds.getkeys()))
    if options.session is not None:
        print "node metrics: nodename, ", ", ".join(map(lambda(x):str(x), logsession.nodethresholds.getkeys()))

    cyclecount = 0
    while True:
        cputimea = collectservercputimes()
        if options.session is not None:
            nodesa = logsession.getnodemetrics('a')
            # print "nodes a:", nodesa

        time.sleep(options.interval)

        cputimeb = collectservercputimes()
        mems = collectservermems()

        calccputime = calcservercputimes(cputimea, cputimeb)
        m = csvservermetrics(collectservermetrics(calccputime, mems, options.alarm))
        print m

        if options.session is not None:
            nodesb = logsession.getnodemetrics('b')
            # print "nodes b:", nodesb
            if nodesb != {}:
                logsession.calcnodemetrics(cputimea, cputimeb, mems)
                logsession.printnodemetrics('c')

        sys.stdout.flush()
        cyclecount = cyclecount + 1

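The server above prints one metrics sample per cycle and flushes it, which is what lets the log be followed live with `tail -f` while the process keeps running. A small sketch of the same log-then-sleep loop (Unix-only, since it relies on os.getloadavg; the format is illustrative):

import os
import sys
import time

def log_load(interval=2, cycles=5):
    for _ in range(cycles):
        load1, load5, load15 = os.getloadavg()
        print("%f, %.2f, %.2f, %.2f" % (time.time(), load1, load5, load15))
        sys.stdout.flush()  # make the sample visible before sleeping
        time.sleep(interval)
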
Example 85

Project: ck
Source File: module.py
View license
def start(i):
    """

    Input:  {
              (host)        - Internal web server host
              (port)        - Internal web server port

              (wfe_host)    - External web server host
              (wfe_port)    - External web server port

              (browser)     - if 'yes', open browser
              (template)    - if !='', add template
              (cid)         - view a given entry
              (extra_url)   - extra text for URL
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
            }

    """

    # Define internal server host.
    host=ck.cfg.get('default_host','')
    host=i.get('host',host)
    if host=='': host='localhost' # 'localhost' if ''

    # Define external server host.
    global wfe_host
    wfe_host=i.get('wfe_host',host)

    # Define internal server port.
    port=ck.cfg.get('default_port','')
    port=i.get('port',port)
    if port=='': return {'return':1, 'error':'web port is not defined'}

    # Define external server port.
    global wfe_port
    wfe_port=i.get('wfe_port',port)

    # Assemble URL.
    url=host+':'+port
    wfe_url=wfe_host+':'+wfe_port

    ck.out('Starting CK web service on '+url+' (configured for access at '+wfe_url+') ...')
    ck.out('')

    sys.stdout.flush()

    if i.get('browser','')=='yes':
       rurl='http://'+url

       ext=''

       if i.get('template','')!='':
          ext='template='+i['template']

       cid=i.get('cid','')
       if cid!='' and cid!='web':
          if ext!='': ext+='&'
          ext+='wcid='+cid

       if i.get('extra_url','')!='':
          if ext!='': ext+='&'
          ext+=i['extra_url']

       if ext!='':
          rurl+='/?'+ext

       import webbrowser
       webbrowser.open(rurl)

    try:
       server = ThreadedHTTPServer((host, int(port)), server_handler)
       # Prevent issues with socket reuse
       server.allow_reuse_address=True
       server.serve_forever()
    except KeyboardInterrupt:
       ck.out('Keyboard interrupt, terminating CK web service ...')
       server.socket.close()
       return {'return':0}
    except OSError as e:
       return {'return':1, 'error':'problem starting CK web service ('+format(e)+')'}

    return {'return':0}
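
The flush above matters because serve_forever() blocks until interrupted: with stdout redirected, the startup banner would otherwise sit in the buffer while the process appears to hang. A minimal sketch using the standard library's http.server (the handler and port are placeholders, not the CK web service):

import sys
from http.server import HTTPServer, BaseHTTPRequestHandler

def serve(host='localhost', port=8080):
    print('Starting web service on %s:%d ...' % (host, port))
    sys.stdout.flush()  # get the banner out before blocking below
    server = HTTPServer((host, port), BaseHTTPRequestHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('Keyboard interrupt, terminating web service ...')
    finally:
        server.server_close()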

Example 86

Project: ck-autotuning
Source File: module.py
View license
def extract_opts_new(i):
    """
    Input:  {
              (host_os)              - host OS (detect, if omitted)
              (target_os)            - OS module to check (if omitted, analyze host)
              (device_id)            - device id if remote (such as adb)

              (compiler_id)          - currently support only "gcc" (default)

              (repo_uoa)             - repo UOA to record compiler description
              (data_uoa)             - data UOA to record compiler description (if empty, autogenerate)
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
            }

    """

    import os
    import sys

    o=i.get('out','')

    comp_id=i.get('compiler_id','').lower()
    if comp_id=='': comp_id='gcc'

    # Check OS
    hos=i.get('host_os','')
    tos=i.get('target_os','')
    tdid=i.get('device_id','')

    # Get some info about platforms
    ii={'action':'detect',
        'module_uoa':cfg['module_deps']['platform.os'],
        'host_os':hos,
        'target_os':tos,
        'device_id':tdid}
    r=ck.access(ii)
    if r['return']>0: return r

    hos=r['host_os_uid']
    hosx=r['host_os_uoa']
    hosd=r['host_os_dict']

    tos=r['os_uid']
    tosx=r['os_uoa']
    tosd=r['os_dict']

    tdid=r['device_id']

    sext=hosd.get('script_ext','')
    scall=hosd.get('env_call','')
    sbp=hosd.get('bin_prefix','')

    tags='compiler'
    if comp_id=='gcc': tags+=',gcc'

    # Get compiler env
    ii={'action':'set',
        'module_uoa':cfg['module_deps']['env'],
        'host_os':hos,
        'target_os':tos,
        'device_id':tdid,
        'tags':tags,
        'out':o}
    r=ck.access(ii)
    if r['return']>0: return r

    env_uoa=r['env_uoa']
    bat=r['bat']

    # Detect version
    ii={'action':'internal_detect',
        'module_uoa':cfg['module_deps']['soft'],
        'host_os':hos,
        'target_os':tos,
        'device_id':tdid,
        'tags':tags,
        'env':bat}
    r=ck.access(ii)
    if r['return']>0: return r

    vstr=r['version_str']
    if o=='con':
       ck.out('Detected GCC version: '+vstr)

    # Prepare batch file
    rx=ck.gen_tmp_file({'prefix':'tmp-', 'suffix':sext, 'remove_dir':'yes'})
    if rx['return']>0: return rx
    fbat=rx['file_name']

    # Prepare 2 tmp files
    rx=ck.gen_tmp_file({'prefix':'tmp-', 'suffix':'.tmp', 'remove_dir':'yes'})
    if rx['return']>0: return rx
    fout1=rx['file_name']

    # Prepare GCC help
    bat+='\n'
    bat+='gcc --help=optimizer > '+fout1+'\n'

    # Save file
    rx=ck.save_text_file({'text_file':fbat, 'string':bat})
    if rx['return']>0: return rx

    # Execute 
    y=scall+' '+sbp+fbat

    if o=='con':
       ck.out('')
       ck.out('Executing prepared batch file '+fbat+' ...')
       ck.out('')

    sys.stdout.flush()
    rx=os.system(y)

    os.remove(fbat)

    # Load opt file
    rx=ck.load_text_file({'text_file':fout1,
                          'split_to_list':'yes', 
                          'encoding':sys.stdin.encoding,
                          'delete_after_read':'yes'})
    if rx['return']>0: return rx
    lopts=rx['lst']

    # Check if want params
    ck.out('')
    r=ck.inp({'text':'Enter full path to params.def file if you have GCC sources (or Enter to skip it): '})
    if r['return']>0: return r

    fpar=r['string'].strip()

    lparams=[]
    if fpar!='':
       rx=ck.load_text_file({'text_file':fpar,
                             'split_to_list':'yes', 
                             'encoding':sys.stdin.encoding})
       if rx['return']>0: return rx
       lparams=rx['lst']

    # Parsing opts
    dd={"##base_opt": {
            "choice": [
              "-O3", 
              "-Ofast", 
              "-O0", 
              "-O1", 
              "-O2", 
              "-Os"
            ], 
            "default": "", 
            "desc": "base compiler flag", 
            "sort": 10000, 
            "tags": [
              "base", 
              "basic", 
              "optimization"
            ], 
            "type": "text"
          }
       }

    iopt=0
    iparam=0

    opt=''
    opt1=''
    desc=''
    desc1=''

    add=False
    finish=False

    for q in range(1, len(lopts)):
        qq=lopts[q]
        if len(qq)>2:
           if qq[2]=='-':
              qq=qq.strip()
              j=qq.find(' ')
              desc1=''
              if j>=0:
                 desc1=qq[j:].strip()
              else:
                 j=len(qq)
              opt1=qq[1:j]

              if not opt1.startswith('O'):
                 if opt=='': 
                    opt=opt1
                    desc=desc1
                 else:
                    add=True

           else:
              qq=qq.strip()
              if len(qq)>0:
                 desc+=' '+qq
        else:
           add=True
           finish=True

        if add and opt!='':
           iopt+=1

           ck.out('Adding opt '+str(iopt)+' "'+opt+'" - '+desc)

           dd['##'+opt]={
             "can_omit": "yes", 
             "choice": [
               "-f"+opt, 
               "-fno-"+opt
             ], 
             "default": "", 
             "desc": "compiler flag: -f"+opt+"("+desc+")", 
             "sort": iopt*100, 
             "tags": [
               "basic", 
              "optimization"
             ], 
             "type":"text"
           }

           opt=opt1 	
           desc=desc1

           add=False

        if finish: 
           break

    # Parsing params
    opt=''
    opt1=''
    desc=''
    desc1=''

    add=False
    finish=False

    for q in range(0, len(lparams)):
        qq=lparams[q].strip()
        if qq.startswith('DEFPARAM'):
           iparam+=1

           q+=1
           opt=lparams[q].strip()[1:-2]

           q+=1
           desc=lparams[q].strip()[1:-2]
           line='x'
           while True:
              q+=1
              line=lparams[q].strip()
              if line[-1]==')': break
              desc+=line[1:-2]

           e1=0
           e2=0

           exp=line[:-1].split(',')

           skip=False
           for j in range(0, len(exp)):
               jj=exp[j].strip()
               if jj.find('*')>0 or jj.find('_')>0:
                  skip=True
               else:
                  jj=int(exp[j].strip())
                  exp[j]=jj

           if not skip:
              if exp[2]>exp[1]:
                 e1=exp[1]
                 e2=exp[2]
              else:
                 e1=0
                 e2=exp[0]*2

              ck.out('Adding param '+str(iparam)+' "'+opt+'" - '+desc)

              dd['##param_'+opt]={
                "can_omit": "yes", 
                "default": "", 
                "desc": "compiler flag: --param "+opt+"= ("+desc+")", 
                "sort": iparam*100+30000, 
                "explore_prefix": "--param "+opt+"=", 
                "explore_start": e1, 
                "explore_step": 1, 
                "explore_stop": e2, 
                "tags": [
                  "basic", 
                 "optimization"
                ], 
                "type":"integer"
              }

    # Prepare CK entry
    duoa=i.get('data_uoa','')
    if duoa=='':
       v=vstr.split('.')
       duoa='gcc-'+v[0]+v[1]+'x'

    if o=='con':
       ck.out('')
       ck.out('Recording to '+duoa+' ...')

    ii={'action':'add',
        'module_uoa':work['self_module_uid'],
        'repo_uoa':i.get('repo_uoa',''),
        'data_uoa':duoa,
        'desc':{'all_compiler_flags_desc':dd},
        'dict':{
          "tags": [
          "compiler", 
          "gcc", 
          "v"+v[0], 
          "v"+v[0]+"."+v[1]
          ]
        }
       }
    r=ck.access(ii)
    if r['return']>0: return r

    # Final info
    if o=='con':
       ck.out('')
       ck.out('Compiler version:           '+vstr)
       ck.out('Number of boolean opts:     '+str(iopt))
       ck.out('Number of parameteric opts: '+str(iparam))

    return {'return':0}
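
Flushing right before os.system(), as this example does, keeps the parent's messages in order with the child's output: both write to the same file descriptor, but only the parent's text is held in Python's userspace buffer. A minimal illustration (the command is arbitrary):

import os
import sys

def run_step(command, label):
    print('Executing %s ...' % label)
    sys.stdout.flush()  # otherwise the banner may appear after the child's output
    return os.system(command)

# run_step('echo hello from the child', 'a trivial child process')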

Example 87

Project: babble
Source File: test_re.py
View license
def run_re_tests():
    from test.re_tests import benchmarks, tests, SUCCEED, FAIL, SYNTAX_ERROR
    if verbose:
        print 'Running re_tests test suite'
    else:
        # To save time, only run the first and last 10 tests
        #tests = tests[:10] + tests[-10:]
        pass

    for t in tests:
        sys.stdout.flush()
        pattern = s = outcome = repl = expected = None
        if len(t) == 5:
            pattern, s, outcome, repl, expected = t
        elif len(t) == 3:
            pattern, s, outcome = t
        else:
            raise ValueError, ('Test tuples should have 3 or 5 fields', t)

        try:
            obj = re.compile(pattern)
        except re.error:
            if outcome == SYNTAX_ERROR: pass  # Expected a syntax error
            else:
                print '=== Syntax error:', t
        except KeyboardInterrupt: raise KeyboardInterrupt
        except:
            print '*** Unexpected error ***', t
            if verbose:
                traceback.print_exc(file=sys.stdout)
        else:
            try:
                result = obj.search(s)
            except re.error, msg:
                print '=== Unexpected exception', t, repr(msg)
            if outcome == SYNTAX_ERROR:
                # This should have been a syntax error; forget it.
                pass
            elif outcome == FAIL:
                if result is None: pass   # No match, as expected
                else: print '=== Succeeded incorrectly', t
            elif outcome == SUCCEED:
                if result is not None:
                    # Matched, as expected, so now we compute the
                    # result string and compare it to our expected result.
                    start, end = result.span(0)
                    vardict={'found': result.group(0),
                             'groups': result.group(),
                             'flags': result.re.flags}
                    for i in range(1, 100):
                        try:
                            gi = result.group(i)
                            # Special hack because else the string concat fails:
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict['g%d' % i] = gi
                    for i in result.re.groupindex.keys():
                        try:
                            gi = result.group(i)
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict[i] = gi
                    repl = eval(repl, vardict)
                    if repl != expected:
                        print '=== grouping error', t,
                        print repr(repl) + ' should be ' + repr(expected)
                else:
                    print '=== Failed incorrectly', t

                # Try the match on a unicode string, and check that it
                # still succeeds.
                try:
                    result = obj.search(unicode(s, "latin-1"))
                    if result is None:
                        print '=== Fails on unicode match', t
                except NameError:
                    continue # 1.5.2
                except TypeError:
                    continue # unicode test case

                # Try the match on a unicode pattern, and check that it
                # still succeeds.
                obj=re.compile(unicode(pattern, "latin-1"))
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode pattern match', t

                # Try the match with the search area limited to the extent
                # of the match and see if it still succeeds.  \B will
                # break (because it won't match at the end or start of a
                # string), so we'll ignore patterns that feature it.

                if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
                               and result is not None:
                    obj = re.compile(pattern)
                    result = obj.search(s, result.start(0), result.end(0) + 1)
                    if result is None:
                        print '=== Failed on range-limited match', t

                # Try the match with IGNORECASE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.IGNORECASE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on case-insensitive match', t

                # Try the match with LOCALE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.LOCALE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on locale-sensitive match', t

                # Try the match with UNICODE locale enabled, and check
                # that it still succeeds.
                obj = re.compile(pattern, re.UNICODE)
                result = obj.search(s)
                if result is None:
                    print '=== Fails on unicode-sensitive match', t
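
Both re_tests examples above are Python 2 code, where pairing a print statement with an explicit flush is the usual idiom. In Python 3 the same effect is available in a single call, and running the interpreter with -u (or setting PYTHONUNBUFFERED) disables the buffering for the whole process:

import sys

print('Running re_tests test suite')
sys.stdout.flush()

# Equivalent single call in Python 3.3+:
print('Running re_tests test suite', flush=True)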

Example 88

Project: conary
Source File: changemail.py
View license
def doWork(repos, cfg, srcMap, pkgMap, grpMap, sourceuser, binaryuser, fromaddr, maxsize, argSet):
    exitCode = 0
    tmpfd, tmppath = tempfile.mkstemp('', 'changemail-')
    os.unlink(tmppath)
    tmpfile = os.fdopen(tmpfd)
    sys.stdout.flush()
    oldStdOut = os.dup(sys.stdout.fileno())
    os.dup2(tmpfd, 1)
    mailhost = argSet.pop('mailhost', 'localhost')

    if srcMap:
        sources = sorted(srcMap.keys())
        names = [ x.split(':')[0] for x in sources ]
        subjectList = []
        for sourceName in sources:
            for ver, shortver in srcMap[sourceName]:
                subjectList.append('%s=%s' %(
                    sourceName.split(':')[0], shortver))
        subject = 'Source: %s' %" ".join(subjectList)

        for sourceName in sources:
            for ver, shortver in srcMap[sourceName]:
                new = repos.findTrove(cfg.buildLabel, (sourceName, ver, None))
                newV = new[0][1]
                old, oldV = checkin.findRelativeVersion(repos, sourceName,
                                                        1, newV)
                if old:
                    old = ' (previous: %s)'%oldV.trailingRevision().asString()
                else:
                    old = ''
                print '================================'
                print '%s=%s%s' %(sourceName, shortver, old)
                print 'cvc rdiff %s -1 %s' %(sourceName[:-7], ver)
                print '================================'
                try:
                    checkin.rdiff(repos, cfg.buildLabel, sourceName, '-1', ver)
                except:
                    exitCode = 2
                    print 'rdiff failed for %s' %sourceName
                    try:
                        t, v, tb = sys.exc_info()
                        tbd = traceback.format_exception(t, v, tb)
                        sys.stdout.write(''.join(tbd[-min(2, len(tbd)):]))
                        sys.stderr.write(''.join(tbd))
                    except:
                        print 'Failed to print exception information'

                    print ''
                    print 'Please include a copy of this message in an issue'
                    print 'filed at https://issues.rpath.com/'
                print
        if sourceuser:
            print 'Committed by: %s' %sourceuser

        sendMail(tmpfile, subject, fromaddr, maxsize, argSet['email'], mailhost)

    if pkgMap or grpMap:
        # stdout is the tmpfile
        sys.stdout.flush()
        sys.stdout.seek(0)
        sys.stdout.truncate()

        binaries = sorted(pkgMap.keys())
        groups = sorted(grpMap.keys())
        subject = 'Binary: %s' %" ".join(binaries+groups)

        wrap = textwrap.TextWrapper(
            initial_indent='    ',
            subsequent_indent='        ',
        )

        if binaries:
            print "Binary package commits:"
            if binaryuser:
                print 'Committed by: %s' %binaryuser
        for package in binaries:
            for version in sorted(pkgMap[package].keys()):
                print '================================'
                print '%s=%s' %(package, version)
                flavorDict = pkgMap[package][version]
                for flavor in sorted(flavorDict.keys()):
                    print wrap.fill('%s:%s [%s]' %(package,
                        ' :'.join(sorted(flavorDict[flavor])),
                        ', '.join(flavor.split(','))))
                print

        if groups:
            print "Group commits:"
        for group in groups:
            for version in sorted(grpMap[group].keys()):
                print '================================'
                print '%s=%s' %(group, version)
                flavorSet = grpMap[group][version]
                for flavor in sorted(flavorSet):
                    print wrap.fill('[%s]' %
                        ', '.join(flavor.split(',')))
                print

        sendMail(tmpfile, subject, fromaddr, maxsize, argSet['email'], mailhost)
        os.dup2(oldStdOut, 1)

    return exitCode
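
The changemail hook flushes stdout before duplicating the file descriptor with os.dup2(); without that, text still sitting in Python's buffer would land on the wrong side of the redirection. A generic sketch of the same capture idiom (the helper name is invented):

import os
import sys
import tempfile

def capture_stdout(func):
    tmp = tempfile.TemporaryFile(mode='w+')
    sys.stdout.flush()                      # drain the buffer before redirecting
    saved_fd = os.dup(sys.stdout.fileno())
    os.dup2(tmp.fileno(), sys.stdout.fileno())
    try:
        func()
    finally:
        sys.stdout.flush()                  # drain again before restoring
        os.dup2(saved_fd, sys.stdout.fileno())
        os.close(saved_fd)
    tmp.seek(0)
    return tmp.read()

# captured = capture_stdout(lambda: print('hello'))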

Example 89

Project: cython
Source File: Dependencies.py
View license
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
              exclude_failures=False, **options):
    """
    Compile a set of source modules into C/C++ files and return a list of distutils
    Extension objects for them.

    As module list, pass either a glob pattern, a list of glob patterns or a list of
    Extension objects.  The latter allows you to configure the extensions separately
    through the normal distutils options.

    When using glob patterns, you can exclude certain module names explicitly
    by passing them into the 'exclude' option.

    To globally enable C++ mode, you can pass language='c++'.  Otherwise, this
    will be determined at a per-file level based on compiler directives.  This
    affects only modules found based on file names.  Extension instances passed
    into cythonize() will not be changed.

    For parallel compilation, set the 'nthreads' option to the number of
    concurrent builds.

    For a broad 'try to compile' mode that ignores compilation failures and
    simply excludes the failed extensions, pass 'exclude_failures=True'. Note
    that this only really makes sense for compiling .py files which can also
    be used without compilation.

    Additional compilation options can be passed as keyword arguments.
    """
    if exclude is None:
        exclude = []
    if 'include_path' not in options:
        options['include_path'] = ['.']
    if 'common_utility_include_dir' in options:
        if options.get('cache'):
            raise NotImplementedError("common_utility_include_dir does not yet work with caching")
        safe_makedirs(options['common_utility_include_dir'])
    c_options = CompilationOptions(**options)
    cpp_options = CompilationOptions(**options); cpp_options.cplus = True
    ctx = c_options.create_context()
    options = c_options
    module_list, module_metadata = create_extension_list(
        module_list,
        exclude=exclude,
        ctx=ctx,
        quiet=quiet,
        exclude_failures=exclude_failures,
        language=language,
        aliases=aliases)
    deps = create_dependency_tree(ctx, quiet=quiet)
    build_dir = getattr(options, 'build_dir', None)

    modules_by_cfile = {}
    to_compile = []
    for m in module_list:
        if build_dir:
            root = os.getcwd()  # distutil extension depends are relative to cwd
            def copy_to_build_dir(filepath, root=root):
                filepath_abs = os.path.abspath(filepath)
                if os.path.isabs(filepath):
                    filepath = filepath_abs
                if filepath_abs.startswith(root):
                    mod_dir = join_path(build_dir,
                            os.path.dirname(_relpath(filepath, root)))
                    copy_once_if_newer(filepath_abs, mod_dir)
            for dep in m.depends:
                copy_to_build_dir(dep)

        new_sources = []
        for source in m.sources:
            base, ext = os.path.splitext(source)
            if ext in ('.pyx', '.py'):
                if m.language == 'c++':
                    c_file = base + '.cpp'
                    options = cpp_options
                else:
                    c_file = base + '.c'
                    options = c_options

                # setup for out of place build directory if enabled
                if build_dir:
                    c_file = os.path.join(build_dir, c_file)
                    dir = os.path.dirname(c_file)
                    safe_makedirs_once(dir)

                if os.path.exists(c_file):
                    c_timestamp = os.path.getmtime(c_file)
                else:
                    c_timestamp = -1

                # Priority goes first to modified files, second to direct
                # dependents, and finally to indirect dependents.
                if c_timestamp < deps.timestamp(source):
                    dep_timestamp, dep = deps.timestamp(source), source
                    priority = 0
                else:
                    dep_timestamp, dep = deps.newest_dependency(source)
                    priority = 2 - (dep in deps.immediate_dependencies(source))
                if force or c_timestamp < dep_timestamp:
                    if not quiet:
                        if source == dep:
                            print("Compiling %s because it changed." % source)
                        else:
                            print("Compiling %s because it depends on %s." % (source, dep))
                    if not force and options.cache:
                        extra = m.language
                        fingerprint = deps.transitive_fingerprint(source, extra)
                    else:
                        fingerprint = None
                    to_compile.append((priority, source, c_file, fingerprint, quiet,
                                       options, not exclude_failures, module_metadata.get(m.name)))
                new_sources.append(c_file)
                if c_file not in modules_by_cfile:
                    modules_by_cfile[c_file] = [m]
                else:
                    modules_by_cfile[c_file].append(m)
            else:
                new_sources.append(source)
                if build_dir:
                    copy_to_build_dir(source)
        m.sources = new_sources

    if options.cache:
        if not os.path.exists(options.cache):
            os.makedirs(options.cache)
    to_compile.sort()
    # Drop "priority" component of "to_compile" entries and add a
    # simple progress indicator.
    N = len(to_compile)
    progress_fmt = "[{0:%d}/{1}] " % len(str(N))
    for i in range(N):
        progress = progress_fmt.format(i+1, N)
        to_compile[i] = to_compile[i][1:] + (progress,)

    if N <= 1:
        nthreads = 0
    if nthreads:
        # Requires multiprocessing (or Python >= 2.6)
        try:
            import multiprocessing
            pool = multiprocessing.Pool(
                nthreads, initializer=_init_multiprocessing_helper)
        except (ImportError, OSError):
            print("multiprocessing required for parallel cythonization")
            nthreads = 0
        else:
            # This is a bit more involved than it should be, because KeyboardInterrupts
            # break the multiprocessing workers when using a normal pool.map().
            # See, for example:
            # http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
            try:
                result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
                pool.close()
                while not result.ready():
                    try:
                        result.get(99999)  # seconds
                    except multiprocessing.TimeoutError:
                        pass
            except KeyboardInterrupt:
                pool.terminate()
                raise
            pool.join()
    if not nthreads:
        for args in to_compile:
            cythonize_one(*args)

    if exclude_failures:
        failed_modules = set()
        for c_file, modules in modules_by_cfile.items():
            if not os.path.exists(c_file):
                failed_modules.update(modules)
            elif os.path.getsize(c_file) < 200:
                f = io_open(c_file, 'r', encoding='iso8859-1')
                try:
                    if f.read(len('#error ')) == '#error ':
                        # dead compilation result
                        failed_modules.update(modules)
                finally:
                    f.close()
        if failed_modules:
            for module in failed_modules:
                module_list.remove(module)
            print("Failed compilations: %s" % ', '.join(sorted([
                module.name for module in failed_modules])))

    if options.cache:
        cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
    # cythonize() is often followed by the (non-Python-buffered)
    # compiler output, flush now to avoid interleaving output.
    sys.stdout.flush()
    return module_list
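
cythonize() builds a fixed-width "[ i/N ]" prefix so the progress column stays aligned regardless of how many modules are compiled, and flushes at the end so its own messages do not interleave with the compiler output that follows. A tiny sketch of just the prefix-and-flush part (module names are invented):

import sys

def show_progress(n_total):
    width = len(str(n_total))
    fmt = "[{0:%d}/{1}] " % width          # e.g. "[{0:2}/{1}] " for n_total=12
    for i in range(1, n_total + 1):
        print(fmt.format(i, n_total) + "compiling module_%d.pyx" % i)
        sys.stdout.flush()

# show_progress(12) prints "[ 1/12] compiling module_1.pyx", ..., aligned.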

Example 90

Project: TheVGLC
Source File: WADRasterizer.py
View license
    def rasterize(self,scaling=1.0/32.0):
        view_box_size = self.scale(self.normalize(self.upper_right, 10),scaling)
        if view_box_size[0] > view_box_size[1]:
            canvas_size = (1024, int(1024*(float(view_box_size[1])/view_box_size[0])))
        else:
            canvas_size = (int(1024*(float(view_box_size[0])/view_box_size[1])), 1024)
        import numpy as np
        output = np.zeros([view_box_size[0]+1,view_box_size[1]+1])
        for line in self.lines:
            
            if line.is_one_sided():
                sys.stdout.flush()
                a = self.scale(self.normalize(self.vertices[line.a]),scaling)
                b = self.scale(self.normalize(self.vertices[line.b]),scaling)
                pts = self.get_line(a,b)
                for pt in pts:
                    if line.locked:
                        output[pt[0],pt[1]] = 13
                    elif line.teleport:
                        output[pt[0],pt[1]] = 14
                    elif line.door:
                        output[pt[0],pt[1]] = 15
                    elif line.exit:
                        output[pt[0],pt[1]] = 16                    
                    else:
                        output[pt[0],pt[1]] = 1
        for obj in self.objects:
            x,y,angle,type,spawn = obj
            (x,y) = self.scale(self.normalize((x,y)),scaling)
            self.fill(output, x, y,find=0,replace=2)
        
        for line in self.lines:
            if not line.is_one_sided():
                a =  self.scale(self.normalize(self.vertices[line.a]),scaling)
                b =  self.scale(self.normalize(self.vertices[line.b]),scaling)
                pts = self.get_line(a,b)
                for pt in pts:
                    if line.locked:
                        output[pt[0],pt[1]] = 13
                    elif line.teleport:
                        output[pt[0],pt[1]] = 14
                    elif line.door:
                        output[pt[0],pt[1]] = 15
                    elif line.exit:
                        output[pt[0],pt[1]] = 16                    
                    else:
                        output[pt[0],pt[1]] = 3
        
        for line in self.lines:
            
            if line.is_one_sided():
                sys.stdout.flush()
                a = self.scale(self.normalize(self.vertices[line.a]),scaling)
                b = self.scale(self.normalize(self.vertices[line.b]),scaling)
                pts = self.get_line(a,b)
                for pt in pts:
                    if line.locked:
                        output[pt[0],pt[1]] = 13
                    elif line.teleport:
                        output[pt[0],pt[1]] = 14
                    elif line.door:
                        output[pt[0],pt[1]] = 15
                    elif line.exit:
                        output[pt[0],pt[1]] = 16                    
                    else:
                        output[pt[0],pt[1]] = 1
        for obj in self.objects:
            x,y,angle,type,spawn = obj
            (x,y) = self.scale(self.normalize((x,y)),scaling)
            if type in enemy:
                output[x,y] = 4
            elif type in weapon:
                output[x,y] = 5
            
            elif type in ammo:
                output[x,y] = 6
            
            elif type in health:
                output[x,y] = 7
            
            elif type in environmental:
                output[x,y] = 8              
            
            elif 'Card' in type or ('Skull' in type and 'Floating' not in type):
                output[x,y] = 9         
            
            elif 'Player start' in type:
                output[x,y] = 10
            elif 'TeleportDest' in type:
                output[x,y] = 11                
            else :
                output[x,y] = 12
        #import matplotlib.pyplot as plt
        with open(self.name+'.txt','wb') as outfile:
            for yy in range(view_box_size[0]+1):
                str = ''
                for xx in range(view_box_size[1]+1):
                    str += id2char[output[yy,xx]]
                outfile.write(str + '\n')
        
            

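The rasterizer finishes by serializing the level grid to a text file, one character row per line. A compact, hypothetical sketch of that final step, emitting a flushed progress dot per row so long rasterizations show activity (the grid and symbol mapping below are made up):

import sys

def dump_grid(grid, id2char, path):
    with open(path, 'w') as outfile:
        for row in grid:
            outfile.write(''.join(id2char[v] for v in row) + '\n')
            sys.stdout.write('.')   # one dot per row written
            sys.stdout.flush()
    sys.stdout.write('\n')

# dump_grid([[0, 1, 0], [1, 2, 1], [0, 1, 0]], {0: '-', 1: 'X', 2: 'S'}, 'level.txt')
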
Example 91

Project: COMMIT
Source File: core.py
View license
    def set_threads( self, n = None ) :
        """Set the number of threads to use for the matrix-vector operations with A and A'.

        Parameters
        ----------
        n : integer
            Number of threads to use (default : number of CPUs in the system)
        """
        if n is None :
            # Set to the number of CPUs in the system
            try :
                import multiprocessing
                n = multiprocessing.cpu_count()
            except :
                n = 1

        if n < 1 or n > 255 :
            raise RuntimeError( 'Number of threads must be between 1 and 255' )
        if self.DICTIONARY is None :
            raise RuntimeError( 'Dictionary not loaded; call "load_dictionary()" first.' )
        if self.KERNELS is None :
            raise RuntimeError( 'Response functions not generated; call "generate_kernels()" and "load_kernels()" first.' )

        self.THREADS = {}
        self.THREADS['n'] = n

        tic = time.time()
        print '\n-> Distributing workload to different threads:'
        print '\t* number of threads : %d' % n

        # Distribute load for the computation of A*x product
        print '\t* A operator...',
        sys.stdout.flush()

        if self.DICTIONARY['IC']['n'] > 0 :
            self.THREADS['IC'] = np.zeros( n+1, dtype=np.uint32 )
            if n > 1 :
                N = np.floor( self.DICTIONARY['IC']['n']/n )
                t = 1
                tot = 0
                for c in np.bincount( self.DICTIONARY['IC']['v'] ) :
                    tot += c
                    if tot >= N :
                        self.THREADS['IC'][t] = self.THREADS['IC'][t-1] + tot
                        t += 1
                        tot = 0
            self.THREADS['IC'][-1] = self.DICTIONARY['IC']['n']

            # check if some threads are not assigned any segment
            if np.count_nonzero( np.diff( self.THREADS['IC'].astype(np.int32) ) <= 0 ) :
                self.THREADS = None
                raise RuntimeError( 'Too many threads for the IC compartments to evaluate; try decreasing the number.' )
        else :
            self.THREADS['IC'] = None

        if self.DICTIONARY['EC']['nE'] > 0 :
            self.THREADS['EC'] = np.zeros( n+1, dtype=np.uint32 )
            for i in xrange(n) :
                self.THREADS['EC'][i] = np.searchsorted( self.DICTIONARY['EC']['v'], self.DICTIONARY['IC']['v'][ self.THREADS['IC'][i] ] )
            self.THREADS['EC'][-1] = self.DICTIONARY['EC']['nE']

            # check if some threads are not assigned any segment
            if np.count_nonzero( np.diff( self.THREADS['EC'].astype(np.int32) ) <= 0 ) :
                self.THREADS = None
                raise RuntimeError( 'Too many threads for the EC compartments to evaluate; try decreasing the number.' )
        else :
            self.THREADS['EC'] = None

        if self.DICTIONARY['nV'] > 0 :
            self.THREADS['ISO'] = np.zeros( n+1, dtype=np.uint32 )
            for i in xrange(n) :
                self.THREADS['ISO'][i] = np.searchsorted( self.DICTIONARY['ISO']['v'], self.DICTIONARY['IC']['v'][ self.THREADS['IC'][i] ] )
            self.THREADS['ISO'][-1] = self.DICTIONARY['nV']

            # check if some threads are not assigned any segment
            if np.count_nonzero( np.diff( self.THREADS['ISO'].astype(np.int32) ) <= 0 ) :
                self.THREADS = None
                raise RuntimeError( 'Too many threads for the ISO compartments to evaluate; try decreasing the number.' )
        else :
            self.THREADS['ISO'] = None

        print ' [ OK ]'

        # Distribute load for the computation of At*y product
        print '\t* A\' operator...',
        sys.stdout.flush()

        if self.DICTIONARY['IC']['n'] > 0 :
            self.THREADS['ICt'] = np.full( self.DICTIONARY['IC']['n'], n-1, dtype=np.uint8 )
            if n > 1 :
                idx = np.argsort( self.DICTIONARY['IC']['fiber'], kind='mergesort' )
                C = np.bincount( self.DICTIONARY['IC']['fiber'] )

                t = tot = i1 = i2 = 0
                N = np.floor(self.DICTIONARY['IC']['n']/n)
                for i in xrange(C.size) :
                    i2 += C[i]
                    tot += C[i]
                    if tot >= N :
                        self.THREADS['ICt'][ i1:i2 ] = t
                        t += 1
                        if t==n-1 :
                            break
                        i1 = i2
                        tot = C[i]
                self.THREADS['ICt'][idx] = self.THREADS['ICt'].copy()

        else :
            self.THREADS['ICt'] = None

        if self.DICTIONARY['EC']['nE'] > 0 :
            self.THREADS['ECt'] = np.zeros( n+1, dtype=np.uint32 )
            N = np.floor( self.DICTIONARY['EC']['nE']/n )
            for i in xrange(1,n) :
                self.THREADS['ECt'][i] = self.THREADS['ECt'][i-1] + N
            self.THREADS['ECt'][-1] = self.DICTIONARY['EC']['nE']

            # check if some threads are not assigned any segment
            if np.count_nonzero( np.diff( self.THREADS['ECt'].astype(np.int32) ) <= 0 ) :
                self.THREADS = None
                raise RuntimeError( 'Too many threads for the EC compartments to evaluate; try decreasing the number.' )
        else :
            self.THREADS['ECt'] = None

        if self.DICTIONARY['nV'] > 0 :
            self.THREADS['ISOt'] = np.zeros( n+1, dtype=np.uint32 )
            N = np.floor( self.DICTIONARY['nV']/n )
            for i in xrange(1,n) :
                self.THREADS['ISOt'][i] = self.THREADS['ISOt'][i-1] + N
            self.THREADS['ISOt'][-1] = self.DICTIONARY['nV']

            # check if some threads are not assigned any segment
            if np.count_nonzero( np.diff( self.THREADS['ISOt'].astype(np.int32) ) <= 0 ) :
                self.THREADS = None
                raise RuntimeError( 'Too many threads for the ISO compartments to evaluate; try decreasing the number.' )
        else :
            self.THREADS['ISOt'] = None

        print '[ OK ]'

        print '   [ %.1f seconds ]' % ( time.time() - tic )
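
Example 91 prints each phase label with a trailing comma (Python 2's way of suppressing the newline) and flushes before distributing the workload, so the user sees which phase is running and then the ' [ OK ]' confirmation on the same line. A rough Python 3 equivalent of that label/flush/confirm pattern; the step helper is hypothetical and not part of COMMIT:

import sys
import time

def step(label, fn, *args):
    """Print a phase label, flush so it is visible while fn runs, then confirm."""
    print('\t* %s...' % label, end='')
    sys.stdout.flush()               # label shows up before the (possibly long) work
    result = fn(*args)
    print(' [ OK ]')
    return result

if __name__ == '__main__':
    step('A operator', time.sleep, 0.5)
    step("A' operator", time.sleep, 0.5)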

Example 92

Project: COMMIT
Source File: solvers.py
View license
def nnls( y, A, At = None, tol_fun = 1e-4, tol_x = 1e-9, max_iter = 100, verbose = 1, x0 = None ) :
    """Solve non negative least squares problem of the following form:

       min 0.5*||y-A x||_2^2 s.t. x >= 0

    The problem is solved using the forward-backward algorithm with FISTA-like acceleration.

    Parameters
    ----------
    y : 1-d array of doubles.
        Contains the measurements.

    A : matrix or class exposing the .dot() method.
        It is the forward measurement operator.

    At : matrix or class exposing the .dot() method.
        It is the corresponding adjoint operator (default: computed as A.T).

    tol_fun : double, optional (default: 1e-4).
        Minimum relative change of the objective value. The algorithm stops if:
               | f(x(t)) - f(x(t-1)) | / f(x(t)) < tol_fun,
        where x(t) is the estimate of the solution at iteration t.

    max_iter : integer, optional (default: 100).
        Maximum number of iterations.

    verbose : integer, optional (default: 1).
        0 no log, 1 print each iteration results.

    x0 : 1-d array of double, optional (default: automatically computed).
        Initial solution.

    Returns
    -------
    x : 1-d array of doubles.
        Best solution in the least-squares sense.

    Notes
    -----
    Author: Rafael Carrillo
    E-mail: [email protected]
    """
    # Initialization
    if At is None :
        At = A.T

    if x0 is not None :
        xhat = x0
        res = A.dot(xhat) - y
    else :
        xhat = np.zeros( A.shape[1], dtype=np.float64 )
        res = -y
    grad = At.dot(res)
    prev_obj = 0.5 * np.linalg.norm(res)**2
    iter = 1
    told = 1
    prev_x = xhat
    beta = 0.9
    qfval = prev_obj

    # Step size computation
    L = np.linalg.norm( A.dot(grad) )**2 / np.linalg.norm(grad)**2
    mu = 1.9 / L

    # Main loop
    if verbose >= 1 :
        print "      |   Cost function    Abs error      Rel error        Abs x          Rel x"
        print "------|----------------------------------------------------------------------------"

    while True :
        if verbose >= 1 :
            print "%4d  |" % iter,
            sys.stdout.flush()

        # Gradient descend step
        x = xhat - mu*grad

        # Projection onto the positive orthant
        x = np.real( x )
        x[ x<0 ] = 0

        # Stepsize check
        tmp = x-xhat
        q = qfval + np.real( np.dot(tmp,grad) ) + 0.5/mu * np.linalg.norm(tmp)**2
        res = A.dot(x) - y
        curr_obj = 0.5 * np.linalg.norm(res)**2

        # Backtracking
        while curr_obj > q :
            # Gradient descend step
            mu = beta*mu
            x = xhat - mu*grad

            # Projection onto the positive orthant
            x = np.real( x )
            x[ x<0 ] = 0

            # New stepsize check
            tmp = x-xhat
            q = qfval + np.real( np.dot(tmp,grad) ) + 0.5/mu * np.linalg.norm(tmp)**2
            res = A.dot(x) - y
            curr_obj = 0.5 * np.linalg.norm(res)**2

        # Global stopping criterion
        abs_obj = np.abs(curr_obj - prev_obj)
        rel_obj = abs_obj / curr_obj
        abs_x   = np.linalg.norm(x - prev_x)
        rel_x   = abs_x / np.linalg.norm(x)
        if verbose >= 1 :
            print "  %13.7e  %13.7e  %13.7e  %13.7e  %13.7e" % ( np.sqrt(2.0*curr_obj), abs_obj, rel_obj, abs_x, rel_x )

        if abs_obj < np.finfo(float).eps :
            criterion = "ABS_OBJ"
            break
        elif rel_obj < tol_fun :
            criterion = "REL_OBJ"
            break
        elif abs_x < np.finfo(float).eps :
            criterion = "ABS_X"
            break
        elif rel_x < tol_x :
            criterion = "REL_X"
            break
        elif iter >= max_iter :
            criterion = "MAX_IT"
            break

        # FISTA update
        t = 0.5 * ( 1 + np.sqrt(1+4*told**2) )
        xhat = x + (told-1)/t * (x - prev_x)

        # Gradient computation
        res = A.dot(xhat) - y
        grad = At.dot(res)

        # Update variables
        iter += 1
        prev_obj = curr_obj
        prev_x = x
        told = t
        qfval = 0.5 * np.linalg.norm(res)**2

    if verbose >= 1 :
        print "\t< Stopping criterion: %s >" % criterion

    return x
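
The docstring above describes solving min 0.5*||y - A x||_2^2 subject to x >= 0 with a forward-backward (projected gradient) scheme, printing one table row per iteration and flushing the row prefix before the expensive matrix products. The sketch below is a much-simplified projected gradient variant (no FISTA acceleration, no backtracking) that only illustrates the iteration layout and the flush call; nnls_pg and its fixed step size 1/L are assumptions, not COMMIT's solver:

import sys
import numpy as np

def nnls_pg(y, A, max_iter=200, tol=1e-6):
    """Plain projected gradient for min 0.5*||y - A x||^2 subject to x >= 0."""
    x = np.zeros(A.shape[1])
    L = np.linalg.norm(A, 2) ** 2        # Lipschitz constant of the gradient
    prev_obj = np.inf
    for it in range(1, max_iter + 1):
        print('%4d  |' % it, end='')
        sys.stdout.flush()               # row prefix is visible before the heavy work
        grad = A.T.dot(A.dot(x) - y)
        x = np.maximum(x - grad / L, 0.0)    # gradient step + projection onto x >= 0
        obj = 0.5 * np.linalg.norm(A.dot(x) - y) ** 2
        print('  %13.7e' % obj)
        if abs(prev_obj - obj) < tol * max(obj, 1e-12):
            break
        prev_obj = obj
    return x

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    A = rng.standard_normal((30, 10))
    y = A.dot(np.abs(rng.standard_normal(10)))
    print(nnls_pg(y, A))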

Example 93

Project: trtools
Source File: rmagic.py
View license
    @skip_doctest
    @magic_arguments()
    @argument(
        '-i', '--input', action='append',
        help='Names of input variable from shell.user_ns to be assigned to R variables of the same names after calling self.pyconverter. Multiple names can be passed separated only by commas with no whitespace.'
        )
    @argument(
        '-o', '--output', action='append',
        help='Names of variables to be pushed from rpy2 to shell.user_ns after executing cell body and applying self.Rconverter. Multiple names can be passed separated only by commas with no whitespace.'
        )
    @argument(
        '-w', '--width', type=int,
        help='Width of png plotting device sent as an argument to *png* in R.'
        )
    @argument(
        '-h', '--height', type=int,
        help='Height of png plotting device sent as an argument to *png* in R.'
        )

    @argument(
        '-d', '--dataframe', action='append',
        help='Convert these objects to data.frames and return as structured arrays.'
        )
    @argument(
        '-u', '--units', type=int,
        help='Units of png plotting device sent as an argument to *png* in R. One of ["px", "in", "cm", "mm"].'
        )
    @argument(
        '-p', '--pointsize', type=int,
        help='Pointsize of png plotting device sent as an argument to *png* in R.'
        )
    @argument(
        '-b', '--bg',
        help='Background of png plotting device sent as an argument to *png* in R.'
        )
    @argument(
        '-n', '--noreturn',
        help='Force the magic to not return anything.',
        action='store_true',
        default=False
        )
    @argument(
        'code',
        nargs='*',
        )
    @needs_local_scope
    @line_cell_magic
    def R(self, line, cell=None, local_ns=None):
        '''
        Execute code in R, and pull some of the results back into the Python namespace.

        In line mode, this will evaluate an expression and convert the returned value to a Python object.
        The return value is determined by rpy2's behaviour of returning the result of evaluating the
        final line. 

        Multiple R lines can be executed by joining them with semicolons::

            In [9]: %R X=c(1,4,5,7); sd(X); mean(X)
            Out[9]: array([ 4.25])

        As a cell, this will run a block of R code, without bringing anything back by default::

            In [10]: %%R
               ....: Y = c(2,4,3,9)
               ....: print(summary(lm(Y~X)))
               ....:

            Call:
            lm(formula = Y ~ X)

            Residuals:
                1     2     3     4
             0.88 -0.24 -2.28  1.64

            Coefficients:
                        Estimate Std. Error t value Pr(>|t|)
            (Intercept)   0.0800     2.3000   0.035    0.975
            X             1.0400     0.4822   2.157    0.164

            Residual standard error: 2.088 on 2 degrees of freedom
            Multiple R-squared: 0.6993,Adjusted R-squared: 0.549
            F-statistic: 4.651 on 1 and 2 DF,  p-value: 0.1638

        In the notebook, plots are published as the output of the cell.

        %R plot(X, Y)

        will create a scatter plot of X vs Y.

        If cell is not None and line has some R code, it is prepended to
        the R code in cell.

        Objects can be passed back and forth between rpy2 and python via the -i -o flags in line::

            In [14]: Z = np.array([1,4,5,10])

            In [15]: %R -i Z mean(Z)
            Out[15]: array([ 5.])


            In [16]: %R -o W W=Z*mean(Z)
            Out[16]: array([  5.,  20.,  25.,  50.])

            In [17]: W
            Out[17]: array([  5.,  20.,  25.,  50.])

        The return value is determined by these rules:

        * If the cell is not None, the magic returns None.

        * If the cell evaluates as False, the resulting value is returned
        unless the final line prints something to the console, in
        which case None is returned.

        * If the final line results in a NULL value when evaluated
        by rpy2, then None is returned.

        * No attempt is made to convert the final value to a structured array.
        Use the --dataframe flag or %Rget to push / return a structured array.

        * If the -n flag is present, there is no return value.

        * A trailing ';' will also result in no return value as the last
        value in the line is an empty string.

        The --dataframe argument will attempt to return structured arrays. 
        This is useful for dataframes with
        mixed data types. Note also that for a data.frame, 
        if it is returned as an ndarray, it is transposed::

            In [18]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]

            In [19]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)

            In [20]: %%R -o datar
            datar = datapy
               ....: 

            In [21]: datar
            Out[21]: 
            array([['1', '2', '3', '4'],
                   ['2', '3', '2', '5'],
                   ['a', 'b', 'c', 'e']], 
                  dtype='|S1')

            In [22]: %%R -d datar
            datar = datapy
               ....: 

            In [23]: datar
            Out[23]: 
            array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')], 
                  dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])

        The --dataframe argument first tries colnames, then names.
        If both are NULL, it returns an ndarray (i.e. unstructured)::

            In [1]: %R mydata=c(4,6,8.3); NULL

            In [2]: %R -d mydata

            In [3]: mydata
            Out[3]: array([ 4. ,  6. ,  8.3])

            In [4]: %R names(mydata) = c('a','b','c'); NULL

            In [5]: %R -d mydata

            In [6]: mydata
            Out[6]: 
            array((4.0, 6.0, 8.3), 
                  dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])

            In [7]: %R -o mydata

            In [8]: mydata
            Out[8]: array([ 4. ,  6. ,  8.3])

        '''

        args = parse_argstring(self.R, line)

        # arguments 'code' in line are prepended to
        # the cell lines

        if cell is None:
            code = ''
            return_output = True
            line_mode = True
        else:
            code = cell
            return_output = False
            line_mode = False

        code = ' '.join(args.code) + code

        # if there is no local namespace then default to an empty dict
        if local_ns is None:
            local_ns = {}

        if args.input:
            for input in ','.join(args.input).split(','):
                try:
                    val = local_ns[input]
                except KeyError:
                    val = self.shell.user_ns[input]
                self.r.assign(input, self.pyconverter(val))

        png_argdict = dict([(n, getattr(args, n)) for n in ['units', 'height', 'width', 'bg', 'pointsize']])
        png_args = ','.join(['%s=%s' % (o,v) for o, v in list(png_argdict.items()) if v is not None])
        # execute the R code in a temporary directory

        tmpd = tempfile.mkdtemp()
        self.r('png("%s/Rplots%%03d.png",%s)' % (tmpd, png_args))

        text_output = ''
        if line_mode:
            for line in code.split(';'):
                text_result, result = self.eval(line)
                text_output += text_result
            if text_result:
                # the last line printed something to the console so we won't return it
                return_output = False
        else:
            text_result, result = self.eval(code)
            text_output += text_result

        self.r('dev.off()')

        # read out all the saved .png files

        images = [open(imgfile, 'rb').read() for imgfile in glob("%s/Rplots*png" % tmpd)]

        # now publish the images
        # mimicking IPython/zmq/pylab/backend_inline.py
        fmt = 'png'
        mimetypes = { 'png' : 'image/png', 'svg' : 'image/svg+xml' }
        mime = mimetypes[fmt]

        # publish the printed R objects, if any

        display_data = []
        if text_output:
            display_data.append(('RMagic.R', {'text/plain':text_output}))

        # flush text streams before sending figures, helps a little with output
        for image in images:
            # synchronization in the console (though it's a bandaid, not a real sln)
            sys.stdout.flush(); sys.stderr.flush()
            display_data.append(('RMagic.R', {mime: image}))

        # kill the temporary directory
        rmtree(tmpd)

        # try to turn every output into a numpy array
        # this means that output are assumed to be castable
        # as numpy arrays

        if args.output:
            for output in ','.join(args.output).split(','):
                self.shell.push({output:self.Rconverter(self.r(output), dataframe=False)})

        if args.dataframe:
            for output in ','.join(args.dataframe).split(','):
                self.shell.push({output:self.Rconverter(self.r(output), dataframe=True)})

        for tag, disp_d in display_data:
            publish_display_data(tag, disp_d)

        # this will keep a reference to the display_data
        # which might be useful to other objects who happen to use
        # this method

        if self.cache_display_data:
            self.display_cache = display_data

        # if in line mode and return_output, return the result as an ndarray
        if return_output and not args.noreturn:
            if result != ri.NULL:
                return self.Rconverter(result, dataframe=False)
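
Example 93 flushes both stdout and stderr right before publishing each figure so that text already printed is not reordered after the image in front-ends that multiplex the two streams with rich output. A small sketch of that flush-before-rich-output idea; publish_images and the publish callback are stand-ins, not IPython's publish_display_data API:

import sys

def publish_images(images, publish):
    """Flush both text streams before each image so earlier prints keep
    their order relative to the figures in multiplexing front-ends."""
    for image in images:
        sys.stdout.flush()
        sys.stderr.flush()
        publish({'image/png': image})

if __name__ == '__main__':
    fake_images = [b'\x89PNG...one', b'\x89PNG...two']
    publish_images(fake_images,
                   lambda data: print('published %d bytes' % len(data['image/png'])))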

Example 94

View license
def main(_):
  if FLAGS.self_test:
    print('Running self-test.')
    train_data, train_labels = fake_data(256)
    validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
    test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
    num_epochs = 1
  else:
    # Get the data.
    train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
    train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
    test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

    # Extract it into numpy arrays.
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)

    # Generate a validation set.
    validation_data = train_data[:VALIDATION_SIZE, ...]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, ...]
    train_labels = train_labels[VALIDATION_SIZE:]
    num_epochs = NUM_EPOCHS
  train_size = train_labels.shape[0]

  # This is where training samples and labels are fed to the graph.
  # These placeholder nodes will be fed a batch of training data at each
  # training step using the {feed_dict} argument to the Run() call below.
  train_data_node = tf.placeholder(
      data_type(),
      shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
  eval_data = tf.placeholder(
      data_type(),
      shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))

  # The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
  # {tf.global_variables_initializer().run()}
  conv1_weights = tf.Variable(
      tf.truncated_normal([5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
                          stddev=0.1,
                          seed=SEED, dtype=data_type()))
  conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
  conv2_weights = tf.Variable(tf.truncated_normal(
      [5, 5, 32, 64], stddev=0.1,
      seed=SEED, dtype=data_type()))
  conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
  fc1_weights = tf.Variable(  # fully connected, depth 512.
      tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
                          stddev=0.1,
                          seed=SEED,
                          dtype=data_type()))
  fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
  fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],
                                                stddev=0.1,
                                                seed=SEED,
                                                dtype=data_type()))
  fc2_biases = tf.Variable(tf.constant(
      0.1, shape=[NUM_LABELS], dtype=data_type()))

  # We will replicate the model structure for the training subgraph, as well
  # as the evaluation subgraphs, while sharing the trainable parameters.
  def model(data, train=False):
    """The Model definition."""
    # 2D convolution, with 'SAME' padding (i.e. the output feature map has
    # the same size as the input). Note that {strides} is a 4D array whose
    # shape matches the data layout: [image index, y, x, depth].
    conv = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias and rectified linear non-linearity.
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    # Max pooling. The kernel size spec {ksize} also follows the layout of
    # the data. Here we have a pooling window of 2, and a stride of 2.
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    conv = tf.nn.conv2d(pool,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Reshape the feature map cuboid into a 2D matrix to feed it to the
    # fully connected layers.
    pool_shape = pool.get_shape().as_list()
    reshape = tf.reshape(
        pool,
        [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
    # Fully connected layer. Note that the '+' operation automatically
    # broadcasts the biases.
    hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    # Add a 50% dropout during training only. Dropout also scales
    # activations such that no rescaling is needed at evaluation time.
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
    return tf.matmul(hidden, fc2_weights) + fc2_biases

  # Training computation: logits + cross-entropy loss.
  logits = model(train_data_node, True)
  loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits, train_labels_node))

  # L2 regularization for the fully connected parameters.
  regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                  tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
  # Add the regularization term to the loss.
  loss += 5e-4 * regularizers

  # Optimizer: set up a variable that's incremented once per batch and
  # controls the learning rate decay.
  batch = tf.Variable(0, dtype=data_type())
  # Decay once per epoch, using an exponential schedule starting at 0.01.
  learning_rate = tf.train.exponential_decay(
      0.01,                # Base learning rate.
      batch * BATCH_SIZE,  # Current index into the dataset.
      train_size,          # Decay step.
      0.95,                # Decay rate.
      staircase=True)
  # Use simple momentum for the optimization.
  optimizer = tf.train.MomentumOptimizer(learning_rate,
                                         0.9).minimize(loss,
                                                       global_step=batch)

  # Predictions for the current training minibatch.
  train_prediction = tf.nn.softmax(logits)

  # Predictions for the test and validation, which we'll compute less often.
  eval_prediction = tf.nn.softmax(model(eval_data))

  # Small utility function to evaluate a dataset by feeding batches of data to
  # {eval_data} and pulling the results from {eval_predictions}.
  # Saves memory and enables this to run on smaller GPUs.
  def eval_in_batches(data, sess):
    """Get all predictions for a dataset by running it in small batches."""
    size = data.shape[0]
    if size < EVAL_BATCH_SIZE:
      raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
    for begin in xrange(0, size, EVAL_BATCH_SIZE):
      end = begin + EVAL_BATCH_SIZE
      if end <= size:
        predictions[begin:end, :] = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[begin:end, ...]})
      else:
        batch_predictions = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
        predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions

  # Create a local session to run the training.
  start_time = time.time()
  with tf.Session() as sess:
    # Run all the initializers to prepare the trainable parameters.
    tf.global_variables_initializer().run()
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
      # Compute the offset of the current minibatch in the data.
      # Note that we could use better randomization across epochs.
      offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
      batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
      batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
      # This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
      feed_dict = {train_data_node: batch_data,
                   train_labels_node: batch_labels}
      # Run the optimizer to update weights.
      sess.run(optimizer, feed_dict=feed_dict)
      # print some extra information once reach the evaluation frequency
      if step % EVAL_FREQUENCY == 0:
        # fetch some extra nodes' data
        l, lr, predictions = sess.run([loss, learning_rate, train_prediction],
                                      feed_dict=feed_dict)
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print('Step %d (epoch %.2f), %.1f ms' %
              (step, float(step) * BATCH_SIZE / train_size,
               1000 * elapsed_time / EVAL_FREQUENCY))
        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
        print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
        print('Validation error: %.1f%%' % error_rate(
            eval_in_batches(validation_data, sess), validation_labels))
        sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
      print('test_error', test_error)
      assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
          test_error,)
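
The training loop above prints a progress block every EVAL_FREQUENCY steps and flushes immediately afterwards, which matters when stdout is redirected to a log file and would otherwise be block-buffered. A stripped-down sketch of just that logging pattern, with a sleep standing in for the optimizer step and a random number standing in for the loss (neither comes from the TensorFlow example):

import random
import sys
import time

EVAL_FREQUENCY = 100

def train(num_steps=500):
    start_time = time.time()
    for step in range(num_steps):
        time.sleep(0.001)                      # stand-in for one optimizer step
        if step % EVAL_FREQUENCY == 0:
            elapsed_time = time.time() - start_time
            start_time = time.time()
            print('Step %d, %.1f ms/step, loss %.3f'
                  % (step, 1000 * elapsed_time / EVAL_FREQUENCY, random.random()))
            sys.stdout.flush()                 # progress shows up even when piped to a log

if __name__ == '__main__':
    train()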

Example 95

Project: metaseq
Source File: signal_comparison.py
View license
def compare(signal1, signal2, features, outfn, comparefunc=np.subtract,
        batchsize=5000, array_kwargs=None, verbose=False):
    """
    Compares two genomic signal objects and outputs results as a bedGraph file.
    Can be used for entire genome-wide comparisons due to its parallel nature.

    Typical usage would be to create genome-wide windows of equal size to
    provide as `features`::

        windowsize = 10000
        features = pybedtools.BedTool().window_maker(
           genome='hg19', w=windowsize)

    You will usually want to choose bins for the array based on the final
    resolution you would like. Say you would like 10-bp bins in the final
    bedGraph; using the example above you would use array_kwargs={'bins':
    windowsize/10}.  Or, for single-bp resolution (beware: file will be large),
    use {'bins': windowsize}.

    Here's how it works.  This function:

        * Takes `batchsize` features at a time from `features`

        * Constructs normalized (RPMMR) arrays in parallel for each input
          genomic signal object for those `batchsize` features

        * Applies `comparefunc` (np.subtract by default) to the arrays to get
          a "compared" (e.g., difference matrix by default) for the `batchsize`
          features.

        * For each row in this matrix, it outputs each nonzero column as
          a bedGraph format line in `outfn`

    `comparefunc` is a function with the signature::

        def f(x, y):
            return z

    where `x` and `y` will be arrays for `signal1` and `signal2` (normalized to
    RPMMR) and `z` is a new array.  By default this is np.subtract, but another
    common `comparefunc` might be a log2-fold-change function::

        def lfc(x, y):
            return np.log2(x / y)

    :param signal1: A genomic_signal object
    :param signal2: Another genomic_signal object
    :param features: An iterable of pybedtools.Interval objects. A list will be
        created for every `batchsize` features, so you need enough memory for
        this.
    :param comparefunc: Function to use to compare arrays (default is
        np.subtract)
    :param outfn: String filename to write bedGraph file
    :param batchsize: Number of features (each with length `windowsize` bp) to
        process at a time
    :param array_kwargs: Kwargs passed directly to genomic_signal.array.  Needs
        `processes` and `chunksize` if you want parallel processing
    :param verbose: Be noisy
    """
    fout = open(outfn, 'w')
    fout.write('track type=bedGraph\n')

    i = 0
    this_batch = []
    for feature in features:
        if i <= batchsize:
            this_batch.append(feature)
            i += 1
            continue

        if verbose:
            print 'working on batch of %s' % batchsize
            sys.stdout.flush()

        arr1 = signal1.array(this_batch, **array_kwargs).astype(float)
        arr2 = signal2.array(this_batch, **array_kwargs).astype(float)
        arr1 /= signal1.million_mapped_reads()
        arr2 /= signal2.million_mapped_reads()
        compared = comparefunc(arr1, arr2)

        for feature, row in itertools.izip(this_batch, compared):
            start = feature.start
            bins = len(row)
            binsize = len(feature) / len(row)

            # Quickly move on if nothing here.  speed increase prob best for
            # sparse data
            if sum(row) == 0:
                continue

            for j in range(0, len(row)):
                score = row[j]
                stop = start + binsize
                if score != 0:
                    fout.write('\t'.join([
                        feature.chrom,
                        str(start),
                        str(stop),
                        str(score)]) + '\n')
                start = start + binsize
        this_batch = []
        i = 0
    fout.close()
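
The compare() docstring explains that comparefunc takes the two normalized arrays and returns a new array, giving a log2 fold change as an alternative, and the loop prints and flushes a short message per batch when verbose. A hedged sketch combining those two ideas; compare_batches and the pseudocount are illustrative choices, not metaseq code:

import sys
import numpy as np

def lfc(x, y, pseudocount=1e-6):
    """Log2 fold change; the pseudocount avoids division by zero
    (the docstring's lfc example divides directly)."""
    return np.log2((x + pseudocount) / (y + pseudocount))

def compare_batches(batches, verbose=True):
    """Apply the compare function batch by batch, logging progress."""
    for i, (arr1, arr2) in enumerate(batches, 1):
        if verbose:
            print('working on batch %d' % i)
            sys.stdout.flush()       # useful when stdout is redirected to a log file
        yield lfc(arr1, arr2)

if __name__ == '__main__':
    rng = np.random.default_rng(1)
    batches = [(rng.random((4, 8)), rng.random((4, 8))) for _ in range(3)]
    for diff in compare_batches(batches):
        print(diff.shape)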

Example 96

Project: stoq
Source File: logo.py
View license
def print_logo():
    """

    print random ascii art

    """

    logo = []

    logo.append("""
    .------..------..------..------.
    |S.--. ||T.--. ||O.--. ||Q.--. |
    | :/\: || :/\: || :/\: || (\/) |
    | :\/: || (__) || :\/: || :\/: |
    | '--'S|| '--'T|| '--'O|| '--'Q|
    `------'`------'`------'`------'

          Analysis. Simplified.
                 v{}
    """.format(__version__))

    logo.append("""
          *******                               * ***
        *       ***      *                    *  ****
       *         **     **                   *  *  ***
       **        *      **                  *  **   ***
        ***           ********    ****     *  ***    ***
       ** ***        ********    * ***  * **   **     **
        *** ***         **      *   ****  **   **     **
          *** ***       **     **    **   **   **     **
            *** ***     **     **    **   **   **     **
              ** ***    **     **    **   **   **     **
               ** **    **     **    **    **  ** *** **
                * *     **     **    **     ** *   ****
      ***        *      **      ******       ***     ***
     *  *********        **      ****         ******* **
    *     *****                                 ***   **
    *                                                 **
     **                                               *
                                                     *
                                                    *
                    Analysis. Simplified.
                          v{}
    """.format(__version__))

    logo.append("""
     .d8888b.  888             .d88888b.
    d88P  Y88b 888            d88P" "Y88b
    Y88b.      888            888     888
     "Y888b.   888888 .d88b.  888     888
        "Y88b. 888   d88""88b 888     888
          "888 888   888  888 888 Y8b 888
    Y88b  d88P Y88b. Y88..88P Y88b.Y8b88P
     "Y8888P"   "Y888 "Y88P"   "Y888888"
                                     Y8b
            Analysis. Simplified.
                  v{}
    """.format(__version__))

    logo.append("""
     _______ _______  _____   _____
     |______    |    |     | |   __|
     ______|    |    |_____| |____\|

           Analysis. Simplified.
                 v{}
    """.format(__version__))

    logo.append("""
      .--.--.       ___                  ,----..
     /  /    '.   ,--.'|_               /   /   '
    |  :  /`. /   |  | :,'    ,---.    /   .     :
    ;  |  |--`    :  : ' :   '   ,'\  .   /   ;.  '
    |  :  ;_    .;__,'  /   /   /   |.   ;   /  ` ;
     \  \    `. |  |   |   .   ; ,. :;   |  ; \ ; |
      `----.   \:__,'| :   '   | |: :|   :  | ; | '
      __ \  \  |  '  : |__ '   | .; :.   |  ' ' ' :
     /  /`--'  /  |  | '.'||   :    |'   ;  \; /  |
    '--'.     /   ;  :    ; \   \  /  \   \  ',  . "
      `--'---'    |  ,   /   `----'    ;   :      ; |
                   ---`-'               \   \ .'`--"
                                         `---`
                Analysis. Simplified.
                      v{}
    """.format(__version__))

    logo.append("""
     _______ _________ _______  _______
    (  ____ \\__   __/(  ___  )(  ___  )
    | (    \/   ) (   | (   ) || (   ) |
    | (_____    | |   | |   | || |   | |
    (_____  )   | |   | |   | || |   | |
          ) |   | |   | |   | || | /\| |
    /\____) |   | |   | (___) || (_\ \ |
    \_______)   )_(   (_______)(____\/_)

            Analysis. Simplified.
                  v{}
    """.format(__version__))

    logo.append("""
      _________  __          ________
     /   _____/_/  |_  ____  \_____  -
     \_____  \ \   __\/  _ \  /  / \  -
     /        \ |  | (  <_> )/   \_/.  -
    /_______  / |__|  \____/ \_____\ \_/
            \/                      \__>

            Analysis. Simplified.
                  v{}
    """.format(__version__))

    logo.append("""
               ___
              (   )
        .--.   | |_      .--.    .--.
      /  _  \ (   __)   /    \  /    "
     . .' `. ; | |     |  .-. ;|  .-. '
     | '   | | | | ___ | |  | || |  | |
     _\_`.(___)| |(   )| |  | || |  | |
    (   ). '.  | | | | | |  | || |  | |
     | |  `\ | | ' | | | '  | || '  | |
     ; '._,' ' ' `-' ; '  `-' /' `-'  |
      '.___.'   `.__.   `.__.'  `._ / |
                                    | |
                                   (___)

            Analysis. Simplified.
                  v{}
    """.format(__version__))

    logo.append("""
    ███████╗████████╗ ██████╗  ██████╗
    ██╔════╝╚══██╔══╝██╔═══██╗██╔═══██╗
    ███████╗   ██║   ██║   ██║██║   ██║
    ╚════██║   ██║   ██║   ██║██║▄▄ ██║
    ███████║   ██║   ╚██████╔╝╚██████╔╝
    ╚══════╝   ╚═╝    ╚═════╝  ╚══▀▀═╝

           Analysis. Simplified.
                 v{}
    """.format(__version__))

    logo.append("""
     ▄████████     ███      ▄██████▄  ████████▄
    ███    ███ ▀█████████▄ ███    ███ ███    ███
    ███    █▀     ▀███▀▀██ ███    ███ ███    ███
    ███            ███   ▀ ███    ███ ███    ███
    ▀███████████   ███     ███    ███ ███    ███
             ███   ███     ███    ███ ███    ███
       ▄█    ███   ███     ███    ███ ███  ▀ ███
     ▄████████▀   ▄████▀    ▀██████▀   ▀██████▀▄█

                   Analysis. Simplified.
                         v{}
    """.format(__version__))

    sys.stdout.flush()
    try:
        print(random.choice(logo))
    except:
        print(logo[3])
    sys.stdout.flush()
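
Example 96 flushes before and after printing a randomly chosen banner so the ASCII art comes out in one piece relative to any surrounding log output, and falls back to a fixed logo if printing the random choice fails. A tiny sketch of that pattern; BANNERS and the IndexError fallback are placeholders rather than stoq's actual logic, which uses a bare except:

import random
import sys

BANNERS = ['banner one', 'banner two', 'banner three']

def print_banner():
    sys.stdout.flush()               # push anything already buffered
    try:
        print(random.choice(BANNERS))
    except IndexError:               # e.g. an empty banner list
        print('fallback banner')
    sys.stdout.flush()               # banner is fully out before later log lines

if __name__ == '__main__':
    print_banner()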

Example 97

Project: PythonJS
Source File: run.py
View license
def run_test_on(filename):
    """run one test and returns the number of errors"""
    if not show_details:
        f = open(filename)
        comment = f.readline().strip(" \n\"'")
        f.close()
        print(table_header % (filename[2:-3], comment), end='')
    sum_errors = {}

    if filename.endswith('.html'):
        run_html_test( filename, sum_errors )
        return sum_errors

    def display(function):
        global _test_description
        _test_description = function.__doc__
        if show_details:
            print('\n<%s>\n' % function.__doc__)

        errors = function(filename)
        if errors:
            if not show_details:
                print(table_cell % ''.join('%s%d' % (k[0], v)
                                            for k, v in errors.items()),
                      end='')
        else:
            if not show_details:
                print(table_cell % 'OK', end='')
        sys.stdout.flush()

        for k, v in errors.items():
            sum_errors[k] = sum_errors.get(k, 0) + v

        if show_details:
            print('-'*77)

    if 'requirejs' not in filename and not filename.startswith('./go/'):
        display(run_python_test_on)
        display(run_python3_test_on)
        if pypy_runnable:
            display(run_pypy_test_on)
        if old_pypy_runnable:
            display(run_old_pypy_test_on)

    global js
    if not filename.startswith('./go/'):
        js = translate_js(
            filename, 
            javascript=False, 
            multioutput=filename.startswith('./threads/') or filename.startswith('./bench/webworker')
        )
        if rhino_runnable:
            display(run_pythonjs_test_on)
        if node_runnable:
            display(run_pythonjs_test_on_node)

        if nodewebkit_runnable:
            display(run_pythonjs_test_on_nodewebkit)


    if '--no-javascript-mode' not in sys.argv and not filename.startswith('./go/'):
        js = translate_js(filename, javascript=True, multioutput=filename.startswith('./threads/') or filename.startswith('./bench/webworker'))
        if rhino_runnable:
            display(run_pythonjsjs_test_on)
        if node_runnable:
            display(run_pythonjsjs_test_on_node)

        if nodewebkit_runnable:
            display(run_pythonjsjs_test_on_nodewebkit)


    if 'requirejs' not in filename:

        if dart_runnable:
            js = translate_js(filename, javascript=False, dart=True)
            display(run_pythonjs_dart_test_on_dart)

        if dart2js_runnable and node_runnable:
            js = translate_js(filename, javascript=False, dart=True)
            display(run_pythonjs_dart_test_on_node)

        if coffee_runnable and node_runnable:
            js = translate_js(filename, javascript=False, dart=False, coffee=True)
            display(run_pythonjs_coffee_test_on_node)

        if luajs_runnable and node_runnable:
            js = translate_js(filename, luajs=True)
            display(run_pythonjs_luajs_test_on_node)

        if lua_runnable:
            js = translate_js(filename, lua=True)
            display(run_pythonjs_lua_test_on_lua)

        if luajit_runnable:
            js = translate_js(filename, lua=True)
            display(run_pythonjs_lua_test_on_luajit)

        if go_runnable:
            js = translate_js(filename, go=True)
            display(run_pythonjs_go_test)

        if gopherjs_runnable:
            js = translate_js(filename, gopherjs=True)
            display(run_pythonjs_gopherjs_test)

    print()
    return sum_errors
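
Example 97 builds a results table one cell at a time with print(..., end='') and flushes after each cell, so a partially completed row is visible while the slower backends are still running. A self-contained sketch of that incremental-row pattern; run_suite and the %-12s cell format are made up for illustration:

import sys

def run_suite(tests, table_cell='%-12s'):
    """Print one fixed-width cell per test as results arrive; flushing keeps
    the partially built row visible while slower tests run."""
    for _name, fn in tests:
        try:
            fn()
            print(table_cell % 'OK', end='')
        except Exception as exc:
            print(table_cell % type(exc).__name__, end='')
        sys.stdout.flush()
    print()

if __name__ == '__main__':
    run_suite([('passes', lambda: None),
               ('raises', lambda: 1 / 0),
               ('passes too', lambda: None)])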

Example 98

Project: apilogs
Source File: core.py
View license
    def list_logs(self):
        streams = []

        if self.log_stream_name != self.ALL_WILDCARD:
            streams = list(self._get_streams_from_pattern(self.log_group_name, self.log_stream_name))

            if len(streams) > self.FILTER_LOG_EVENTS_STREAMS_LIMIT:
                raise exceptions.TooManyStreamsFilteredError(
                     self.log_stream_name,
                     len(streams),
                     self.FILTER_LOG_EVENTS_STREAMS_LIMIT
                )
            if len(streams) == 0:
                raise exceptions.NoStreamsFilteredError(self.log_stream_name)

        max_stream_length = max([len(s) for s in streams]) if streams else 10
        group_length = len(self.log_group_name)

        queue, exit = Queue(), Event()

        def update_next_token(response, kwargs):
            group = kwargs['logGroupName']

            if 'nextToken' in response:
                next = response['nextToken']
    
                self.next_tokens[group] = next

                #print "Updated tokens"
                #print self.next_tokens
            else:
                if group in self.next_tokens:
                    del self.next_tokens[group]

                if self.watch:
                    time.sleep(0.2)
                else:
                    queue.put(None)
                    return

        ## todo: remove shared kwargs
        def list_lambda_logs(allevents, kwargs):
            # add events from lambda function streams
            fxns = self.get_lambda_function_names(self.api_id, self.stage)
            for fxn in fxns:
                lambda_group = ("/aws/lambda/" + fxn).split(':')[0]
                kwargs['logGroupName'] = lambda_group
    
                if lambda_group in self.next_tokens:
                    kwargs['nextToken'] = self.next_tokens[lambda_group]
                else:
                    if 'nextToken' in kwargs:
                        del kwargs['nextToken']

                lambdaresponse = filter_log_events(**kwargs)
                events = lambdaresponse.get('events', [])
                for event in events:
                    event['group_name'] = lambda_group
                    allevents.append(event)
                update_next_token(lambdaresponse, kwargs)
                return allevents

        ## todo: remove shared kwargs
        def list_apigateway_logs(allevents, kwargs):
            # add events from API Gateway streams
            kwargs['logGroupName'] = self.log_group_name
            if self.log_group_name in self.next_tokens:
                kwargs['nextToken'] = self.next_tokens[self.log_group_name]
            else:
                if 'nextToken' in kwargs:
                    del kwargs['nextToken']

            try:
                apigresponse = filter_log_events(**kwargs)
            except Exception as e:
                print "Error fetching logs for API {0}. Please ensure logging is enabled for this API and the API is deployed. " \
                      "See http://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-stage-settings.html: {1}".format(self.api_id, e)
                raise

            events = apigresponse.get('events', [])
            for event in events:
                event['group_name'] = self.log_group_name
                allevents.append(event)
            update_next_token(apigresponse, kwargs)
            return allevents

        def filter_log_events(**kwargs):
            try:
                resp = self.client.filter_log_events(**kwargs)

                if 'nextToken' in resp:
                    group = kwargs['logGroupName']
                    next = resp['nextToken']
                    #print "Resp: Group: " + group + " nextToken: " + next

                #print resp

                return resp
            except Exception as e:
                print "Caught error from CloudWatch: {0}".format(e)
                raise


        def consumer():
            while not exit.is_set():
                event = queue.get()

                if event is None:
                    exit.set()
                    break

                output = []
                if self.output_group_enabled:
                    output.append(
                        self.color(
                            event['group_name'].ljust(group_length, ' '),
                            'green'
                        )
                    )
                if self.output_stream_enabled:
                    output.append(
                        self.color(
                            event['logStreamName'].ljust(max_stream_length,
                                                         ' '),
                            'cyan'
                        )
                    )
                if self.output_timestamp_enabled:
                    output.append(
                        self.color(
                            milis2iso(event['timestamp']),
                            'yellow'
                        )
                    )
                if self.output_ingestion_time_enabled:
                    output.append(
                        self.color(
                            milis2iso(event['ingestionTime']),
                            'blue'
                        )
                    )
                output.append(event['message'])
                print(' '.join(output))
                sys.stdout.flush()

        def generator():
            """Push events into queue trying to deduplicate them using a lru queue.
            AWS API stands for the interleaved parameter that:
                interleaved (boolean) -- If provided, the API will make a best
                effort to provide responses that contain events from multiple
                log streams within the log group interleaved in a single
                response. That makes some responses return some subsequent
                response duplicate events. In a similar way when awslogs is
                called with --watch option, we need to findout which events we
                have alredy put in the queue in order to not do it several
                times while waiting for new ones and reusing the same
                next_token. The site of this queue is MAX_EVENTS_PER_CALL in
                order to not exhaust the memory.
            """
            interleaving_sanity = deque(maxlen=self.MAX_EVENTS_PER_CALL)
            kwargs = {'logGroupName': self.log_group_name,
                      'interleaved': True}

            if streams:
                kwargs['logStreamNames'] = streams

            if self.start:
                kwargs['startTime'] = self.start

            if self.end:
                kwargs['endTime'] = self.end

            if self.filter_pattern:
                kwargs['filterPattern'] = self.filter_pattern

            while not exit.is_set():
                allevents = []
                
                list_apigateway_logs(allevents, kwargs)
                list_lambda_logs(allevents, kwargs)

                sorted(allevents, key=itemgetter('timestamp'))

                for event in allevents:
                    if event['eventId'] not in interleaving_sanity:
                        interleaving_sanity.append(event['eventId'])
                        queue.put(event)

                #print response

        g = Thread(target=generator)
        g.start()

        c = Thread(target=consumer)
        c.start()

        try:
            while not exit.is_set():
                time.sleep(.1)
        except (KeyboardInterrupt, SystemExit):
            exit.set()
            print('Closing...\n')
            os._exit(0)
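
Example 98 tails CloudWatch logs with a generator thread feeding a Queue and a consumer thread that prints each event and flushes, so lines appear as soon as they are consumed. A minimal sketch of that producer/consumer layout with a None sentinel to stop the consumer; the event dict shape used here is an assumption, not apilogs' real event format:

import sys
import time
from queue import Queue
from threading import Thread

def consumer(queue):
    """Print events as they arrive; flush so each line is visible immediately."""
    while True:
        event = queue.get()
        if event is None:            # sentinel: no more events
            break
        print(event['message'])
        sys.stdout.flush()

if __name__ == '__main__':
    q = Queue()
    worker = Thread(target=consumer, args=(q,))
    worker.start()
    for i in range(3):
        q.put({'message': 'log line %d' % i})
        time.sleep(0.1)
    q.put(None)                      # tell the consumer to stop
    worker.join()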

Example 99

View license
def main(_):
  if FLAGS.self_test:
    print('Running self-test.')
    train_data, train_labels = fake_data(256)
    validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
    test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
    num_epochs = 1
  else:
    # Get the data.
    train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
    train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
    test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

    # Extract it into numpy arrays.
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)

    # Generate a validation set.
    validation_data = train_data[:VALIDATION_SIZE, ...]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, ...]
    train_labels = train_labels[VALIDATION_SIZE:]
    num_epochs = NUM_EPOCHS
  train_size = train_labels.shape[0]

  # This is where training samples and labels are fed to the graph.
  # These placeholder nodes will be fed a batch of training data at each
  # training step using the {feed_dict} argument to the Run() call below.
  train_data_node = tf.placeholder(
      data_type(),
      shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
  eval_data = tf.placeholder(
      data_type(),
      shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))

  # The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
  # {tf.global_variables_initializer().run()}
  conv1_weights = tf.Variable(
      tf.truncated_normal([5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
                          stddev=0.1,
                          seed=SEED, dtype=data_type()))
  conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
  conv2_weights = tf.Variable(tf.truncated_normal(
      [5, 5, 32, 64], stddev=0.1,
      seed=SEED, dtype=data_type()))
  conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
  fc1_weights = tf.Variable(  # fully connected, depth 512.
      tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
                          stddev=0.1,
                          seed=SEED,
                          dtype=data_type()))
  fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
  fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],
                                                stddev=0.1,
                                                seed=SEED,
                                                dtype=data_type()))
  fc2_biases = tf.Variable(tf.constant(
      0.1, shape=[NUM_LABELS], dtype=data_type()))

  # We will replicate the model structure for the training subgraph, as well
  # as the evaluation subgraphs, while sharing the trainable parameters.
  def model(data, train=False):
    """The Model definition."""
    # 2D convolution, with 'SAME' padding (i.e. the output feature map has
    # the same size as the input). Note that {strides} is a 4D array whose
    # shape matches the data layout: [image index, y, x, depth].
    conv = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias and rectified linear non-linearity.
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    # Max pooling. The kernel size spec {ksize} also follows the layout of
    # the data. Here we have a pooling window of 2, and a stride of 2.
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    conv = tf.nn.conv2d(pool,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Reshape the feature map cuboid into a 2D matrix to feed it to the
    # fully connected layers.
    pool_shape = pool.get_shape().as_list()
    reshape = tf.reshape(
        pool,
        [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
    # Fully connected layer. Note that the '+' operation automatically
    # broadcasts the biases.
    hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    # Add a 50% dropout during training only. Dropout also scales
    # activations such that no rescaling is needed at evaluation time.
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
    return tf.matmul(hidden, fc2_weights) + fc2_biases

  # Training computation: logits + cross-entropy loss.
  logits = model(train_data_node, True)
  loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits, train_labels_node))

  # L2 regularization for the fully connected parameters.
  regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                  tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
  # Add the regularization term to the loss.
  loss += 5e-4 * regularizers

  # Optimizer: set up a variable that's incremented once per batch and
  # controls the learning rate decay.
  batch = tf.Variable(0, dtype=data_type())
  # Decay once per epoch, using an exponential schedule starting at 0.01.
  learning_rate = tf.train.exponential_decay(
      0.01,                # Base learning rate.
      batch * BATCH_SIZE,  # Current index into the dataset.
      train_size,          # Decay step.
      0.95,                # Decay rate.
      staircase=True)
  # Use simple momentum for the optimization.
  optimizer = tf.train.MomentumOptimizer(learning_rate,
                                         0.9).minimize(loss,
                                                       global_step=batch)

  # Predictions for the current training minibatch.
  train_prediction = tf.nn.softmax(logits)

  # Predictions for the test and validation, which we'll compute less often.
  eval_prediction = tf.nn.softmax(model(eval_data))

  # Small utility function to evaluate a dataset by feeding batches of data to
  # {eval_data} and pulling the results from {eval_predictions}.
  # Saves memory and enables this to run on smaller GPUs.
  def eval_in_batches(data, sess):
    """Get all predictions for a dataset by running it in small batches."""
    size = data.shape[0]
    if size < EVAL_BATCH_SIZE:
      raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
    for begin in xrange(0, size, EVAL_BATCH_SIZE):
      end = begin + EVAL_BATCH_SIZE
      if end <= size:
        predictions[begin:end, :] = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[begin:end, ...]})
      else:
        batch_predictions = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
        predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions

  # Create a local session to run the training.
  start_time = time.time()
  with tf.Session() as sess:
    # Run all the initializers to prepare the trainable parameters.
    tf.global_variables_initializer().run()
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
      # Compute the offset of the current minibatch in the data.
      # Note that we could use better randomization across epochs.
      offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
      batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
      batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
      # This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
      feed_dict = {train_data_node: batch_data,
                   train_labels_node: batch_labels}
      # Run the optimizer to update weights.
      sess.run(optimizer, feed_dict=feed_dict)
      # Print some extra information once we reach the evaluation frequency.
      if step % EVAL_FREQUENCY == 0:
        # Fetch some extra nodes' data.
        l, lr, predictions = sess.run([loss, learning_rate, train_prediction],
                                      feed_dict=feed_dict)
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print('Step %d (epoch %.2f), %.1f ms' %
              (step, float(step) * BATCH_SIZE / train_size,
               1000 * elapsed_time / EVAL_FREQUENCY))
        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
        print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
        print('Validation error: %.1f%%' % error_rate(
            eval_in_batches(validation_data, sess), validation_labels))
        sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
      print('test_error', test_error)
      assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
          test_error,)
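
In the training loop above, sys.stdout.flush() follows the progress prints so each evaluation report shows up immediately, even when stdout is redirected to a log file or pipe and is therefore block-buffered. Below is a minimal, self-contained sketch of the same report-then-flush pattern; the loop body, step count, and timing are placeholder assumptions, not part of the original example.

import sys
import time


def train(num_steps=50, eval_every=10):
    """Toy loop mirroring the report-then-flush pattern above (hypothetical)."""
    for step in range(num_steps):
        time.sleep(0.01)  # stand-in for sess.run(optimizer, feed_dict=feed_dict)
        if step % eval_every == 0:
            print('Step %d' % step)
            # Force the progress line out now; a block-buffered stdout could
            # otherwise hold output back for many evaluation intervals.
            sys.stdout.flush()


if __name__ == '__main__':
    train()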

Example 100

Project: CuckooSploit
Source File: logo.py
View license
def logo():
    """Cuckoo asciiarts.
    @return: asciiarts array.
    """
    logos = []

    logos.append("""
                                 _|                            
     _|_|_|  _|    _|    _|_|_|  _|  _|      _|_|      _|_|    
   _|        _|    _|  _|        _|_|      _|    _|  _|    _|  
   _|        _|    _|  _|        _|  _|    _|    _|  _|    _|  
     _|_|_|    _|_|_|    _|_|_|  _|    _|    _|_|      _|_|""")

    logos.append("""
                      __                  
  .----..--.--..----.|  |--..-----..-----.
  |  __||  |  ||  __||    < |  _  ||  _  |
  |____||_____||____||__|__||_____||_____|""")

    logos.append("""
                          .:                 
                          ::                 
    .-.     ,  :   .-.    ;;.-.  .-.   .-.   
   ;       ;   ;  ;       ;; .' ;   ;';   ;' 
   `;;;;'.'`..:;._`;;;;'_.'`  `.`;;'  `;;'""")

    logos.append("""
  eeee e   e eeee e   e  eeeee eeeee 
  8  8 8   8 8  8 8   8  8  88 8  88 
  8e   8e  8 8e   8eee8e 8   8 8   8 
  88   88  8 88   88   8 8   8 8   8 
  88e8 88ee8 88e8 88   8 8eee8 8eee8""")

    logos.append("""
  _____________________________________/\/\_______________________________
  ___/\/\/\/\__/\/\__/\/\____/\/\/\/\__/\/\__/\/\____/\/\/\______/\/\/\___
  _/\/\________/\/\__/\/\__/\/\________/\/\/\/\____/\/\__/\/\__/\/\__/\/\_
  _/\/\________/\/\__/\/\__/\/\________/\/\/\/\____/\/\__/\/\__/\/\__/\/\_
  ___/\/\/\/\____/\/\/\/\____/\/\/\/\__/\/\__/\/\____/\/\/\______/\/\/\___
  ________________________________________________________________________""")

    logos.append("""
   _______ _     _ _______ _     _  _____   _____ 
   |       |     | |       |____/  |     | |     |
   |_____  |_____| |_____  |    \\_ |_____| |_____|""")

    logos.append("""
                     _ 
    ____ _   _  ____| |  _ ___   ___
   / ___) | | |/ ___) |_/ ) _ \ / _ \\
  ( (___| |_| ( (___|  _ ( |_| | |_| |
   \\____)____/ \\____)_| \\_)___/ \\___/""")

    logos.append("""
   ______   __  __   ______   ___   ___   ______   ______      
  /_____/\\ /_/\\/_/\\ /_____/\\ /___/\\/__/\\ /_____/\\ /_____/\\     
  \\:::__\\/ \\:\\ \\:\\ \\\\:::__\\/ \\::.\\ \\\\ \\ \\\\:::_ \\ \\\\:::_ \\ \\    
   \\:\\ \\  __\\:\\ \\:\\ \\\\:\\ \\  __\\:: \\/_) \\ \\\\:\\ \\ \\ \\\\:\\ \\ \\ \\   
    \\:\\ \\/_/\\\\:\\ \\:\\ \\\\:\\ \\/_/\\\\:. __  ( ( \\:\\ \\ \\ \\\\:\\ \\ \\ \\  
     \\:\\_\\ \\ \\\\:\\_\\:\\ \\\\:\\_\\ \\ \\\\: \\ )  \\ \\ \\:\\_\\ \\ \\\\:\\_\\ \\ \\ 
      \\_____\\/ \\_____\\/ \\_____\\/ \\__\\/\\__\\/  \\_____\\/ \\_____\\/""")

    logos.append("""
    sSSs   .S       S.     sSSs   .S    S.     sSSs_sSSs      sSSs_sSSs    
   d%%SP  .SS       SS.   d%%SP  .SS    SS.   d%%SP~YS%%b    d%%SP~YS%%b   
  d%S'    S%S       S%S  d%S'    S%S    S&S  d%S'     `S%b  d%S'     `S%b  
  S%S     S%S       S%S  S%S     S%S    d*S  S%S       S%S  S%S       S%S  
  S&S     S&S       S&S  S&S     S&S   .S*S  S&S       S&S  S&S       S&S  
  S&S     S&S       S&S  S&S     S&S_sdSSS   S&S       S&S  S&S       S&S  
  S&S     S&S       S&S  S&S     S&S~YSSY%b  S&S       S&S  S&S       S&S  
  S&S     S&S       S&S  S&S     S&S    `S%  S&S       S&S  S&S       S&S  
  S*b     S*b       d*S  S*b     S*S     S%  S*b       d*S  S*b       d*S  
  S*S.    S*S.     .S*S  S*S.    S*S     S&  S*S.     .S*S  S*S.     .S*S  
   SSSbs   SSSbs_sdSSS    SSSbs  S*S     S&   SSSbs_sdSSS    SSSbs_sdSSS   
    YSSP    YSSP~YSSY      YSSP  S*S     SS    YSSP~YSSY      YSSP~YSSY    
                                 SP                                        
                                 Y""")

    logos.append("""
           _______                   _____                    _____          
          /::\\    \\                 /\\    \\                  /\\    \\         
         /::::\\    \\               /::\\____\\                /::\\    \\        
        /::::::\\    \\             /::::|   |               /::::\\    \\       
       /::::::::\\    \\           /:::::|   |              /::::::\\    \\      
      /:::/~~\\:::\\    \\         /::::::|   |             /:::/\\:::\\    \\     
     /:::/    \\:::\\    \\       /:::/|::|   |            /:::/  \\:::\\    \\    
    /:::/    / \\:::\\    \\     /:::/ |::|   |           /:::/    \\:::\\    \\   
   /:::/____/   \\:::\\____\\   /:::/  |::|___|______    /:::/    / \\:::\\    \\  
  |:::|    |     |:::|    | /:::/   |::::::::\\    \\  /:::/    /   \\:::\\ ___\\ 
  |:::|____|     |:::|    |/:::/    |:::::::::\\____\\/:::/____/  ___\\:::|    |
   \\:::\\    \\   /:::/    / \\::/    / ~~~~~/:::/    /\\:::\\    \\ /\\  /:::|____|
    \\:::\\    \\ /:::/    /   \\/____/      /:::/    /  \\:::\\    /::\\ \\::/    / 
     \\:::\\    /:::/    /                /:::/    /    \\:::\\   \\:::\\ \\/____/  
      \\:::\\__/:::/    /                /:::/    /      \\:::\\   \\:::\\____\\    
       \\::::::::/    /                /:::/    /        \\:::\\  /:::/    /    
        \\::::::/    /                /:::/    /          \\:::\\/:::/    /     
         \\::::/    /                /:::/    /            \\::::::/    /      
          \\::/____/                /:::/    /              \\::::/    /       
           ~~                      \\::/    /                \\::/____/        
                                    \\/____/                                  
                                                       it's Cuckoo!""")

    logos.append("""
            _       _                   _             _              _            _       
          /\\ \\     /\\_\\               /\\ \\           /\\_\\           /\\ \\         /\\ \\     
         /  \\ \\   / / /         _    /  \\ \\         / / /  _       /  \\ \\       /  \\ \\    
        / /\\ \\ \\  \\ \\ \\__      /\\_\\ / /\\ \\ \\       / / /  /\\_\\    / /\\ \\ \\     / /\\ \\ \\   
       / / /\\ \\ \\  \\ \\___\\    / / // / /\\ \\ \\     / / /__/ / /   / / /\\ \\ \\   / / /\\ \\ \\  
      / / /  \\ \\_\\  \\__  /   / / // / /  \\ \\_\\   / /\\_____/ /   / / /  \\ \\_\\ / / /  \\ \\_\\ 
     / / /    \\/_/  / / /   / / // / /    \\/_/  / /\\_______/   / / /   / / // / /   / / / 
    / / /          / / /   / / // / /          / / /\\ \\ \\     / / /   / / // / /   / / /  
   / / /________  / / /___/ / // / /________  / / /  \\ \\ \\   / / /___/ / // / /___/ / /   
  / / /_________\\/ / /____\\/ // / /_________\\/ / /    \\ \\ \\ / / /____\\/ // / /____\\/ /    
  \\/____________/\\/_________/ \\/____________/\\/_/      \\_\\_\\\\/_________/ \\/_________/""")

    logos.append("""
                               ),-.     /
  Cuckoo Sandbox              <(a  `---',' 
     no chance for malwares!  ( `-, ._> )
                               ) _>.___/
                                   _/""")

    logos.append("""
  .-----------------.
  | Cuckoo Sandbox? |
  |     OH NOES!    |\\  '-.__.-'   
  '-----------------' \\  /oo |--.--,--,--.
                         \\_.-'._i__i__i_.'
                               \"\"\"\"\"\"\"\"\"""")

    print(color(random.choice(logos), random.randrange(31, 37)))
    print()
    print(" Cuckoo Sandbox %s" % yellow(CUCKOO_VERSION))
    print(" www.cuckoosandbox.org")
    print(" Copyright (c) 2010-2015")
    print()
    sys.stdout.flush()
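
Here sys.stdout.flush() runs once, after the randomly chosen ASCII-art banner and the version lines are printed, so the banner reliably appears before any later output written to unbuffered streams such as stderr. Below is a minimal sketch of that ordering concern; the banner strings and the trailing stderr write are placeholder assumptions, and the color/yellow helpers from the original are not reproduced.

import random
import sys

# Stand-ins for the ASCII-art banners in the original logo() function.
BANNERS = ["cuckoo sandbox", "c-u-c-k-o-o"]


def banner():
    """Print a random banner and flush so it precedes later stderr output."""
    print(random.choice(BANNERS))
    print()
    # stdout may be block-buffered when redirected; stderr is unbuffered, so
    # without this flush the line written below could appear first.
    sys.stdout.flush()


if __name__ == '__main__':
    banner()
    sys.stderr.write('starting up...\n')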