datetime.date

Here are examples of the Python API datetime.date taken from open source projects.
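
Before the project examples, here is a minimal, self-contained sketch of the core datetime.date operations the snippets below rely on (construction, today(), attribute access, timedelta arithmetic, ISO parsing, and format()). The variable names are illustrative only; fromisoformat requires Python 3.7+.

import datetime

# Construct a date; out-of-range values raise ValueError.
d = datetime.date(2007, 8, 18)
print(d.year, d.month, d.day)      # 2007 8 18
print(d.isoformat())               # 2007-08-18

# Today's date and arithmetic with timedelta.
today = datetime.date.today()
two_days = datetime.timedelta(days=2)
print(today - two_days < today)    # True

# Parse an ISO 8601 string (Python 3.7+).
print(datetime.date.fromisoformat("2004-01-02"))   # 2004-01-02

# Dates support strftime-style format specs via __format__.
print("{0:%Y-%m-%d}".format(d))    # 2007-08-18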

95 Examples


Example 1

Project: pwn_plug_sources Source File: setter.py
Function: parse_date_time
def parseDatetime(value):
    """
    Year and date:
    >>> parseDatetime("2000")
    (datetime.date(2000, 1, 1), u'2000')
    >>> parseDatetime("2004-01-02")
    datetime.date(2004, 1, 2)

    Timestamp:
    >>> parseDatetime("2004-01-02 18:10:45")
    datetime.datetime(2004, 1, 2, 18, 10, 45)
    >>> parseDatetime("2004-01-02 18:10:45")
    datetime.datetime(2004, 1, 2, 18, 10, 45)

    Timestamp with timezone:
    >>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0000')
    datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<TimezoneUTC delta=0, name=u'UTC'>)
    >>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0200')
    datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<Timezone delta=2:00:00, name='+0200'>)
    """
    value = NORMALIZE_REGEX.sub("~", value.strip())
    regs = YEAR_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            return (date(year, 1, 1), unicode(year))
        except ValueError:
            pass
    regs = DATE_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            return date(year, month, day)
        except ValueError:
            pass
    regs = DATETIME_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass
    regs = DATETIME_REGEX2.match(value)
    if regs:
        try:
            month = int(regs.group(1))
            day = int(regs.group(2))
            year = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass
    current_locale = setlocale(LC_ALL, "C")
    try:
        match = TIMEZONE_REGEX.match(value)
        if match:
            without_timezone = match.group(1)
            delta = int(match.group(2))
            delta = createTimezone(delta)
        else:
            without_timezone = value
            delta = None
        try:
            timestamp = strptime(without_timezone, ISO_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass

        try:
            timestamp = strptime(without_timezone, RIFF_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass

        try:
            timestamp = strptime(value, MONTH_YEAR)
            arguments = list(timestamp[0:3])
            return date(*arguments)
        except ValueError:
            pass
    finally:
        setlocale(LC_ALL, current_locale)
    return None

Example 2

Project: pyjs Source File: BuiltinTest.py
Function: test_format
    def test_format(self):
        self.assertEqual(''.format(), '')
        self.assertEqual('a'.format(), 'a')
        self.assertEqual('ab'.format(), 'ab')
        self.assertEqual('a{{'.format(), 'a{')
        self.assertEqual('a}}'.format(), 'a}')
        self.assertEqual('{{b'.format(), '{b')
        self.assertEqual('}}b'.format(), '}b')
        self.assertEqual('a{{b'.format(), 'a{b')
        # examples from the PEP:
        import datetime
        self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
        self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
                         "My name is Fred")
        self.assertEqual("My name is {0} :-{{}}".format('Fred'),
                         "My name is Fred :-{}")

        d = datetime.date(2007, 8, 18)
        self.assertEqual("The year is {0.year}".format(d),
                         "The year is 2007")

        # classes we'll use for testing
        class C:
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec

        class D:
            def __init__(self, x):
                self.x = x
            def __format__(self, spec):
                return str(self.x)

        # class with __str__, but no __format__
        class E:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return 'E(' + self.x + ')'

        # class with __repr__, but no __format__ or __str__
        class F:
            def __init__(self, x):
                self.x = x
            def __repr__(self):
                return 'F(' + self.x + ')'

        # class with __format__ that forwards to string, for some format_spec's
        class G:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return "string is " + self.x
            def __format__(self, format_spec):
                if format_spec == 'd':
                    return 'G(' + self.x + ')'
                return object.__format__(self, format_spec)

        class Galt:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return "string is " + self.x
            def __format__(self, format_spec):
                if format_spec == 'd':
                    return 'G(' + self.x + ')'
                return format(str(self), format_spec)

        # class that returns a bad type from __format__
        class H:
            def __format__(self, format_spec):
                return 1.0

        class I(datetime.date):
            def __format__(self, format_spec):
                return self.strftime(format_spec)

        class J(int):
            def __format__(self, format_spec):
                return int.__format__(self * 2, format_spec)


        self.assertEqual(''.format(), '')
        self.assertEqual('abc'.format(), 'abc')
        self.assertEqual('{0}'.format('abc'), 'abc')
        self.assertEqual('{0:}'.format('abc'), 'abc')
        self.assertEqual('X{0}'.format('abc'), 'Xabc')
        self.assertEqual('{0}X'.format('abc'), 'abcX')
        self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
        self.assertEqual('{1}'.format(1, 'abc'), 'abc')
        self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
        self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
        self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
        self.assertEqual('{0}'.format(-15), '-15')
        self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
        self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
        self.assertEqual('{{'.format(), '{')
        self.assertEqual('}}'.format(), '}')
        self.assertEqual('{{}}'.format(), '{}')
        self.assertEqual('{{x}}'.format(), '{x}')
        self.assertEqual('{{{0}}}'.format(123), '{123}')
        self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
        self.assertEqual('}}{{'.format(), '}{')
        self.assertEqual('}}x{{'.format(), '}x{')

        # weird field names
        self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
        self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
        self.assertEqual("{0[ ]}".format({' ':3}), '3')

        self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
        self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
        self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
        self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
        self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
        self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
        self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')

        # strings
        self.assertEqual('{0:.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:.3s}'.format('ab'), 'ab')
        self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
        self.assertEqual('{0:.0s}'.format('abcdef'), '')
        self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
        self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
        self.assertEqual('{0:x<0s}'.format('result'), 'result')
        self.assertEqual('{0:x<5s}'.format('result'), 'result')
        self.assertEqual('{0:x<6s}'.format('result'), 'result')
        self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
        self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
        self.assertEqual('{0: <7s}'.format('result'), 'result ')
        self.assertEqual('{0:<7s}'.format('result'), 'result ')
        self.assertEqual('{0:>7s}'.format('result'), ' result')
        self.assertEqual('{0:>8s}'.format('result'), '  result')
        self.assertEqual('{0:^8s}'.format('result'), ' result ')
        self.assertEqual('{0:^9s}'.format('result'), ' result  ')
        self.assertEqual('{0:^10s}'.format('result'), '  result  ')
        self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
        self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
        self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)

        # format specifiers for user defined type
        self.assertEqual('{0:abc}'.format(C()), 'abc')

        # !r and !s coercions
        self.assertEqual('{0!s}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:15}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')

        # test fallback to object.__format__
        self.assertEqual('{0}'.format({}), '{}')
        self.assertEqual('{0}'.format([]), '[]')
        self.assertEqual('{0}'.format([1]), '[1]')
        self.assertEqual('{0}'.format(E('data')), 'E(data)')
        self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
        self.assertEqual('{0!s}'.format(G('dat1')), 'string is dat1')

        self.assertEqual('{0:^10}'.format(E('dat2')), ' E(dat2)  ')
        self.assertEqual('{0:^10s}'.format(E('dat3')), ' E(dat3)  ')
        self.assertEqual('{0:>15s}'.format(Galt('dat4')), ' string is dat4')
        # if Issue #674 is fixed the following should no longer throw an
        # exception (classified as known issue), then Galt can be changed to G and Galt removed
        try:
            self.assertEqual('{0:>15s}'.format(G('dat5')), ' string is dat5')
        except:
            self.fail("object.__format__ missing#674")


        self.assertEqual("{0:date: %Y-%m-%d}".format(
                                   I(year=2007, month=8, day=27)),
                         "date: 2007-08-27",
                         "Issue #673. datetime.date should have __format___")

        # test deriving from a builtin type and overriding __format__
        self.assertEqual("{0}".format(J(10)), "20",
                'Issue #670 derived from int/float/str not instance of object')


        # string format specifiers
        self.assertEqual('{0:}'.format('a'), 'a')

        # computed format specifiers
        self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello     ')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello     ')

        # test various errors
        self.format_raises(ValueError, '{')
        self.format_raises(ValueError, '}')
        self.format_raises(ValueError, 'a{')
        self.format_raises(ValueError, 'a}')
        self.format_raises(ValueError, '{a')
        self.format_raises(ValueError, '}a')
        self.format_raises(IndexError, '{0}')
        self.format_raises(IndexError, '{1}', 'abc')
        self.format_raises(KeyError,   '{x}')
        self.format_raises(ValueError, "}{")
        self.format_raises(ValueError, "{")
        self.format_raises(ValueError, "}")
        self.format_raises(ValueError, "abc{0:{}")
        self.format_raises(ValueError, "{0")
        self.format_raises(IndexError, "{0.}")
        self.format_raises(ValueError, "{0.}", 0)
        self.format_raises(IndexError, "{0[}")
        self.format_raises(ValueError, "{0[}", [])
        self.format_raises(KeyError,   "{0]}")
        self.format_raises(ValueError, "{0.[]}", 0)
        self.format_raises(ValueError, "{0..foo}", 0)
        self.format_raises(ValueError, "{0[0}", 0)
        self.format_raises(ValueError, "{0[0:foo}", 0)
        self.format_raises(KeyError,   "{c]}")
        self.format_raises(ValueError, "{{ {{{0}}", 0)
        self.format_raises(ValueError, "{0}}", 0)
        self.format_raises(KeyError,   "{foo}", bar=3)
        self.format_raises(ValueError, "{0!x}", 3)
        self.format_raises(ValueError, "{0!}", 0)
        self.format_raises(ValueError, "{0!rs}", 0)
        self.format_raises(ValueError, "{!}")
        self.format_raises(IndexError, "{:}")
        self.format_raises(IndexError, "{:s}")
        self.format_raises(IndexError, "{}")

        # issue 6089
        self.format_raises(ValueError, "{0[0]x}", [None])
        self.format_raises(ValueError, "{0[0](10)}", [None])

        # can't have a replacement on the field name portion
        # this is Issue 671: string & list indices must be integers, not str
        self.format_raises(TypeError, '{0[{1}]}', 'abcdefg', 4)

        # exceed maximum recursion depth
        self.format_raises(ValueError, "{0:{1:{2}}}", 'abc', 's', '')
        self.format_raises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}",
                          0, 1, 2, 3, 4, 5, 6, 7)

        # string format spec errors
        self.format_raises(ValueError, "{0:-s}", '')
        self.assertRaises(ValueError, format, "", "-")
        self.format_raises(ValueError, "{0:=s}", '')

Example 3

Project: ggrc-core Source File: test_weekly_workflow.py
  def test_delete_all_t_after_cs_were_already_created_and_create_new_tg_start_of_month(self):  # noqa
    """Check that workflow doesn't reset next cycle start date when
    all tasks are deleted after cycles were already created"""
    weekly_wf = {
        "title": "weekly thingy",
        "description": "start this many a time",
        "frequency": "weekly",
        "task_groups": [{
            "title": "tg 1",
            "task_group_tasks": [
                {
                    'title': 'weekly task 1',
                    "relative_start_day": 5,
                    "relative_end_day": 1,
                }],
            "task_group_objects": []
        },
        ]
    }
    new_task_group = {
        "title": "task group 2",
        'task_group_tasks': [
            {
                'title': 'weekly task 1',
                "relative_start_day": 4,
                "relative_end_day": 5,
            }],
        "task_group_objects": []
    }
    with freezegun.freeze_time("2015-6-16 13:00:00"):  # Tue, 6/16/2015
      _, wf = self.generator.generate_workflow(weekly_wf)
      _, awf = self.generator.activate_workflow(wf)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 6, 19))

      _, cycle = self.generator.generate_cycle(wf)
      self.assertEqual(cycle.start_date, datetime.date(2015, 6, 19))
      self.assertEqual(cycle.end_date, datetime.date(2015, 6, 22))

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 6, 26))

      _, cycle = self.generator.generate_cycle(wf)  # 2016-6-26

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 7, 2))

      tg = db.session.query(models.TaskGroup).filter(
          models.TaskGroup.workflow_id == wf.id).one()

      response = self.generator.api.delete(tg)
      self.assert200(response)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date, None)

      _, tg = self.generator.generate_task_group(wf, data=new_task_group)
      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 7, 2))

      _, cycle = self.generator.generate_cycle(wf)
      self.assertEqual(cycle.start_date, datetime.date(2015, 7, 2))
      self.assertEqual(cycle.end_date, datetime.date(2015, 7, 2))

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 7, 9))

Example 4

Project: amy Source File: test_events.py
Function: set_up
    def setUp(self):
        past = datetime.date(1993, 8, 30)
        today = datetime.date.today()
        future = datetime.date(2030, 3, 25)
        delta_2d = datetime.timedelta(days=2)
        delta_1d = datetime.timedelta(days=1)
        host = Organization.objects.create(domain='host.edu', fullname='Organization EDU')

        # past event
        self.event1 = Event.objects.create(
            slug='event1', start=past - delta_2d, end=past - delta_1d,
            host=host, latitude=3, longitude=-2, venue='University',
            address='On the street', country='US', contact='[email protected]',
            url='https://user.github.io/repository/',
        )
        # ongoing event
        self.event2 = Event.objects.create(
            slug='event2', start=today - delta_2d, end=today + delta_2d,
            host=host, latitude=3, longitude=-2, venue='University',
            address='On the street', country='US', contact='[email protected]',
            url='https://github.com/user/repository',
        )
        # future event
        self.event3 = Event.objects.create(
            slug='event3', start=future - delta_2d, end=future + delta_2d,
            host=host, latitude=3, longitude=-2, venue='University',
            address='On the street', country='US', contact='[email protected]',
            url='http://github.com/user/repository/',
            reg_key='12341234',
        )
        # event with missing start
        self.event4 = Event.objects.create(
            slug='event4', end=past + delta_2d,
            host=host, latitude=3, longitude=-2, venue='University',
            address='On the street', country='US', contact='[email protected]',
            url='http://url4/',
        )
        # event with missing URL
        self.event5 = Event.objects.create(
            slug='event5', start=future - delta_2d, end=future + delta_2d,
            host=host, latitude=3, longitude=-2, venue='University',
            address='On the street', country='US', contact='[email protected]',
        )
        # event with missing country
        self.event6 = Event.objects.create(
            slug='event6', start=future - delta_2d, end=future + delta_2d,
            host=host, latitude=3, longitude=-2, venue='University',
            address='On the street', country=None, contact='[email protected]',
            url='http://url6/',
        )
        # event with missing venue
        self.event7 = Event.objects.create(
            slug='event7', start=future - delta_2d, end=future + delta_2d,
            host=host, latitude=3, longitude=-2, venue='',
            address='On the street', country='US', contact='[email protected]',
            url='http://url7/',
        )
        # event with missing both start and URL
        self.event8 = Event.objects.create(
            slug='event8', end=future + delta_1d,
            host=host, latitude=3.1, longitude=-1.9, venue='University',
            address='On the street', country='US', contact='[email protected]',
        )
        # event with missing both country and venue
        self.event9 = Event.objects.create(
            slug='event9', start=future - delta_2d, end=future + delta_1d,
            host=host, latitude=3.1, longitude=-1.9, venue='',
            address='On the street', country=None, contact='[email protected]',
            url='http://url9/',
        )
        # event with missing start, URL, country, and venue
        self.event10 = Event.objects.create(
            slug='event10', end=future + delta_1d,
            host=host, latitude=3.1, longitude=-1.9, venue='',
            address='On the street', country=None, contact='[email protected]',
        )

        self.expecting = [
            {
                'slug': 'event3',
                'start': self.event3.start,
                'end': self.event3.end,
                'humandate': 'Mar 23-27, 2030',
                'latitude': 3.,
                'longitude': -2.,
                'venue': 'University',
                'address': 'On the street',
                'country': 'US',
                'url': 'https://user.github.io/repository/',
                'contact': '[email protected]',
                'eventbrite_id': '12341234',
                'tags': [],
            },
            {
                'slug': 'event2',
                'start': self.event2.start,
                'end': self.event2.end,
                'humandate': self.event2.human_readable_date,
                'latitude': 3.,
                'longitude': -2.,
                'venue': 'University',
                'address': 'On the street',
                'country': 'US',
                'url': 'https://user.github.io/repository/',
                'contact': '[email protected]',
                'eventbrite_id': '',
                'tags': [],
            },
            {
                'slug': 'event1',
                'start': self.event1.start,
                'end': self.event1.end,
                'humandate': 'Aug 28-29, 1993',
                'latitude': 3.,
                'longitude': -2.,
                'venue': 'University',
                'address': 'On the street',
                'country': 'US',
                'url': 'https://user.github.io/repository/',
                'contact': '[email protected]',
                'eventbrite_id': '',
                'tags': [],
            },
        ]

Example 5

Project: CrisisMappingToolkit Source File: plot_water_levelui.py
def parse_lake_results(name, startdate, enddate):
    f = open(name, 'r')

    x_axis = []
    y_axis = []
    cloud_axis = []

    startdate_parts = startdate.split('-')
    startdate = datetime.date(int(startdate_parts[0]), int(startdate_parts[1]), int(startdate_parts[2]))

    enddate_parts = enddate.split('-')
    enddate = datetime.date(int(enddate_parts[0]), int(enddate_parts[1]), int(enddate_parts[2]))

    f.readline()
    parts = f.readline().split(',')
    names = parts[0]
    country = parts[1]
    # Dynamic cloud pixel thresholding.
    area = float(parts[2].replace('\n', ''))
    pixel_area = area/.03/.03
    cloud_pix_threshold = pixel_area*.002475
    f.readline()

    for l in f:
        parts = l.split(',')
        date_parts = parts[0].split('-')
        date = datetime.date(int(date_parts[0]), int(date_parts[1]), int(date_parts[2]))
        if date < startdate or date > enddate:
            continue
        satellite = parts[1]
        cloud = int(parts[2])
        water = int(parts[3])
        # take values with low cloud cover
        if cloud < cloud_pix_threshold and water > 0:
            x_axis.append(date)
            y_axis.append(water * 0.03 * 0.03)  # pixels * km^2 / pixel, km^2 / pixel = 0.03 * 0.03 / 1
            cloud_axis.append(cloud * 0.03 * 0.03)

    # Error-catcher for situation where a date range is selected and no good points are available for plotting.
    if len(y_axis) < 3:
        features = False
        dates = False
        water = False
        clouds = False
        return (features, dates, water, clouds)

    # Sorts data so that data points are in order of date then satellite, not vice-versa. Only needed if we want to use
    # Landsat 7 data.
    x_sorter = x_axis
    y_axis = [y_axis for (x_sorter, y_axis) in sorted(zip(x_sorter, y_axis), key = lambda pair: pair[0])]
    x_axis = sorted(x_axis)
    f.close()

    # Remove values that differ from neighbors by large amounts
    NEIGHBOR_RADIUS = 3
    OUTLIER_FACTOR = 0.995#Was 0.98
    remove = []
    for i in range(len(y_axis)):
        start = max(0, i - NEIGHBOR_RADIUS)
        end = min(len(y_axis), i + NEIGHBOR_RADIUS)
        if i > 0:
            neighbors = y_axis[start:i-1]
        else:
            neighbors = []
        if i < len(y_axis) - 1:
            neighbors.extend(y_axis[i+1:end])
        num_neighbors = end - start - 1
        num_outliers = 0
        for v in neighbors:
            if (v < y_axis[i] * OUTLIER_FACTOR) or (v > y_axis[i] / OUTLIER_FACTOR):
                num_outliers += 1
        if (num_neighbors == 0) or (float(num_outliers) / num_neighbors >= 0.5):
            remove.append(i)

    for i in reversed(remove):
        y_axis.pop(i)
        cloud_axis.pop(i)
        x_axis.pop(i)

    results = dict()
    results['name'] = names
    results['country'] = country
    results['area'] = str(area)
    return (results, x_axis, y_axis, cloud_axis)

Example 6

Project: hue Source File: tests.py
    def test_annotation(self):
        # Annotations get combined with extra select clauses
        obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))

        # Order of the annotate/extra in the query doesn't matter
        obj = Book.objects.extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))

        # Values queries can be combined with annotate and extra
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            "contact_id": 3,
            "id": 2,
            "isbn": "067232959",
            "mean_auth_age": 45.0,
            "name": "Sams Teach Yourself Django in 24 Hours",
            "pages": 528,
            "price": Decimal("23.09"),
            "pubdate": datetime.date(2008, 3, 3),
            "publisher_id": 2,
            "rating": 3.0,
        })

        # The order of the (empty) values, annotate and extra clauses doesn't
        # matter
        obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            'contact_id': 3,
            'id': 2,
            'isbn': '067232959',
            'mean_auth_age': 45.0,
            'name': 'Sams Teach Yourself Django in 24 Hours',
            'pages': 528,
            'price': Decimal("23.09"),
            'pubdate': datetime.date(2008, 3, 3),
            'publisher_id': 2,
            'rating': 3.0
        })

        # If the annotation precedes the values clause, it won't be included
        # unless it is explicitly named
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name').get(pk=1)
        self.assertEqual(obj, {
            "name": 'The Definitive Guide to Django: Web Development Done Right',
        })

        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name','mean_auth_age').get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })

        # If an annotation isn't included in the values, it can still be used
        # in a filter
        qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
        self.assertQuerysetEqual(
            qs, [
                {"name": 'Python Web Development with Django'}
            ],
            lambda b: b,
        )

        # The annotations are added to values output if values() precedes
        # annotate()
        obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })

        # Check that all of the objects are getting counted (allow_nulls) and
        # that values respects the amount of objects
        self.assertEqual(
            len(Author.objects.annotate(Avg('friends__age')).values()),
            9
        )

        # Check that consecutive calls to annotate accumulate in the query
        qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
        self.assertQuerysetEqual(
            qs, [
                {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
                {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
                {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
                {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
                {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
            ],
            lambda b: b,
        )

Example 7

Project: xbmcbackup Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError, "relativedelta only diffs datetime/date"
            if type(dt1) is not type(dt2):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if type(weekday) is int:
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError, "invalid year day (%d)" % yday

        self._fix()

Example 8

Project: Arelle Source File: TDnetLoader.py
def tdNetLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
    if not (mappedUri.startswith("https://www.release.tdnet.info/inbs/I_") and 
            mappedUri.endswith(".html")):
        return None # not a td net info file
    
    rssObject = ModelRssObject(modelXbrl, uri=mappedUri, filepath=filepath)
    
    hasMoreSections = True
    while hasMoreSections:
        # treat tdnet as an RSS feed object
        try:
            tdInfoDoc = html.parse(filepath)
        except (IOError, EnvironmentError):
            return None # give up, use ordinary loader
        
        # find date
        date = None
        for elt in tdInfoDoc.iter():
            if elt.tag == "table":
                break # no date portion, probably wrong document
            if elt.text and datePattern.match(elt.text):
                g = datePattern.match(elt.text).groups()
                date = datetime.date(int(g[0]), int(g[1]), int(g[2]))
                break
        if not date:
            return None # give up, not a TDnet index document
        
        urlDir = os.path.dirname(mappedUri)
            
        # find <table> with <a>Download in it
        for tableElt in tdInfoDoc.iter(tag="table"):
            useThisTableElt = False
            for aElt in tableElt.iterdescendants(tag="a"):
                if "download" in aElt.text.lower():
                    useThisTableElt = True
                    break
            if useThisTableElt:
                cols = {}
                for trElt in tableElt.iter(tag="tr"):
                    col = 0
                    rowData = {}
                    for tdElt in trElt.iter(tag="td"):
                        text = ''.join(t.strip() for t in tdElt.itertext())
                        if tdElt.get("class") == "tableh": #header
                            type = {"時刻": "time",
                                    "コード": "code",
                                    "会社名": "companyName",
                                    "表題": "title",
                                    "XBRL": "zipUrl",
                                    "上場取引所": "stockExchange",
                                    "更新履歴": "changeLog"
                                    }.get(text, None)
                            if type:
                                cols[col] = type
                                cols[type] = col
                        elif col == cols["title"]:
                            rowData["title"] = text
                            rowData["pdfUrl"] = descendantAttr(tdElt, "a", "href")
                        elif col == cols["zipUrl"]:
                            rowData["zipUrl"] = descendantAttr(tdElt, "a", "href")
                        elif col in cols: # body
                            rowData[cols[col]] = text
                        col += int(tdElt.get("colspan", 1))
                    if rowData:
                        time = rowData.get("time", "")
                        if timePattern.match(time):
                            g = timePattern.match(time).groups()
                            dateTime = datetime.datetime(date.year, date.month, date.day,
                                                         int(g[0]), int(g[1]))
                        else:
                            dateTime = datetime.datetime.now()
                        filingCode = rowData.get("code")
                        companyName = rowData.get("companyName")
                        stockExchange = rowData.get("stockExchange")
                        title = rowData.get("title")
                        pdfUrl = rowData.get("pdfUrl")
                        if pdfUrl:
                            pdfUrl = urlDir + "/" + pdfUrl
                        zipUrl = rowData.get("zipUrl")
                        if zipUrl:
                            zipUrl = urlDir + "/" + zipUrl
                        changeLog = rowData.get("changeLog")
                        # find instance doc in file
                        instanceUrls = []
                        if zipUrl:
                            try:
                                normalizedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(zipUrl)
                                filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(normalizedUri)
                                filesource = FileSource.FileSource(filepath)
                                dir = filesource.dir
                                filesource.close()
                                if dir:
                                    for file in dir:
                                        if "ixbrl" in file or file.endswith(".xbrl") or "instance" in file:
                                            instanceUrls.append(zipUrl + "/" + file)
                            except:
                                continue # forget this filing
                        for instanceUrl in instanceUrls:
                            rssObject.rssItems.append(
                                TDnetItem(modelXbrl, date, dateTime, filingCode, companyName, 
                                          title, pdfUrl, instanceUrl, stockExchange))
        # next screen if continuation
        hasMoreSections = False
        for elt in tdInfoDoc.iter(tag="input"):
            if elt.value == "次画面":  # next screen button
                nextLocation = elt.get("onclick")
                if nextLocation and nextLocationPattern.match(nextLocation):
                    hasMoreSections = True
                    nextUrl = urlDir + "/" + nextLocationPattern.match(nextLocation).groups()[0]
                    mappedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(nextUrl)
                    filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(mappedUri)
    return rssObject

Example 9

Project: Arelle Source File: DTS.py
def checkFilingDTS(val, modelDocument, isEFM, isGFM, visited):
    global targetNamespaceDatePattern, efmFilenamePattern, htmlFileNamePattern, roleTypePattern, arcroleTypePattern, \
            arcroleDefinitionPattern, namePattern, linkroleDefinitionBalanceIncomeSheet, \
            namespacesConflictPattern
    if targetNamespaceDatePattern is None:
        targetNamespaceDatePattern = re.compile(r"/([12][0-9]{3})-([01][0-9])-([0-3][0-9])|"
                                            r"/([12][0-9]{3})([01][0-9])([0-3][0-9])|")
        efmFilenamePattern = re.compile(r"^[a-z0-9][a-zA-Z0-9_\.\-]*(\.xsd|\.xml|\.htm)$")
        htmlFileNamePattern = re.compile(r"^[a-zA-Z0-9][._a-zA-Z0-9-]*(\.htm)$")
        roleTypePattern = re.compile(r"^.*/role/[^/\s]+$")
        arcroleTypePattern = re.compile(r"^.*/arcrole/[^/\s]+$")
        arcroleDefinitionPattern = re.compile(r"^.*[^\\s]+.*$")  # at least one non-whitespace character
        namePattern = re.compile("[][()*+?\\\\/^{}|@#%^=~`\"';:,<>&$\u00a3\u20ac]") # u20ac=Euro, u00a3=pound sterling 
        linkroleDefinitionBalanceIncomeSheet = re.compile(r"[^-]+-\s+Statement\s+-\s+.*(income|balance|financial\W+position)",
                                                          re.IGNORECASE)
        namespacesConflictPattern = re.compile(r"http://(xbrl\.us|fasb\.org|xbrl\.sec\.gov)/(dei|us-types|us-roles|rr)/([0-9]{4}-[0-9]{2}-[0-9]{2})$")
    nonDomainItemNameProblemPattern = re.compile(
        r"({0})|(FirstQuarter|SecondQuarter|ThirdQuarter|FourthQuarter|[1-4]Qtr|Qtr[1-4]|ytd|YTD|HalfYear)(?:$|[A-Z\W])"
        .format(re.sub(r"\W", "", (val.entityRegistrantName or "").title())))
    
        
    visited.append(modelDocument)
    for referencedDocument, modelDocumentReference in modelDocument.referencesDocument.items():
        #6.07.01 no includes
        if modelDocumentReference.referenceType == "include":
            val.modelXbrl.error(("EFM.6.07.01", "GFM.1.03.01"),
                _("Taxonomy schema %(schema)s includes %(include)s, only import is allowed"),
                modelObject=modelDocumentReference.referringModelObject,
                    schema=os.path.basename(modelDocument.uri), 
                    include=os.path.basename(referencedDocument.uri))
        if referencedDocument not in visited and referencedDocument.inDTS: # ignore EdgarRenderer added non-DTS documents
            checkFilingDTS(val, referencedDocument, isEFM, isGFM, visited)
            
    if val.disclosureSystem.standardTaxonomiesDict is None:
        pass

    if isEFM: 
        if modelDocument.uri in val.disclosureSystem.standardTaxonomiesDict:
            if modelDocument.targetNamespace:
                # check for duplicates of us-types, dei, and rr taxonomies
                match = namespacesConflictPattern.match(modelDocument.targetNamespace)
                if match is not None:
                    val.standardNamespaceConflicts[match.group(2)].add(modelDocument)
        else:
            if len(modelDocument.basename) > 32:
                val.modelXbrl.error("EFM.5.01.01.tooManyCharacters",
                    _("Document file name %(filename)s must not exceed 32 characters."),
                    modelObject=modelDocument, filename=modelDocument.basename)
            if modelDocument.type == ModelDocument.Type.INLINEXBRL:
                if not htmlFileNamePattern.match(modelDocument.basename):
                    val.modelXbrl.error("EFM.5.01.01",
                        _("Document file name %(filename)s must start with a-z or 0-9, contain upper or lower case letters, ., -, _, and end with .htm."),
                        modelObject=modelDocument, filename=modelDocument.basename)
            elif not efmFilenamePattern.match(modelDocument.basename):
                val.modelXbrl.error("EFM.5.01.01",
                    _("Document file name %(filename)s must start with a-z or 0-9, contain upper or lower case letters, ., -, _, and end with .xsd or .xml."),
                    modelObject=modelDocument, filename=modelDocument.basename)
    
    if (modelDocument.type == ModelDocument.Type.SCHEMA and 
        modelDocument.targetNamespace not in val.disclosureSystem.baseTaxonomyNamespaces and
        modelDocument.uri.startswith(val.modelXbrl.uriDir)):
        
        val.hasExtensionSchema = True
        # check schema contents types
        # 6.7.3 check namespace for standard authority
        targetNamespaceAuthority = UrlUtil.authority(modelDocument.targetNamespace) 
        if targetNamespaceAuthority in val.disclosureSystem.standardAuthorities:
            val.modelXbrl.error(("EFM.6.07.03", "GFM.1.03.03"),
                _("Taxonomy schema %(schema)s namespace %(targetNamespace)s is a disallowed authority"),
                modelObject=modelDocument, schema=os.path.basename(modelDocument.uri), targetNamespace=modelDocument.targetNamespace, 
                targetNamespaceAuthority=UrlUtil.authority(modelDocument.targetNamespace, includeScheme=False))
            
        # 6.7.4 check namespace format
        if modelDocument.targetNamespace is None or not modelDocument.targetNamespace.startswith("http://"):
            match = None
        else:
            targetNamespaceDate = modelDocument.targetNamespace[len(targetNamespaceAuthority):]
            match = targetNamespaceDatePattern.match(targetNamespaceDate)
        if match is not None:
            try:
                if match.lastindex == 3:
                    date = datetime.date(int(match.group(1)),int(match.group(2)),int(match.group(3)))
                elif match.lastindex == 6:
                    date = datetime.date(int(match.group(4)),int(match.group(5)),int(match.group(6)))
                else:
                    match = None
            except ValueError:
                match = None
        if match is None:
            val.modelXbrl.error(("EFM.6.07.04", "GFM.1.03.04"),
                _("Taxonomy schema %(schema)s namespace %(targetNamespace)s must have format http://{authority}/{versionDate}"),
                modelObject=modelDocument, schema=os.path.basename(modelDocument.uri), targetNamespace=modelDocument.targetNamespace)
        elif val.fileNameDate and date > val.fileNameDate:
            val.modelXbrl.info(("EFM.6.07.06", "GFM.1.03.06"),
                _("Warning: Taxonomy schema %(schema)s namespace %(targetNamespace)s has date later than docuement name date %(docNameDate)s"),
                modelObject=modelDocuement, schema=os.path.basename(modelDocuement.uri), targetNamespace=modelDocuement.targetNamespace,
                docNameDate=val.fileNameDate)

        if modelDocument.targetNamespace is not None:
            # 6.7.5 check prefix for _
            authority = UrlUtil.authority(modelDocument.targetNamespace)
            if not re.match(r"(http://|https://|ftp://|urn:)\w+",authority):
                val.modelXbrl.error(("EFM.6.07.05", "GFM.1.03.05"),
                    _("Taxonomy schema %(schema)s namespace %(targetNamespace)s must be a valid URL with a valid authority for the namespace."),
                    modelObject=modelDocument, schema=os.path.basename(modelDocument.uri), targetNamespace=modelDocument.targetNamespace)
            prefix = XmlUtil.xmlnsprefix(modelDocument.xmlRootElement,modelDocument.targetNamespace)
            if not prefix:
                val.modelXbrl.error(("EFM.6.07.07", "GFM.1.03.07"),
                    _("Taxonomy schema %(schema)s namespace %(targetNamespace)s missing prefix for the namespace."),
                    modelObject=modelDocument, schema=os.path.basename(modelDocument.uri), targetNamespace=modelDocument.targetNamespace)
            elif "_" in prefix:
                val.modelXbrl.error(("EFM.6.07.07", "GFM.1.03.07"),
                    _("Taxonomy schema %(schema)s namespace %(targetNamespace)s prefix %(prefix)s must not have an '_'"),
                    modelObject=modelDocument, schema=os.path.basename(modelDocument.uri), targetNamespace=modelDocument.targetNamespace, prefix=prefix)

            for modelConcept in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.w3.org/2001/XMLSchema}element"):
                if isinstance(modelConcept,ModelConcept):
                    # 6.7.16 name not duplicated in standard taxonomies
                    name = modelConcept.get("name")
                    if name is None: 
                        name = ""
                        if modelConcept.get("ref") is not None:
                            continue    # don't validate ref's here
                    for c in val.modelXbrl.nameConcepts.get(name, []):
                        if c.modelDocument != modelDocument:
                            if not c.modelDocument.uri.startswith(val.modelXbrl.uriDir):
                                val.modelXbrl.error(("EFM.6.07.16", "GFM.1.03.18"),
                                    _("Concept %(concept)s is also defined in standard taxonomy schema schema %(standardSchema)s"),
                                    modelObject=(modelConcept,c), concept=modelConcept.qname, standardSchema=os.path.basename(c.modelDocument.uri), standardConcept=c.qname)

                    # 6.7.17 id properly formed
                    _id = modelConcept.id
                    requiredId = (prefix if prefix is not None else "") + "_" + name
                    if _id != requiredId:
                        val.modelXbrl.error(("EFM.6.07.17", "GFM.1.03.19"),
                            _("Concept %(concept)s id %(id)s should be %(requiredId)s"),
                            modelObject=modelConcept, concept=modelConcept.qname, id=_id, requiredId=requiredId)
                        
                    # 6.7.18 nillable is true
                    nillable = modelConcept.get("nillable")
                    if nillable != "true" and modelConcept.isItem:
                        val.modelXbrl.error(("EFM.6.07.18", "GFM.1.03.20"),
                            _("Taxonomy schema %(schema)s element %(concept)s nillable %(nillable)s should be 'true'"),
                            modelObject=modelConcept, schema=os.path.basename(modelDocument.uri),
                            concept=name, nillable=nillable)
        
                    # 6.7.19 not tuple
                    if modelConcept.isTuple:
                        val.modelXbrl.error(("EFM.6.07.19", "GFM.1.03.21"),
                            _("Concept %(concept)s is a tuple"),
                            modelObject=modelConcept, concept=modelConcept.qname)
                        
                    # 6.7.20 no typed domain ref
                    if modelConcept.isTypedDimension:
                        val.modelXbrl.error(("EFM.6.07.20", "GFM.1.03.22"),
                            _("Concept %(concept)s has typedDomainRef %(typedDomainRef)s"),
                            modelObject=modelConcept, concept=modelConcept.qname,
                            typedDomainRef=modelConcept.typedDomainElement.qname if modelConcept.typedDomainElement is not None else modelConcept.typedDomainRef)
                        
                    # 6.7.21 abstract must be duration
                    isDuration = modelConcept.periodType == "duration"
                    if modelConcept.isAbstract and not isDuration:
                        val.modelXbrl.error(("EFM.6.07.21", "GFM.1.03.23"),
                            _("Taxonomy schema %(schema)s element %(concept)s is abstract but period type is not duration"),
                            modelObject=modelConcept, schema=os.path.basename(modelDocument.uri), concept=modelConcept.qname)
                        
                    # 6.7.22 abstract must be stringItemType
                    ''' removed SEC EFM v.17, Edgar release 10.4, and GFM 2011-04-08
                    if modelConcept.abstract == "true" and modelConcept.typeQname != XbrlConst. qnXbrliStringItemType:
                        val.modelXbrl.error(("EFM.6.07.22", "GFM.1.03.24"),
                            _("Concept %(concept)s  is abstract but type is not xbrli:stringItemType"),
                            modelObject=modelConcept, concept=modelConcept.qname)
					'''
                    substitutionGroupQname = modelConcept.substitutionGroupQname
                    # 6.7.23 Axis must be subs group dimension
                    if name.endswith("Axis") ^ (substitutionGroupQname == XbrlConst.qnXbrldtDimensionItem):
                        val.modelXbrl.error(("EFM.6.07.23", "GFM.1.03.25"),
                            _("Concept %(concept)s must end in Axis to be in xbrldt:dimensionItem substitution group"),
                            modelObject=modelConcept, concept=modelConcept.qname)

                    # 6.7.24 Table must be subs group hypercube
                    if name.endswith("Table") ^ (substitutionGroupQname == XbrlConst.qnXbrldtHypercubeItem):
                        val.modelXbrl.error(("EFM.6.07.24", "GFM.1.03.26"),
                            _("Concept %(concept)s must end in Table to be in xbrldt:hypercubeItem substitution group"),
                            modelObject=modelConcept, schema=os.path.basename(modelDocument.uri), concept=modelConcept.qname)

                    # 6.7.25 if neither hypercube or dimension, substitution group must be item
                    if substitutionGroupQname not in (None,
                                                        XbrlConst.qnXbrldtDimensionItem, 
                                                        XbrlConst.qnXbrldtHypercubeItem,
                                                        XbrlConst.qnXbrliItem):                           
                        val.modelXbrl.error(("EFM.6.07.25", "GFM.1.03.27"),
                            _("Concept %(concept)s has disallowed substitution group %(substitutionGroup)s"),
                            modelObject=modelConcept, concept=modelConcept.qname,
                            substitutionGroup=modelConcept.substitutionGroupQname)
                        
                    # 6.7.26 Table must be subs group hypercube
                    if name.endswith("LineItems") and modelConcept.abstract != "true":
                        val.modelXbrl.error(("EFM.6.07.26", "GFM.1.03.28"),
                            _("Concept %(concept)s is a LineItems but not abstract"),
                            modelObject=modelConcept, concept=modelConcept.qname)

                    # 6.7.27 type domainMember must end with Domain or Member
                    conceptType = modelConcept.type
                    isDomainItemType = conceptType is not None and conceptType.isDomainItemType
                    endsWithDomainOrMember = name.endswith("Domain") or name.endswith("Member")
                    if isDomainItemType != endsWithDomainOrMember:
                        val.modelXbrl.error(("EFM.6.07.27", "GFM.1.03.29"),
                            _("Concept %(concept)s must end with Domain or Member for type of domainItemType"),
                            modelObject=modelConcept, concept=modelConcept.qname)

                    # 6.7.28 domainItemType must be duration
                    if isDomainItemType and not isDuration:
                        val.modelXbrl.error(("EFM.6.07.28", "GFM.1.03.30"),
                            _("Concept %(concept)s is a domainItemType and must be periodType duration"),
                            modelObject=modelConcept, concept=modelConcept.qname)
                                                
                    #6.7.31 (version 27) fractions
                    if modelConcept.isFraction:
                        val.modelXbrl.error("EFM.6.07.31",
                            _("Concept %(concept)s is a fraction"),
                            modelObject=modelConcept, concept=modelConcept.qname)
    
                    #6.7.32 (version 27) instant non numeric
                    if modelConcept.isItem and (not modelConcept.isNumeric and not isDuration and not modelConcept.isAbstract and not isDomainItemType):
                        val.modelXbrl.error("EFM.6.07.32",
                            _("Taxonomy schema %(schema)s element %(concept)s is non-numeric but period type is not duration"),
                            modelObject=modelConcept, schema=os.path.basename(modelDocument.uri), concept=modelConcept.qname)
                        
                    # 6.8.5 semantic check, check LC3 name
                    if name:
                        if not name[0].isupper():
                            val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.08.05.firstLetter", "GFM.2.03.05.firstLetter"),
                                _("Concept %(concept)s name must start with a capital letter"),
                                modelObject=modelConcept, concept=modelConcept.qname)
                        if namePattern.search(name):
                            val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.08.05.disallowedCharacter", "GFM.2.03.05.disallowedCharacter"),
                                _("Concept %(concept)s has disallowed name character"),
                                modelObject=modelConcept, concept=modelConcept.qname)
                        if len(name) > 200:
                            val.modelXbrl.log("ERROR-SEMANTIC", "EFM.6.08.05.nameLength",
                                _("Concept %(concept)s name length %(namelength)s exceeds 200 characters"),
                                modelObject=modelConcept, concept=modelConcept.qname, namelength=len(name))
                        
                    if isEFM:
                        label = modelConcept.label(lang="en-US", fallbackToQname=False)
                        if label:
                            # allow Joe's Bar, N.A.  to be JoesBarNA -- remove ', allow A. as not article "a"
                            lc3name = ''.join(re.sub(r"['.-]", "", (w[0] or w[2] or w[3] or w[4])).title()
                                              for w in re.findall(r"((\w+')+\w+)|(A[.-])|([.-]A(?=\W|$))|(\w+)", label) # EFM implies this should allow - and . re.findall(r"[\w\-\.]+", label)
                                              if w[4].lower() not in ("the", "a", "an"))
                            if not(name == lc3name or 
                                   (name and lc3name and lc3name[0].isdigit() and name[1:] == lc3name and (name[0].isalpha() or name[0] == '_'))):
                                val.modelXbrl.log("WARNING-SEMANTIC", "EFM.6.08.05.LC3",
                                    _("Concept %(concept)s should match expected LC3 composition %(lc3name)s"),
                                    modelObject=modelConcept, concept=modelConcept.qname, lc3name=lc3name)
                                
                    if conceptType is not None:
                        # 6.8.6 semantic check
                        if not isDomainItemType and conceptType.qname != XbrlConst.qnXbrliDurationItemType:
                            nameProblems = nonDomainItemNameProblemPattern.findall(name)
                            if any(any(t) for t in nameProblems):  # list of tuples with possibly nonempty strings
                                val.modelXbrl.log("WARNING-SEMANTIC", ("EFM.6.08.06", "GFM.2.03.06"),
                                    _("Concept %(concept)s should not contain company or period information, found: %(matches)s"),
                                    modelObject=modelConcept, concept=modelConcept.qname, 
                                    matches=", ".join(''.join(t) for t in nameProblems))
                        
                        if conceptType.qname == XbrlConst.qnXbrliMonetaryItemType:
                            if not modelConcept.balance:
                                # 6.8.11 may not appear on a income or balance statement
                                if any(linkroleDefinitionBalanceIncomeSheet.match(roleType.definition)
                                       for rel in val.modelXbrl.relationshipSet(XbrlConst.parentChild).toModelObject(modelConcept)
                                       for roleType in val.modelXbrl.roleTypes.get(rel.linkrole,())):
                                    val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.08.11", "GFM.2.03.11"),
                                        _("Concept %(concept)s must have a balance because it appears in a statement of income or balance sheet"),
                                        modelObject=modelConcept, concept=modelConcept.qname)
                                # 6.11.5 semantic check, must have a documentation label
                                stdLabel = modelConcept.label(lang="en-US", fallbackToQname=False)
                                defLabel = modelConcept.label(preferredLabel=XbrlConst.documentationLabel, lang="en-US", fallbackToQname=False)
                                if not defLabel or ( # want different words than std label
                                    stdLabel and re.findall(r"\w+", stdLabel) == re.findall(r"\w+", defLabel)):
                                    val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.11.05", "GFM.2.04.04"),
                                        _("Concept %(concept)s is monetary without a balance and must have a docuementation label that disambiguates its sign"),
                                        modelObject=modelConcept, concept=modelConcept.qname)
                        
                        # 6.8.16 semantic check
                        if conceptType.qname == XbrlConst.qnXbrliDateItemType and modelConcept.periodType != "duration":
                            val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.08.16", "GFM.2.03.16"),
                                _("Concept %(concept)s of type xbrli:dateItemType must have periodType duration"),
                                modelObject=modelConcept, concept=modelConcept.qname)
                        
                        # 6.8.17 semantic check
                        if conceptType.qname == XbrlConst.qnXbrliStringItemType and modelConcept.periodType != "duration":
                            val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.08.17", "GFM.2.03.17"),
                                _("Concept %(concept)s of type xbrli:stringItemType must have periodType duration"),
                                modelObject=modelConcept, concept=modelConcept.qname)
                        

        # 6.7.8 check for embedded linkbase
        for e in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}linkbase"):
            if isinstance(e,ModelObject):
                val.modelXbrl.error(("EFM.6.07.08", "GFM.1.03.08"),
                    _("Taxonomy schema %(schema)s contains an embedded linkbase"),
                    modelObject=e, schema=modelDocument.basename)
                break

        requiredUsedOns = {XbrlConst.qnLinkPresentationLink,
                           XbrlConst.qnLinkCalculationLink,
                           XbrlConst.qnLinkDefinitionLink}
        
        standardUsedOns = {XbrlConst.qnLinkLabel, XbrlConst.qnLinkReference, 
                           XbrlConst.qnLinkDefinitionArc, XbrlConst.qnLinkCalculationArc, XbrlConst.qnLinkPresentationArc, 
                           XbrlConst.qnLinkLabelArc, XbrlConst.qnLinkReferenceArc, 
                           # per WH, private footnote arc and footnote resource roles are not allowed
                           XbrlConst.qnLinkFootnoteArc, XbrlConst.qnLinkFootnote,
                           }

        # 6.7.9 role types authority
        for e in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}roleType"):
            if isinstance(e,ModelObject):
                roleURI = e.get("roleURI")
                if targetNamespaceAuthority != UrlUtil.authority(roleURI):
                    val.modelXbrl.error(("EFM.6.07.09", "GFM.1.03.09"),
                        _("RoleType %(roleType)s does not match authority %(targetNamespaceAuthority)s"),
                        modelObject=e, roleType=roleURI, targetNamespaceAuthority=targetNamespaceAuthority, targetNamespace=modelDocument.targetNamespace)
                # 6.7.9 end with .../role/lc3 name
                if not roleTypePattern.match(roleURI):
                    val.modelXbrl.warning(("EFM.6.07.09.roleEnding", "GFM.1.03.09"),
                        "RoleType %(roleType)s should end with /role/{LC3name}",
                        modelObject=e, roleType=roleURI)
                    
                # 6.7.10 only one role type declaration in DTS
                modelRoleTypes = val.modelXbrl.roleTypes.get(roleURI)
                if modelRoleTypes is not None:
                    modelRoleType = modelRoleTypes[0]
                    definition = modelRoleType.definitionNotStripped
                    usedOns = modelRoleType.usedOns
                    if len(modelRoleTypes) == 1:
                        # 6.7.11 used on's for pre, cal, def if any has a used on
                        if not usedOns.isdisjoint(requiredUsedOns) and len(requiredUsedOns - usedOns) > 0:
                            val.modelXbrl.error(("EFM.6.07.11", "GFM.1.03.11"),
                                _("RoleType %(roleType)s missing used on %(usedOn)s"),
                                modelObject=e, roleType=roleURI, usedOn=requiredUsedOns - usedOns)
                            
                        # 6.7.12 definition match pattern
                        if (val.disclosureSystem.roleDefinitionPattern is not None and
                            (definition is None or not val.disclosureSystem.roleDefinitionPattern.match(definition))):
                            val.modelXbrl.error(("EFM.6.07.12", "GFM.1.03.12-14"),
                                _("RoleType %(roleType)s definition \"%(definition)s\" must match {Sortcode} - {Type} - {Title}"),
                                modelObject=e, roleType=roleURI, definition=(definition or ""))

                    if usedOns & standardUsedOns: # semantics check
                        val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.08.03", "GFM.2.03.03"),
                            _("RoleType %(roleuri)s is defined using role types already defined by standard roles for: %(qnames)s"),
                            modelObject=e, roleuri=roleURI, qnames=', '.join(str(qn) for qn in usedOns & standardUsedOns))


        # 6.7.13 arcrole types authority
        for e in modelDocument.xmlRootElement.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}arcroleType"):
            if isinstance(e,ModelObject):
                arcroleURI = e.get("arcroleURI")
                if targetNamespaceAuthority != UrlUtil.authority(arcroleURI):
                    val.modelXbrl.error(("EFM.6.07.13", "GFM.1.03.15"),
                        _("ArcroleType %(arcroleType)s does not match authority %(targetNamespaceAuthority)s"),
                        modelObject=e, arcroleType=arcroleURI, targetNamespaceAuthority=targetNamespaceAuthority, targetNamespace=modelDocument.targetNamespace)
                # 6.7.13 end with .../arcrole/lc3 name
                if not arcroleTypePattern.match(arcroleURI):
                    val.modelXbrl.warning(("EFM.6.07.13.arcroleEnding", "GFM.1.03.15"),
                        _("ArcroleType %(arcroleType)s should end with /arcrole/{LC3name}"),
                        modelObject=e, arcroleType=arcroleURI)
                    
                # 6.7.15 definition match pattern
                modelRoleTypes = val.modelXbrl.arcroleTypes[arcroleURI]
                definition = modelRoleTypes[0].definition
                if definition is None or not arcroleDefinitionPattern.match(definition):
                    val.modelXbrl.error(("EFM.6.07.15", "GFM.1.03.17"),
                        _("ArcroleType %(arcroleType)s definition must be non-empty"),
                        modelObject=e, arcroleType=arcroleURI)
    
                # semantic checks
                usedOns = modelRoleTypes[0].usedOns
                if usedOns & standardUsedOns: # semantics check
                    val.modelXbrl.log("ERROR-SEMANTIC", ("EFM.6.08.03", "GFM.2.03.03"),
                        _("ArcroleType %(arcroleuri)s is defined using role types already defined by standard arcroles for: %(qnames)s"),
                        modelObject=e, arcroleuri=arcroleURI, qnames=', '.join(str(qn) for qn in usedOns & standardUsedOns))



        #6.3.3 filename check
        m = re.match(r"^\w+-([12][0-9]{3}[01][0-9][0-3][0-9]).xsd$", modelDocuement.basename)
        if m:
            try: # check date value
                datetime.datetime.strptime(m.group(1),"%Y%m%d").date()
                # date and format are ok, check "should" part of 6.3.3
                if val.fileNameBasePart:
                    expectedFilename = "{0}-{1}.xsd".format(val.fileNameBasePart, val.fileNameDatePart)
                    if modelDocument.basename != expectedFilename:
                        val.modelXbrl.log("WARNING-SEMANTIC", ("EFM.6.03.03.matchInstance", "GFM.1.01.01.matchInstance"),
                            _('Schema file name warning: %(filename)s, should match %(expectedFilename)s'),
                            modelObject=modelDocument, filename=modelDocument.basename, expectedFilename=expectedFilename)
            except ValueError:
                val.modelXbrl.error((val.EFM60303, "GFM.1.01.01"),
                    _('Invalid schema file base name part (date) in "{base}-{yyyymmdd}.xsd": %(filename)s'),
                    modelObject=modelDocument, filename=modelDocument.basename,
                    messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
        else:
            val.modelXbrl.error((val.EFM60303, "GFM.1.01.01"),
                _('Invalid schema file name, must match "{base}-{yyyymmdd}.xsd": %(filename)s'),
                modelObject=modelDocument, filename=modelDocument.basename,
                messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))

    elif modelDocument.type == ModelDocument.Type.LINKBASE:
        # if it is part of the submission (in same directory) check name
        labelRels = None
        if modelDocument.filepath.startswith(val.modelXbrl.modelDocument.filepathdir):
            #6.3.3 filename check
            extLinkElt = XmlUtil.descendant(modelDocument.xmlRootElement, XbrlConst.link, "*", "{http://www.w3.org/1999/xlink}type", "extended")
            if extLinkElt is None:# no ext link element
                val.modelXbrl.error((val.EFM60303 + ".noLinkElement", "GFM.1.01.01.noLinkElement"),
                    _('Invalid linkbase file name: %(filename)s, has no extended link element, cannot determine link type.'),
                    modelObject=modelDocument, filename=modelDocument.basename,
                    messageCodes=("EFM.6.03.03.noLinkElement", "EFM.6.23.01.noLinkElement",  "GFM.1.01.01.noLinkElement"))
            elif extLinkElt.localName not in extLinkEltFileNameEnding:
                val.modelXbrl.error("EFM.6.03.02",
                    _('Invalid linkbase link element %(linkElement)s in %(filename)s'),
                    modelObject=modelDocument, linkElement=extLinkElt.localName, filename=modelDocument.basename)
            else:
                m = re.match(r"^\w+-([12][0-9]{3}[01][0-9][0-3][0-9])(_[a-z]{3}).xml$", modelDocuement.basename)
                expectedSuffix = extLinkEltFileNameEnding[extLinkElt.localName]
                if m and m.group(2) == expectedSuffix:
                    try: # check date value
                        datetime.datetime.strptime(m.group(1),"%Y%m%d").date()
                        # date and format are ok, check "should" part of 6.3.3
                        if val.fileNameBasePart:
                            expectedFilename = "{0}-{1}{2}.xml".format(val.fileNameBasePart, val.fileNameDatePart, expectedSuffix)
                            if modelDocument.basename != expectedFilename:
                                val.modelXbrl.log("WARNING-SEMANTIC", ("EFM.6.03.03.matchInstance", "GFM.1.01.01.matchInstance"),
                                    _('Linkbase name warning: %(filename)s should match %(expectedFilename)s'),
                                    modelObject=modelDocument, filename=modelDocument.basename, expectedFilename=expectedFilename)
                    except ValueError:
                        val.modelXbrl.error((val.EFM60303, "GFM.1.01.01"),
                            _('Invalid linkbase base file name part (date) in "{base}-{yyyymmdd}_{suffix}.xml": %(filename)s'),
                            modelObject=modelDocument, filename=modelDocument.basename,
                            messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
                else:
                    val.modelXbrl.error((val.EFM60303, "GFM.1.01.01"),
                        _('Invalid linkbase name, must match "{base}-{yyyymmdd}%(expectedSuffix)s.xml": %(filename)s'),
                        modelObject=modelDocument, filename=modelDocument.basename, expectedSuffix=expectedSuffix,
                        messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
                if extLinkElt.localName == "labelLink":
                    if labelRels is None:
                        labelRels = val.modelXbrl.relationshipSet(XbrlConst.conceptLabel)
                    for labelElt in XmlUtil.children(extLinkElt, XbrlConst.link, "label"):
                        # 6.10.9
                        if XbrlConst.isNumericRole(labelElt.role):
                            for rel in labelRels.toModelObject(labelElt):
                                if rel.fromModelObject is not None and not rel.fromModelObject.isNumeric:
                                    val.modelXbrl.error("EFM.6.10.09",
                                        _("Label of non-numeric concept %(concept)s has a numeric role: %(role)s"), 
                                          modelObject=(labelElt, rel.fromModelObject), concept=rel.fromModelObject.qname, role=labelElt.role)
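
The 6.3.3 checks above reduce to matching the file name against "{base}-{yyyymmdd}.xsd" (or the linkbase variant) and then letting datetime.datetime.strptime(...).date() reject calendar-impossible dates. A minimal standalone sketch of that idea, with a hypothetical helper name that is not part of Arelle:

import datetime
import re

def parse_schema_filename_date(filename):
    """Return the embedded date if filename looks like '{base}-{yyyymmdd}.xsd', else None."""
    m = re.match(r"^\w+-([12][0-9]{3}[01][0-9][0-3][0-9])\.xsd$", filename)
    if not m:
        return None
    try:
        # strptime rejects impossible dates such as February 30
        return datetime.datetime.strptime(m.group(1), "%Y%m%d").date()
    except ValueError:
        return None

print(parse_schema_filename_date("abc-20120331.xsd"))  # 2012-03-31
print(parse_schema_filename_date("abc-20120230.xsd"))  # None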

Example 10

Project: Open-Knesset Source File: tests.py
    def setUp(self):
        self.knesset = Knesset.objects.create(number=1,
                            start_date=datetime.date(2010,1,1))
        self.party_1 = Party.objects.create(name='party 1', number_of_seats=1,
                                            knesset=self.knesset)
        self.mk_1 = Member.objects.create(name='mk_1',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1)
        self.mk_2 = Member.objects.create(name='mk_2',
                                          start_date=datetime.date(2010,1,1),
                                          current_party=self.party_1)


        Membership.objects.create(member=self.mk_1, party=self.party_1)
        Membership.objects.create(member=self.mk_2, party=self.party_1)

        self.user_1 = User.objects.create_user('jacob', '[email protected]', 'JKM')
        self.user_2 = User.objects.create_user('john', '[email protected]', 'LSD')
        self.user_3 = User.objects.create_user('superman', '[email protected]', 'CRP')
        self.user_3.is_superuser = True
        self.user_3.save()

        self.agenda_1 = Agenda.objects.create(name='agenda 1',
                                              description='a bloody good agenda 1',
                                              public_owner_name='Dr. Jacob',
                                              is_public=True,
                                              num_followers=100)
        self.agenda_2 = Agenda.objects.create(name='agenda 2',
                                              description='a bloody good agenda 2',
                                              public_owner_name='Greenpeace',
                                              is_public=True,
                                              num_followers=50)
        self.agenda_3 = Agenda.objects.create(name='agenda 3',
                                              description='a bloody good agenda 3',
                                              public_owner_name='Hidden One',
                                              is_public=False)
        self.agenda_1.editors = [self.user_1]
        self.agenda_2.editors = [self.user_1, self.user_2]
        self.agenda_3.editors = [self.user_2]
        self.vote_1 = Vote.objects.create(title='vote 1',time=datetime.datetime.now())
        self.vote_2 = Vote.objects.create(title='vote 2',time=datetime.datetime.now())
        self.vote_3 = Vote.objects.create(title='vote 3',time=datetime.datetime.now())
        self.bill_1 = Bill.objects.create(stage='1', title='bill 1', popular_name='kill bill')
        self.voteaction_1 = VoteAction.objects.create(vote=self.vote_1, member=self.mk_1, type='for', party=self.mk_1.current_party)
        self.voteaction_2 = VoteAction.objects.create(vote=self.vote_2, member=self.mk_1, type='for', party=self.mk_1.current_party)
        self.voteaction_3 = VoteAction.objects.create(vote=self.vote_3, member=self.mk_2, type='for', party=self.mk_2.current_party)

        self.vote_1.update_vote_properties()
        self.vote_2.update_vote_properties()
        self.vote_3.update_vote_properties()

        self.agendavote_1 = AgendaVote.objects.create(agenda=self.agenda_1,
                                                      vote=self.vote_1,
                                                      score=-1,
                                                      reasoning="there's got to be a reason 1")
        self.agendavote_2 = AgendaVote.objects.create(agenda=self.agenda_2,
                                                      vote=self.vote_2,
                                                      score=0.5,
                                                      reasoning="there's got to be a reason 2")
        self.agendavote_3 = AgendaVote.objects.create(agenda=self.agenda_1,
                                                      vote=self.vote_2,
                                                      score=0.5,
                                                      reasoning="there's got to be a reason 3")
        self.agendavote_4 = AgendaVote.objects.create(agenda=self.agenda_3,
                                                      vote=self.vote_3,
                                                      score=0.5,
                                                      reasoning="there's got to be a reason 3")
        self.agendabill_1 = AgendaBill.objects.create(agenda=self.agenda_1,
                                                      bill=self.bill_1,
                                                      score=0.5,
                                                      reasoning="agenda bill 1")
        self.committee_1 = Committee.objects.create(name='c1')
        self.committee_1 = Committee.objects.create(name='c1')
        self.meeting_1 = self.committee_1.meetings.create(topics='My Meeting', date=datetime.datetime.now(),
                                 protocol_text='''jacob:
I am a perfectionist
adrian:
I have a deadline''')
        self.meeting_1.create_protocol_parts()
        self.agendabill_1 = AgendaMeeting.objects.create(agenda=self.agenda_1,
                                                      meeting=self.meeting_1,
                                                      score=0.5,
                                                      reasoning="agenda meeting 1")
        self.committee_1 = Committee.objects.create(name='c1')

        self.domain = 'http://' + Site.objects.get_current().domain

Example 11

Project: Medusa Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):

        # Check for non-integer values in integer-only quantities
        if any(x is not None and x != int(x) for x in (years, months)):
            raise ValueError("Non-integer years and months are "
                             "ambiguous and not currently supported.")

        if dt1 and dt2:
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")

            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())

            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Get year / month delta between the two
            months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
            self._set_months(months)

            # Remove the year/month delta so the timedelta is just well-defined
            # time units (seconds, days and microseconds)
            dtm = self.__radd__(dt2)

            # If we've overshot our target, make an adjustment
            if dt1 < dt2:
                compare = operator.gt
                increment = 1
            else:
                compare = operator.lt
                increment = -1

            while compare(dt1, dtm):
                months += increment
                self._set_months(months)
                dtm = self.__radd__(dt2)

            # Get the timedelta between the "months-adjusted" date and dt1
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            # Relative information
            self.years = years
            self.months = months
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds

            # Absolute information
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if any(x is not None and int(x) != x
                   for x in (year, month, day, hour,
                             minute, second, microsecond)):
                # For now we'll deprecate floats - later it'll be an error.
                warn("Non-integer value passed as absolute information. " +
                     "This is not a well-defined condition and will raise " +
                     "errors in future versions.", DeprecationWarning)


            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()
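
In the two-datetime branch above, relativedelta first takes a raw year/month difference, then nudges it until adding that many months to dt2 no longer overshoots dt1, and stores the remainder as seconds/microseconds. A simplified, self-contained sketch of just the month-count step (the helper names are mine, and the day is clamped to 28 instead of the true month end to keep it short):

import datetime

def add_months(d, n):
    """Shift a date by n whole months, clamping the day to 28 for simplicity."""
    y, m = divmod(d.month - 1 + n, 12)
    return d.replace(year=d.year + y, month=m + 1, day=min(d.day, 28))

def month_delta(dt1, dt2):
    """Whole months from dt2 toward dt1, adjusted so the result never overshoots dt1."""
    months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
    if dt1 >= dt2:
        while add_months(dt2, months) > dt1:
            months -= 1
    else:
        while add_months(dt2, months) < dt1:
            months += 1
    return months

print(month_delta(datetime.date(2016, 3, 1), datetime.date(2015, 12, 31)))  # 2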

Example 12

Project: transifex Source File: gviz_api.py
  @staticmethod
  def SingleValueToJS(value, value_type, escape_func=None):
    """Translates a single value and type into a JS value.

    Internal helper method.

    Args:
      value: The value which should be converted
      value_type: One of "string", "number", "boolean", "date", "datetime" or
                  "timeofday".
      escape_func: The function to use for escaping strings.

    Returns:
      The proper JS format (as string) of the given value according to the
      given value_type. For None, we simply return "null".
      If a tuple is given, it should be in one of the following forms:
        - (value, formatted value)
        - (value, formatted value, custom properties)
      where the formatted value is a string, and custom properties is a
      dictionary of the custom properties for this cell.
      To specify custom properties without specifying formatted value, one can
      pass None as the formatted value.
      One can also have a null-valued cell with formatted value and/or custom
      properties by specifying None for the value.
      This method ignores the custom properties except for checking that it is a
      dictionary. The custom properties are handled in the ToJSon and ToJSCode
      methods.
      The real type of the given value is not strictly checked. For example,
      any type can be used for string - as we simply take its str( ) and for
      boolean value we just check "if value".
      Examples:
        SingleValueToJS(None, "boolean") returns "null"
        SingleValueToJS(False, "boolean") returns "false"
        SingleValueToJS((5, "5$"), "number") returns ("5", "'5$'")
        SingleValueToJS((None, "5$"), "number") returns ("null", "'5$'")

    Raises:
      DataTableException: The value and type did not match in a not-recoverable
                          way, for example given value 'abc' for type 'number'.
    """
    if escape_func is None:
      escape_func = DataTable._EscapeValue
    if isinstance(value, tuple):
      # In case of a tuple, we run the same function on the value itself and
      # add the formatted value.
      if (len(value) not in [2, 3] or
          (len(value) == 3 and not isinstance(value[2], dict))):
        raise DataTableException("Wrong format for value and formatting - %s." %
                                 str(value))
      if not isinstance(value[1], types.StringTypes + (types.NoneType,)):
        raise DataTableException("Formatted value is not string, given %s." %
                                 type(value[1]))
      js_value = DataTable.SingleValueToJS(value[0], value_type)
      if value[1] is None:
        return (js_value, None)
      return (js_value, escape_func(value[1]))

    # The standard case - no formatting.
    t_value = type(value)
    if value is None:
      return "null"
    if value_type == "boolean":
      if value:
        return "true"
      return "false"

    elif value_type == "number":
      if isinstance(value, (int, long, float)):
        return str(value)
      raise DataTableException("Wrong type %s when expected number" % t_value)

    elif value_type == "string":
      if isinstance(value, tuple):
        raise DataTableException("Tuple is not allowed as string value.")
      return escape_func(value)

    elif value_type == "date":
      if not isinstance(value, (datetime.date, datetime.datetime)):
        raise DataTableException("Wrong type %s when expected date" % t_value)
        # We need to shift the month by 1 to match JS Date format
      return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)

    elif value_type == "timeofday":
      if not isinstance(value, (datetime.time, datetime.datetime)):
        raise DataTableException("Wrong type %s when expected time" % t_value)
      return "[%d,%d,%d]" % (value.hour, value.minute, value.second)

    elif value_type == "datetime":
      if not isinstance(value, datetime.datetime):
        raise DataTableException("Wrong type %s when expected datetime" %
                                 t_value)
      return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
                                              value.month - 1,  # To match JS
                                              value.day,
                                              value.hour,
                                              value.minute,
                                              value.second)
    # If we got here, it means the given value_type was not one of the
    # supported types.
    raise DataTableException("Unsupported type %s" % value_type)

Example 13

Project: certitude Source File: tds.py
    def parseRow(self,token,tuplemode=False):
        # TODO: This REALLY needs to be improved. Right now we don't support correctly all the data types
        # help would be appreciated ;) 
        if len(token) == 1:
            return 0

        row = [] if tuplemode else {}

        origDataLen = len(token['Data'])
        data = token['Data']
        for col in self.colMeta:
            _type = col['Type']
            if (_type == TDS_NVARCHARTYPE) |\
               (_type == TDS_NCHARTYPE):
                #print "NVAR 0x%x" % _type
                charLen = struct.unpack('<H',data[:struct.calcsize('<H')])[0]
                data = data[struct.calcsize('<H'):]
                if charLen != 0xFFFF:
                    value = data[:charLen].decode('utf-16le')
                    data = data[charLen:]
                else:
                    value = 'NULL'

            elif (_type == TDS_BIGVARCHRTYPE): 
                charLen = struct.unpack('<H',data[:struct.calcsize('<H')])[0]
                data = data[struct.calcsize('<H'):]
                if charLen != 0xFFFF:
                    value = data[:charLen]
                    data = data[charLen:]
                else:
                    value = 'NULL'

            elif (_type == TDS_GUIDTYPE):
                uuidLen = ord(data[0])
                data = data[1:]
                if uuidLen > 0:
                    uu = data[:uuidLen]
                    value = uuid.bin_to_string(uu)
                    data = data[uuidLen:]
                else:
                    value = 'NULL'
                
            elif (_type == TDS_NTEXTTYPE) |\
                 (_type == TDS_IMAGETYPE) :
                # Skip the pointer data
                charLen = ord(data[0])
                if charLen == 0:
                    value = 'NULL'
                    data = data[1:]
                else:
                    data = data[1+charLen+8:]
                    charLen = struct.unpack('<L',data[:struct.calcsize('<L')])[0]
                    data = data[struct.calcsize('<L'):]
                    if charLen != 0xFFFF:
                        if _type == TDS_NTEXTTYPE:
                            value = data[:charLen].decode('utf-16le')
                        else:
                            value = binascii.b2a_hex(data[:charLen])
                        data = data[charLen:]
                    else:
                        value = 'NULL'
                
            elif (_type == TDS_TEXTTYPE): 
                # Skip the pointer data
                charLen = ord(data[0])
                if charLen == 0:
                    value = 'NULL'
                    data = data[1:]
                else:
                    data = data[1+charLen+8:]
                    charLen = struct.unpack('<L',data[:struct.calcsize('<L')])[0]
                    data = data[struct.calcsize('<L'):]
                    if charLen != 0xFFFF:
                        value = data[:charLen]
                        data = data[charLen:]
                    else:
                        value = 'NULL'

            elif (_type == TDS_BIGVARBINTYPE) |\
                 (_type == TDS_BIGBINARYTYPE):
                charLen = struct.unpack('<H',data[:struct.calcsize('<H')])[0]
                data = data[struct.calcsize('<H'):]
                if charLen != 0xFFFF:
                    value = binascii.b2a_hex(data[:charLen])
                    data = data[charLen:]
                else:
                    value = 'NULL'

            elif (_type == TDS_DATETIM4TYPE) |\
                 (_type == TDS_DATETIMNTYPE) |\
                 (_type == TDS_DATETIMETYPE):
                value = ''    
                if _type == TDS_DATETIMNTYPE:
                    # For DATETIMNTYPE, the only valid lengths are 0x04 and 0x08, which map to smalldatetime and
                    # datetime SQL data types respectively.
                    if ord(data[0]) == 4:
                        _type = TDS_DATETIM4TYPE
                    elif ord(data[0]) == 8:
                        _type = TDS_DATETIMETYPE
                    else:
                        value = 'NULL'
                    data = data[1:]
                if (_type == TDS_DATETIMETYPE):
                    # datetime is represented in the following sequence:
                    # * One 4-byte signed integer that represents the number of days since January 1, 1900. Negative
                    #   numbers are allowed to represent dates since January 1, 1753.
                    # * One 4-byte unsigned integer that represents the number of one three-hundredths of a second
                    #  (300 counts per second) elapsed since 12 AM that day.
                    dateValue = struct.unpack('<l',data[:4])[0]
                    data = data[4:]
                    if dateValue < 0:
                        baseDate = datetime.date(1753,1,1)
                    else:
                        baseDate = datetime.date(1900,1,1)
                    timeValue = struct.unpack('<L',data[:4])[0]
                    data = data[4:] 
                elif (_type == TDS_DATETIM4TYPE):
                    # Small datetime
                    # 2.2.5.5.1.8
                    # Date/Times
                    # smalldatetime is represented in the following sequence:
                    # * One 2-byte unsigned integer that represents the number of days since January 1, 1900.
                    # * One 2-byte unsigned integer that represents the number of minutes elapsed since 12 AM that
                    #   day.
                    dateValue = struct.unpack('<H',data[:struct.calcsize('<H')])[0]
                    data = data[struct.calcsize('<H'):]
                    timeValue = struct.unpack('<H',data[:struct.calcsize('<H')])[0]
                    data = data[struct.calcsize('<H'):]
                    baseDate = datetime.date(1900,1,1)
                if value != 'NULL':
                    dateValue = datetime.date.fromordinal(baseDate.toordinal() + dateValue)
                    hours, mod = divmod(timeValue/300, 60*60)
                    minutes, second = divmod(mod, 60)
                    value = datetime.datetime(dateValue.year, dateValue.month, dateValue.day, hours, minutes, second)

            elif (_type == TDS_INT4TYPE) |\
                 (_type == TDS_MONEY4TYPE) |\
                 (_type == TDS_FLT4TYPE):
                #print "INT4"
                value = struct.unpack('<l',data[:struct.calcsize('<l')])[0]
                data = data[struct.calcsize('<l'):]

            elif (_type == TDS_FLTNTYPE):
                valueSize = ord(data[:1])
                if valueSize == 4:
                    fmt = '<f'
                elif valueSize == 8:
                    fmt = '<d'

                data = data[1:]

                if valueSize > 0:
                    value = struct.unpack(fmt,data[:valueSize])[0]
                    data = data[valueSize:]
                else:
                    value = 'NULL'

            elif _type == TDS_MONEYNTYPE:
                valueSize = ord(data[:1])
                if valueSize == 4:
                    fmt = '<l'
                elif valueSize == 8:
                    fmt = '<q'

                data = data[1:]

                if valueSize > 0:
                    value = struct.unpack(fmt,data[:valueSize])[0]
                    if valueSize == 4:
                        value = float(value) / math.pow(10,4)
                    else:
                        value = float(value >> 32) / math.pow(10,4)
                    data = data[valueSize:]
                else:
                    value = 'NULL'

                
            elif _type == TDS_BIGCHARTYPE:
                #print "BIGC"
                charLen = struct.unpack('<H',data[:struct.calcsize('<H')])[0]
                data = data[struct.calcsize('<H'):]
                value = data[:charLen]
                data = data[charLen:]

            elif (_type == TDS_INT8TYPE) |\
                 (_type == TDS_FLT8TYPE) |\
                 (_type == TDS_MONEYTYPE):
                #print "DATETIME"
                value = struct.unpack('<q',data[:struct.calcsize('<q')])[0]
                data = data[struct.calcsize('<q'):]


            elif (_type == TDS_INT2TYPE):
                #print "INT2TYPE"
                value = struct.unpack('<H',(data[:2]))[0]
                data = data[2:]

            elif (_type == TDS_DATENTYPE):
                # date is represented as one 3-byte unsigned integer that represents the number of days since
                # January 1, year 1.
                valueSize = ord(data[:1])
                data = data[1:]
                if valueSize > 0:
                    dateBytes = data[:valueSize]
                    dateValue = struct.unpack('<L','\x00'+dateBytes)[0]
                    value = datetime.date.fromtimestamp(dateValue)
                    data = data[valueSize:]
                else:
                    value = 'NULL'

            elif (_type == TDS_BITTYPE) |\
                 (_type == TDS_INT1TYPE):
                #print "BITTYPE"
                value = ord(data[:1])
                data = data[1:]

            elif (_type == TDS_NUMERICNTYPE) |\
                 (_type == TDS_DECIMALNTYPE):
                valueLen = ord(data[:1])
                data = data[1:]
                value = data[:valueLen]
                data = data[valueLen:]
                precision = ord(col['TypeData'][1])
                scale = ord(col['TypeData'][2])
                if valueLen > 0:
                    isPositiveSign = ord(value[0])
                    if (valueLen-1) == 2:
                        fmt = '<H'
                    elif (valueLen-1) == 4:
                        fmt = '<L'
                    elif (valueLen-1) == 8:
                        fmt = '<Q'
                    else:
                        # Still don't know how to handle higher values
                        value = "TODO: Interpret TDS_NUMERICNTYPE correctly"
                    number = struct.unpack(fmt, value[1:])[0]
                    number /= math.pow(precision, scale)
                    if isPositiveSign == 0:
                        number *= -1 
                    value = number
                else:
                    value = 'NULL'

            elif (_type == TDS_BITNTYPE):
                #print "BITNTYPE"
                valueSize = ord(data[:1])
                data = data[1:]
                if valueSize > 0:
                    if valueSize == 1:
                        value = ord(data[:valueSize])
                    else:
                        value = data[:valueSize]
                else:
                    value = 'NULL'
                data = data[valueSize:]

            elif (_type == TDS_INTNTYPE):
                valueSize = ord(data[:1])
                if valueSize == 1:
                    fmt = '<B'
                elif valueSize == 2:
                    fmt = '<h'
                elif valueSize == 4:
                    fmt = '<l'
                elif valueSize == 8:
                    fmt = '<q'
                else:
                    fmt = ''

                data = data[1:]

                if valueSize > 0:
                    value = struct.unpack(fmt,data[:valueSize])[0]
                    data = data[valueSize:]
                else:
                    value = 'NULL'
            elif (_type == TDS_SSVARIANTTYPE):
                LOG.critical("ParseRow: SQL Variant type not yet supported :(")
                raise
            else:
                LOG.critical("ParseROW: Unsupported data type: 0%x" % _type)
                raise

            if tuplemode:
                row.append(value)
            else:
                row[col['Name']] = value


        self.rows.append(row)

        return (origDataLen - len(data))
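
The DATETIME branch above rebuilds a timestamp from a signed 4-byte day count (relative to 1900-01-01, or 1753-01-01 for negative values, exactly as the example handles it) plus a 4-byte count of 1/300-second ticks. A standalone decode sketch under those assumptions (the function name is mine, not part of the project):

import datetime
import struct

def decode_tds_datetime(raw8):
    """Decode an 8-byte TDS DATETIME: 4-byte signed day count + 4-byte 1/300-second ticks."""
    day_count, ticks = struct.unpack('<lL', raw8)
    base = datetime.date(1753, 1, 1) if day_count < 0 else datetime.date(1900, 1, 1)
    day = datetime.date.fromordinal(base.toordinal() + day_count)
    hours, rem = divmod(ticks // 300, 60 * 60)
    minutes, seconds = divmod(rem, 60)
    return datetime.datetime(day.year, day.month, day.day, hours, minutes, seconds)

# 2004-01-02 is 37986 days after 1900-01-01; 18:10:45 is 65445 seconds into the day
print(decode_tds_datetime(struct.pack('<lL', 37986, 65445 * 300)))  # 2004-01-02 18:10:45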

Example 14

Project: smarthome Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            if not type(dt1) == type(dt2): #isinstance(dt1, type(dt2)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if isinstance(weekday, int):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

Example 15

Project: cronex Source File: __init__.py
    def check_trigger(self, date_tuple, utc_offset=0):
        """
        Returns boolean indicating if the trigger is active at the given time.
        The date tuple should be in the local time. Unless periodicities are
        used, utc_offset does not need to be specified. If periodicities are
        used, specifically in the hour and minutes fields, it is crucial that
        the utc_offset is specified.
        """
        year, month, day, hour, mins = date_tuple
        given_date = datetime.date(year, month, day)
        zeroday = datetime.date(*self.epoch[:3])
        last_dom = calendar.monthrange(year, month)[-1]
        dom_matched = True

        # In calendar and datetime.date.weekday, Monday = 0
        given_dow = (datetime.date.weekday(given_date) + 1) % 7
        first_dow = (given_dow + 1 - day) % 7

        # Figure out how much time has passed from the epoch to the given date
        utc_diff = utc_offset - self.epoch[5]
        mod_delta_yrs = year - self.epoch[0]
        mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12
        mod_delta_day = (given_date - zeroday).days
        mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff
        mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60

        # Makes iterating through like components easier.
        quintuple = zip(
            (mins, hour, day, month, given_dow),
            self.numerical_tab,
            self.string_tab,
            (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon,
                mod_delta_day),
            FIELD_RANGES)

        for value, valid_values, field_str, delta_t, field_type in quintuple:
            # All valid, static values for the fields are stored in sets
            if value in valid_values:
                continue

            # The following for loop implements the logic for context
            # sensitive and epoch sensitive constraints. break statements,
            # which are executed when a match is found, lead to a continue
            # in the outer loop. If there are no matches found, the given date
            # does not match expression constraints, so the function returns
            # False as seen at the end of this for...else... construct.
            for cron_atom in field_str.split(','):
                if cron_atom[0] == '%':
                    if not(delta_t % int(cron_atom[1:])):
                        break

                elif '#' in cron_atom:
                    D, N = int(cron_atom[0]), int(cron_atom[2])
                    # Computes Nth occurrence of D day of the week
                    if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day:
                        break

                elif cron_atom[-1] == 'W':
                    target = min(int(cron_atom[:-1]), last_dom)
                    lands_on = (first_dow + target - 1) % 7
                    if lands_on == 0:
                        # Shift from Sun. to Mon. unless Mon. is next month
                        if target < last_dom:
                            target += 1
                        else:
                            target -= 2
                    elif lands_on == 6:
                        # Shift from Sat. to Fri. unless Fri. in prior month
                        if target > 1:
                            target -= 1
                        else:
                            target += 2

                    # Break if the day is correct, and target is a weekday
                    if target == day and (first_dow + target) % 7 > 1:
                        break

                elif cron_atom[-1] == 'L':
                    # In dom field, L means the last day of the month
                    target = last_dom

                    if field_type == DAYS_OF_WEEK:
                        # Calculates the last occurrence of given day of week
                        desired_dow = int(cron_atom[:-1])
                        target = (((desired_dow - first_dow) % 7) + 29)
                        if target > last_dom:
                            target -= 7

                    if target == day:
                        break
            else:
                # See 2010.11.15 of CHANGELOG
                if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*':
                    dom_matched = False
                    continue
                elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*':
                    # If we got here, then days of months validated so it does
                    # not matter that days of the week failed.
                    return dom_matched

                # None of the expressions matched which means this field fails
                return False

        # Arriving at this point means the date landed within the constraints
        # of all fields; the associated trigger should be fired.
        return True
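
Note how check_trigger maps Python's weekday convention (Monday = 0) onto cron's (Sunday = 0) with (datetime.date.weekday(given_date) + 1) % 7. A tiny sketch of just that conversion (the helper name is made up):

import datetime

def cron_day_of_week(d):
    """Map datetime's Monday=0..Sunday=6 onto cron's Sunday=0..Saturday=6."""
    return (d.weekday() + 1) % 7

assert cron_day_of_week(datetime.date(2023, 1, 1)) == 0  # a Sunday
assert cron_day_of_week(datetime.date(2023, 1, 2)) == 1  # a Monday
assert cron_day_of_week(datetime.date(2023, 1, 7)) == 6  # a Saturday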

Example 16

Project: smarthome Source File: rrule.py
Function: rebuild
    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()

            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE

            if not rr._byweekno:
                self.wnomask = None
            else:
                self.wnomask = [0]*(self.yearlen+7)
                #no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1, 1, 1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst)%7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen+
                                           (lyearweekday-rr._wkst)%7)%7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst)%7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1

        if (rr._bynweekday and
            (month != self.lastmonth or year != self.lastyear)):
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday)%7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday)%7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1

        if rr._byeaster:
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1

        self.lastyear = year
        self.lastmonth = month
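
rebuild() precomputes the month, weekday, week-number and Easter masks that dateutil's rrule iterator consults when it expands a recurrence. A minimal usage sketch of the public API those masks serve (assumes python-dateutil is installed; the start date is only illustrative):

import datetime

from dateutil.rrule import YEARLY, rrule

# Easter Sunday for three consecutive years; this is the recurrence the
# byeaster/eastermask branch above exists to answer.
for dt in rrule(YEARLY, byeaster=0, dtstart=datetime.datetime(2004, 1, 1), count=3):
    print(dt.date())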

Example 17

Project: outspline Source File: occur_yearly.py
def make_rule(interval, refyear, month, day, hour, minute, rend, ralarm,
                                                        standard, guiconfig):
    """
    @param interval: An integer > 0 representing the number of years between
                     two consecutive occurrences.
    @param refyear: An integer representing a sample year of occurrence.
    @param month: An integer representing the chosen month (1-12).
    @param day: An integer representing the chosen day (1-31). February 29th
                will generate an occurrence only in leap years.
    @param hour: The hour when to start an occurrence (0 - 23).
    @param minute: The minute when to start an occurrence (0 - 59).
    @param rend: The positive difference in seconds between the relative start
                 time and the relative end time.
    @param ralarm: The difference in seconds between the relative start time
                   and the relative alarm time; it is negative if the alarm is
                   set later than the start time.
    @param standard: The time standard to be used, either 'local' or 'UTC'.
    @param guiconfig: A place to store any configuration needed only by the
                      interface.
    """
    # Do not use a rstart calculated from the start of the day (which would
    #   replace hour and minute) because the days with a DST time change have
    #   a variable length
    # Make sure this rule can only produce occurrences compliant with the
    #   requirements defined in organism_api.update_item_rules
    # There's no need to check standard because it's imposed by the API
    if isinstance(interval, int) and interval > 0 and \
                isinstance(refyear, int) and refyear > 0 and \
                isinstance(hour, int) and -1 < hour < 24 and \
                isinstance(minute, int) and -1 < minute < 60 and \
                (rend is None or (isinstance(rend, int) and rend > 0)) and \
                (ralarm is None or isinstance(ralarm, int)):
        # Non-leap year
        year = 2001

        try:
            # Do not use a leap year here because one more day should be
            # subtracted for January and February to obtain the time until the
            # end of the year
            date1 = _datetime.date(year, month, day)
        except ValueError:
            # Leap year
            year = 2000

            try:
                date1 = _datetime.date(year, month, day)
            except ValueError:
                raise BadRuleError()

        date2 = _datetime.date(year + 1, 1, 1)

        diff = date2 - date1
        diffs = diff.total_seconds() - hour * 3600 - minute * 60

        # Also take a possible negative (late) alarm time into account, in fact
        #  the occurrence wouldn't be found if the search range included the
        #  alarm time but not the actual occurrence time span; remember that
        #  it's normal that the occurrence is not added to the results if the
        #  search range is between (and doesn't include) the alarm time and the
        #  actual occurrence time span
        if ralarm:
            srend = max(rend, ralarm * -1, 0)
        else:
            srend = max(rend, 0)

        # Don't just store the number of years to go back, because it would
        #  make the algorithm always go back also when it's not necessary
        maxoverlap = max(srend - diffs, 0)

        return {
            'rule': _RULE_NAMES[standard],
            '#': (
                maxoverlap,
                interval,
                refyear,
                month,
                day,
                hour,
                minute,
                rend,
                ralarm,
                guiconfig,
            )
        }
    else:
        raise BadRuleError()
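
The ValueError fallback in make_rule() is what lets a February 29th rule validate: the month/day pair is first tried in a non-leap sample year and only then in a leap one, and the surviving date feeds the "seconds until the next year" arithmetic. A condensed stand-alone sketch of that probe (the helper name is an assumption, not part of the outspline API):

import datetime


def seconds_to_year_end(month, day, hour, minute):
    # Try a non-leap sample year first; fall back to a leap year for Feb 29.
    for year in (2001, 2000):
        try:
            date1 = datetime.date(year, month, day)
            break
        except ValueError:
            continue
    else:
        raise ValueError("invalid month/day combination")
    diff = datetime.date(year + 1, 1, 1) - date1
    return diff.total_seconds() - hour * 3600 - minute * 60


print(seconds_to_year_end(2, 29, 10, 30))   # only valid because 2000 is a leap year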

Example 18

Project: modrana Source File: geonames.py
Function: import_locations
    def import_locations(self, data):
        """
        Parse geonames.org country database exports

        ``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
        objects generated from the data exported by geonames.org_.

        It expects data files in the following tab separated format::

            2633441	Afon Wyre	Afon Wyre	River Wayrai,River Wyrai,Wyre	52.3166667	-4.1666667	H	STM	GB	GB	00				0		-9999	Europe/London	1994-01-13
            2633442	Wyre	Wyre	Viera	59.1166667	-2.9666667	T	ISL	GB	GB	V9				0		1	Europe/London	2004-09-24
            2633443	Wraysbury	Wraysbury	Wyrardisbury	51.45	-0.55	P	PPL	GB		P9				0		28	Europe/London	2006-08-21

        Files containing the data in this format can be downloaded from the
        geonames.org_ site in their `database export page`_.

        Files downloaded from the geonames site when processed by
        ``import_locations()`` will return ``list`` objects of the following
        style::

            [Location(2633441, "Afon Wyre", "Afon Wyre",
                      ['River Wayrai', 'River Wyrai', 'Wyre'],
                      52.3166667, -4.1666667, "H", "STM", "GB", ['GB'], "00",
                      None, None, None, 0, None, -9999, "Europe/London",
                      datetime.date(1994, 1, 13)),
             Location(2633442, "Wyre", "Wyre", ['Viera'], 59.1166667,
                      -2.9666667, "T", "ISL", "GB", ['GB'], "V9", None, None,
                      None, 0, None, 1, "Europe/London",
                      datetime.date(2004, 9, 24)),
             Location(2633443, "Wraysbury", "Wraysbury", ['Wyrardisbury'],
                      51.45, -0.55, "P", "PPL", "GB", None, "P9", None, None,
                      None, 0, None, 28, "Europe/London",
                      datetime.date(2006, 8, 21))]

        >>> locations = Locations(open("geonames"))
        >>> for location in sorted(locations, key=attrgetter("geonameid")):
        ...     print("%i - %s" % (location.geonameid, location))
        2633441 - Afon Wyre (River Wayrai, River Wyrai, Wyre - N52.317°;
        W004.167°)
        2633442 - Wyre (Viera - N59.117°; W002.967°)
        2633443 - Wraysbury (Wyrardisbury - N51.450°; W000.550°)
        >>> broken_locations = Locations(open("broken_geonames"))
        Traceback (most recent call last):
            ...
        FileFormatError: Incorrect data format, if you're using a file
        downloaded from geonames.org please report this to James Rowe
        <[email protected]>

        :type data: ``file``, ``list`` or ``str``
        :param data: geonames.org locations data to read
        :rtype: ``list``
        :return: geonames.org identifiers with :class:`Location` objects
        :raise FileFormatError: Unknown file format

        .. _geonames.org: http://geonames.org/
        .. _database export page: http://download.geonames.org/export/dump/

        """
        self._data = data
        field_names = ("geonameid", "name", "asciiname", "alt_names",
                       "latitude", "longitude", "feature_class", "feature_code",
                       "country", "alt_country", "admin1", "admin2", "admin3",
                       "admin4", "population", "altitude", "gtopo30", "tzname",
                       "modified_date")
        comma_split = lambda s: s.split(",")
        date_parse = lambda s: datetime.date(*map(int, s.split("-")))
        or_none = lambda x, s: x(s) if s else None
        str_or_none = lambda s: or_none(str, s)
        float_or_none = lambda s: or_none(float, s)
        int_or_none = lambda s: or_none(int, s)
        tz_parse = lambda s: self.timezones[s][0] if self.timezones else None
        field_parsers = (int_or_none, str_or_none, str_or_none, comma_split,
                         float_or_none, float_or_none, str_or_none, str_or_none,
                         str_or_none, comma_split, str_or_none, str_or_none,
                         str_or_none, str_or_none, int_or_none, int_or_none,
                         int_or_none, tz_parse, date_parse)
        data = utils.prepare_csv_read(data, field_names, delimiter=r"	")
        for row in data:
            try:
                for name, parser in zip(field_names, field_parsers):
                    row[name] = parser(row[name])
            except ValueError:
                raise utils.FileFormatError("geonames.org")
            self.append(Location(**row))
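
The date_parse lambda above turns the geonames modified_date column ("1994-01-13") into a datetime.date by splitting on dashes. A small equivalent, shown alongside the Python 3.7+ shortcut (this snippet is illustrative and not part of the modrana code):

import datetime

raw = "1994-01-13"
parsed = datetime.date(*map(int, raw.split("-")))   # the style used by date_parse above
assert parsed == datetime.date.fromisoformat(raw)   # equivalent shortcut on Python 3.7+
print(parsed)                                       # 1994-01-13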

Example 19

Project: popcorn_maker Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError("relativedelta only diffs datetime/date")
            if type(dt1) is not type(dt2):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            months = (dt1.year * 12 + dt1.month) - (dt2.year * 12 + dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if type(weekday) is int:
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334,
                           366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx + 1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday - ydayidx[idx - 1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()
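
When given two date/datetime values, the constructor above decomposes their difference into calendar-aware years, months, days and a time remainder. A brief usage sketch (assumes python-dateutil is installed; the dates are only illustrative):

import datetime

from dateutil.relativedelta import relativedelta

delta = relativedelta(datetime.date(2008, 12, 3), datetime.date(2004, 1, 2))
print(delta.years, delta.months, delta.days)   # 4 11 1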

Example 20

Project: django Source File: tests.py
    def test_filtering(self):
        p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
        Book.objects.create(
            name='ExpensiveBook1',
            pages=1,
            isbn='111',
            rating=3.5,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 1)
        )
        Book.objects.create(
            name='ExpensiveBook2',
            pages=1,
            isbn='222',
            rating=4.0,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 2)
        )
        Book.objects.create(
            name='ExpensiveBook3',
            pages=1,
            isbn='333',
            rating=4.5,
            price=Decimal("35"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 3)
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Apress",
                "Sams",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = (
            Publisher.objects
            .annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = (
            Publisher.objects
            .filter(book__price__lt=Decimal("40.0"))
            .annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1)
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Sams",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
        self.assertEqual(len(publishers), 0)
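
The pubdate values seeded above are plain datetime.date objects, which order chronologically. A small, ORM-free illustration of that ordering, reusing the same sample dates as the test:

import datetime

pubdates = [
    datetime.date(2008, 12, 3),
    datetime.date(2008, 12, 1),
    datetime.date(2008, 12, 2),
]
window = (datetime.date(2008, 12, 1), datetime.date(2008, 12, 2))

# Dates compare and sort chronologically, which is all a range check needs.
in_window = sorted(d for d in pubdates if window[0] <= d <= window[1])
print(in_window)   # [datetime.date(2008, 12, 1), datetime.date(2008, 12, 2)]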

Example 21

Project: VideoSort Source File: rrule.py
Function: rebuild
    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()

            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE

            if not rr._byweekno:
                self.wnomask = None
            else:
                self.wnomask = [0]*(self.yearlen+7)
                # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1, 1, 1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst) % 7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen +
                                            (lyearweekday-rr._wkst) % 7) % 7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1

        if (rr._bynweekday and (month != self.lastmonth or
                                year != self.lastyear)):
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday) % 7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday) % 7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1

        if rr._byeaster:
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1

        self.lastyear = year
        self.lastmonth = month

Example 22

Project: VideoSort Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            if not type(dt1) == type(dt2):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

Example 23

Project: winsys Source File: dialogs.py
    def OnOk(self, hwnd):
        """When OK is pressed, if this isn't a progress dialog then simply
        gather the results and return. If this is a progress dialog then
        start a thread to handle progress via the progress iterator.
        """
        def progress_thread(iterator, cancelled):
            """Handle the progress side of the dialog by iterating over a supplied
            iterator (presumably a generator), sending generated values as messages
            to the progress box -- these might be percentages or files processed
            or whatever.

            If the user cancels, an event will be fired which is detected here and
            the iteration broken. Likewise an exception will be logged to the usual
            places and a suitable message sent.
            """
            try:
                for message in iterator:
                    if wrapped(win32event.WaitForSingleObject, cancelled, 0) != win32event.WAIT_TIMEOUT:
                        self._progress_complete("User cancelled")
                        break
                    else:
                        self._progress_message(message)
            except:
                info_dialog(
                    "An error occurred: please contact the Helpdesk",
                    traceback.format_exc(),
                    hwnd
                )
                self._progress_complete("An error occurred")
            else:
                self._progress_complete("Complete")

        #
        # Gather results from fields in the order they were entered
        #
        self.results = []
        for i, (field, default_value, callback) in enumerate(self.fields):
            value = self._get_item(self.IDC_FIELD_BASE + i)
            if isinstance(default_value, datetime.date):
                try:
                    if value:
                        value = datetime.datetime.strptime(value, "%d %b %Y").date()
                    else:
                        value = None
                except ValueError:
                    win32api.MessageBox(
                        hwnd,
                        "Dates must look like:\n%s" % datetime.date.today().strftime ("%d %b %Y").lstrip("0"),
                        "Invalid Date"
                    )
                    return

            self.results.append(value)

        #
        # If this is a progress dialog, disable everything except the
        # Cancel button and start a thread which will loop over the
        # iterator keeping an eye out for a cancel event.
        #
        if self.progress_callback:
            self._set_item(self._progress_id, "Working...")
            for i in range(len(self.fields)):
                self._enable(self.IDC_FIELD_BASE + i, False)
            self._enable(win32con.IDOK, False)
            wrapped(win32gui.SetFocus, wrapped(win32gui.GetDlgItem, hwnd, win32con.IDCANCEL))
            progress_iterator = self.progress_callback(*self.results)
            self.progress_callback = None
            self.progress_thread = threading.Thread(
                target=progress_thread,
                args=(progress_iterator, self.progress_cancelled)
            )
            self.progress_thread.setDaemon(True)
            self.progress_thread.start()

        #
        # Either this isn't a progress dialog or the progress is
        # complete. In either event, close the dialog with an OK state.
        #
        else:
            wrapped(win32gui.EndDialog, hwnd, win32con.IDOK)
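
The dialog above validates user-typed dates by parsing them with the same "%d %b %Y" pattern it shows in the error message. A stand-alone sketch of that round trip (note that %b is locale-sensitive, since it uses abbreviated month names):

import datetime

fmt = "%d %b %Y"
shown = datetime.date(2007, 7, 19).strftime(fmt).lstrip("0")   # e.g. "19 Jul 2007"
parsed = datetime.datetime.strptime(shown, fmt).date()
assert parsed == datetime.date(2007, 7, 19)
print(shown, "->", parsed)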

Example 24

Project: NOT_UPDATED_Sick-Beard-Dutch Source File: rrule.py
Function: rebuild
    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()

            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE

            if not rr._byweekno:
                self.wnomask = None
            else:
                self.wnomask = [0]*(self.yearlen+7)
                #no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1,1,1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst)%7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen+
                                           (lyearweekday-rr._wkst)%7)%7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst)%7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1

        if (rr._bynweekday and
            (month != self.lastmonth or year != self.lastyear)):
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday)%7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday)%7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1

        if rr._byeaster:
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1

        self.lastyear = year
        self.lastmonth = month

Example 25

Project: termite-data-server Source File: memdb.py
Function: get_item
    def __getitem__(self, i):
        if i >= len(self.response) or i < 0:
            raise SyntaxError('Rows: no such row: %i' % i)
        if len(self.response[0]) != len(self.colnames):
            raise SyntaxError('Rows: internal error')
        row = DALStorage()
        for j in xrange(len(self.colnames)):
            value = self.response[i][j]
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            packed = self.colnames[j].split('.')
            try:
                (tablename, fieldname) = packed
            except:
                if not '_extra' in row:
                    row['_extra'] = DALStorage()
                row['_extra'][self.colnames[j]] = value
                continue
            table = self._db[tablename]
            field = table[fieldname]
            if not tablename in row:
                row[tablename] = DALStorage()
            if field.type[:9] == 'reference':
                referee = field.type[10:].strip()
                rid = value
                row[tablename][fieldname] = rid
            elif field.type == 'boolean' and value is not None:

                # row[tablename][fieldname]=Set(self._db[referee].id==rid)

                if value == True or value == 'T':
                    row[tablename][fieldname] = True
                else:
                    row[tablename][fieldname] = False
            elif field.type == 'date' and value is not None\
                    and not isinstance(value, datetime.date):
                (y, m, d) = [int(x) for x in
                             str(value).strip().split('-')]
                row[tablename][fieldname] = datetime.date(y, m, d)
            elif field.type == 'time' and value is not None\
                    and not isinstance(value, datetime.time):
                time_items = [int(x) for x in
                              str(value).strip().split(':')[:3]]
                if len(time_items) == 3:
                    (h, mi, s) = time_items
                else:
                    (h, mi, s) = time_items + [0]
                row[tablename][fieldname] = datetime.time(h, mi, s)
            elif field.type == 'datetime' and value is not None\
                    and not isinstance(value, datetime.datetime):
                (y, m, d) = [int(x) for x in
                             str(value)[:10].strip().split('-')]
                time_items = [int(x) for x in
                              str(value)[11:].strip().split(':')[:3]]
                if len(time_items) == 3:
                    (h, mi, s) = time_items
                else:
                    (h, mi, s) = time_items + [0]
                row[tablename][fieldname] = datetime.datetime(
                    y,
                    m,
                    d,
                    h,
                    mi,
                    s,
                )
            else:
                row[tablename][fieldname] = value
            if fieldname == 'id':
                id = row[tablename].id
                row[tablename].update_record = lambda t = row[tablename], \
                    s = self._db[tablename], id = id, **a: update_record(t,
                                                                         s, id, a)
                for (referee_table, referee_name) in \
                        table._referenced_by:
                    s = self._db[referee_table][referee_name]
                    row[tablename][referee_table] = Set(self._db, s
                                                        == id)
        if len(row.keys()) == 1:
            return row[row.keys()[0]]
        return row
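
The adapter above rebuilds date, time and datetime column values from their string representations by splitting on "-" and ":". A condensed stand-alone sketch of those conversions (the coerce() helper is an assumption for illustration, not part of the DAL):

import datetime


def coerce(field_type, value):
    if field_type == "date":
        return datetime.date(*[int(x) for x in value.strip().split("-")])
    if field_type == "time":
        parts = [int(x) for x in value.strip().split(":")[:3]]
        return datetime.time(*(parts + [0] * (3 - len(parts))))
    if field_type == "datetime":
        day = coerce("date", value[:10])
        clock = coerce("time", value[11:])
        return datetime.datetime.combine(day, clock)
    return value


print(coerce("datetime", "2004-01-02 18:10:45"))   # 2004-01-02 18:10:45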

Example 26

Project: fixofx Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError, "relativedelta only diffs datetime/date"
            if type(dt1) is not type(dt2):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if type(weekday) is int:
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError, "invalid year day (%d)" % yday

        self._fix()

Example 27

Project: nzbget-subliminal Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            if not type(dt1) == type(dt2): #isinstance(dt1, type(dt2)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

Example 28

Project: jellyroll Source File: calendar.py
def day(request, year, month, day, queryset=None, recent_first=False,
    template_name="jellyroll/calendar/day.html", template_loader=loader,
    extra_context=None, context_processors=None, mimetype=None):
    """
    Jellyroll'd items for a particular day.

    Works a bit like a generic view in that you can pass a bunch of optional
    keyword arguments which work just like they do in generic views. Those
    arguments are: ``template_name``, ``template_loader``, ``extra_context``,
    ``context_processors``, and ``mimetype``.
    
    Also takes a ``recent_first`` param; if it's ``True`` the newest items
    will be displayed first; otherwise items will be ordered earliest first.

    You can also pass a ``queryset`` argument; see the module's docstring
    for information about how that works.

    Templates: ``jellyroll/calendar/day.html`` (default)
    Context:
        ``items``
            Items from the day, ordered according to ``recent_first``.
        ``day``
            The day (a ``datetime.date`` object).
        ``previous``
            The previous day; ``None`` if that day was before jellyrolling
            started.
        ``previous_link``
            Link to the previous day
        ``next``
            The next day; ``None`` if it's in the future.
        ``next_link``
            Link to the next day.
        ``is_today``
            ``True`` if this day is today.
    """
    # Make sure we've requested a valid day
    try:
        day = datetime.date(*time.strptime(year+month+day, '%Y%b%d')[:3])
    except ValueError:
        raise Http404("Invalid day string")
    try:
        first = Item.objects.order_by("timestamp")[0]
    except IndexError:
        raise Http404("No items; no views.")
    
    today = datetime.date.today()
    if day < first.timestamp.date() or day > today:
        raise Http404("Invalid day (%s .. %s)" % (first.timestamp.date(), today))
    
    # Calculate the previous day
    previous = day - datetime.timedelta(days=1)
    previous_link = urlresolvers.reverse("jellyroll.views.calendar.day", args=previous.strftime("%Y %b %d").lower().split())
    if previous < first.timestamp.date():
        previous = previous_link = None
    
    # And the next day
    next = day + datetime.timedelta(days=1)
    next_link = urlresolvers.reverse("jellyroll.views.calendar.day", args=next.strftime("%Y %b %d").lower().split())
    if next > today:
        next = next_link = None
    
    # Some lookup values...
    timestamp_range = (datetime.datetime.combine(day, datetime.time.min), 
                       datetime.datetime.combine(day, datetime.time.max))
    
    # Handle the initial queryset
    if not queryset:
        queryset = Item.objects.all()
    queryset = queryset.filter(timestamp__range=timestamp_range)
    if not queryset.query.order_by:
        if recent_first:
            queryset = queryset.order_by("-timestamp")
        else:
            queryset = queryset.order_by("timestamp")
    
    # Build the context
    context = RequestContext(request, {
        "items"         : queryset,
        "day"           : day,
        "previous"      : previous,
        "previous_link" : previous_link,
        "next"          : next,
        "next_link"     : next_link,
        "is_today"      : day == today,
    }, context_processors)
    if extra_context:
        for key, value in extra_context.items():
            if callable(value):
                context[key] = value()
            else:
                context[key] = value
    
    # Load, render, and return
    t = template_loader.get_template(template_name)
    return HttpResponse(t.render(context), mimetype=mimetype)
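
The day view above parses the URL fragments with time.strptime and then walks one day in each direction with datetime.timedelta, dropping the link that falls outside the archive. A trimmed sketch of that neighboring-day calculation (the sample fragments and the upper bound are illustrative):

import datetime
import time

# "2008", "dec", "01" stand in for the year/month/day URL fragments.
day = datetime.date(*time.strptime("2008" + "dec" + "01", "%Y%b%d")[:3])
today = datetime.date.today()

previous = day - datetime.timedelta(days=1)
next_day = day + datetime.timedelta(days=1)
if next_day > today:
    next_day = None   # no archive page for the future

print(day, previous, next_day)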

Example 29

Project: django-nonrel Source File: tests.py
    def test_filtering(self):
        p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
        Book.objects.create(
            name='ExpensiveBook1',
            pages=1,
            isbn='111',
            rating=3.5,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008,12,1)
        )
        Book.objects.create(
            name='ExpensiveBook2',
            pages=1,
            isbn='222',
            rating=4.0,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008,12,2)
        )
        Book.objects.create(
            name='ExpensiveBook3',
            pages=1,
            isbn='333',
            rating=4.5,
            price=Decimal("35"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008,12,3)
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Apress",
                "Sams",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Sams",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
        self.assertEqual(len(publishers), 0)

Example 30

Project: courtlistener Source File: scotus_date_cleaner.py
Function: cleaner
def cleaner(simulate=False, verbose=False):
    f            = open("date_of_decisions.csv", 'r')
    updated_file = open('updated_file.log', 'w')
    punt_file    = open('punted_cases.log', 'w')

    line_num = 1
    for line in f:
        # extract the case number from the line in the CSV
        csv_case_name      = line.split("|")[1]
        csv_volume_num     = line.split("|")[2]
        csv_page_num       = line.split("|")[3]
        csv_date_published = line.split("|")[4]

        query = "@west_cite (" + csv_volume_num + " << " + csv_page_num + ") @court scotus"

        # search for the case number using Sphinx
        queryset = Document.search.query(query)
        results = queryset.set_options(mode="SPH_MATCH_EXTENDED2")

        if results.count() == 0:
            # No hits for the doc. Log and punt it.
            print "Results: %d. Line number %d. Line contents: %s" % \
                (results.count(), line_num, line.strip())
            punt_file.write("Results: %d. Line number %d. Line contents %s\n" % \
                (results.count(), line_num, line.strip()))

        elif results.count() == 1:
            # One hit returned; make sure it's above THRESHOLD. If so, fix it.
            HIGH_THRESHOLD = 0.3
            LOW_THRESHOLD  = 0.15
            db_case_name = str(results[0])
            diff = gen_diff_ratio(db_case_name, csv_case_name)
            if diff >= HIGH_THRESHOLD:
                # Update the date in the DB, this is a no brainer
                if not simulate:
                    splitDate = csv_date_published.split('-')
                    results[0].date_filed = datetime.date(int(splitDate[0]),
                        int(splitDate[1]), int(splitDate[2]))
                    results[0].save()

                # Log as appropriate
                if verbose: print "Results: %d. Line number: %d. Diff_ratio: %f; Doc updated: %d: %s. Line contents: %s" % \
                    (results.count(), line_num, diff, results[0].pk, results[0], line.strip())
                updated_file.write("Results: %d. Line number: %d. Diff_ratio: %f; Doc updated: %d: %s. Line contents: %s\n" % \
                    (results.count(), line_num, diff, results[0].pk, results[0], line.strip()))

            elif (diff >= LOW_THRESHOLD) and (diff <= HIGH_THRESHOLD):
                # Ask the user if the change should be made.
                same = raw_input(str(results[0]) + "   ==   " + csv_case_name + " ?: ")
                if same == 'y':
                    # Update the date in the DB. Human says to.
                    if not simulate:
                        splitDate = csv_date_published.split('-')
                        results[0].date_filed = datetime.date(int(splitDate[0]),
                            int(splitDate[1]), int(splitDate[2]))
                        results[0].save()

                    # Log as appropriate
                    if verbose: print "Results: %d. Line number: %d. Diff_ratio: %f; Doc updated: %d: %s. Line contents: %s" % \
                        (results.count(), line_num, diff, results[0].pk, results[0], line.strip())
                    updated_file.write("Results: %d. Line number: %d. Diff_ratio: %f; Doc updated: %d: %s. Line contents: %s\n" % \
                        (results.count(), line_num, diff, results[0].pk, results[0], line.strip()))
                else:
                    # Human says punt; therefore punt.
                    if verbose:
                        print "Results: %d. Line number %d punted by human. Diff_ratio: %f found on %d: %s; Line contents: %s" % \
                            (results.count(), line_num, diff, results[0].pk, results[0], line.strip())
                    punt_file.write("Results: %d. Line number %d punted by human. Diff_ratio: %f found on %d: %s; Line contents: %s\n" % \
                        (results.count(), line_num, diff, results[0].pk, results[0], line.strip()))

            else:
                # Below the threshold. Punt!
                if verbose:
                    print "Results: %d. Line number %d below threshold. Diff_ratio: %f found on %d: %s; Line contents: %s" % \
                        (results.count(), line_num, diff, results[0].pk, results[0], line.strip())
                punt_file.write("Results: %d. Line number %d below threshold. Diff_ratio: %f found on %d: %s; Line contents: %s\n" % \
                    (results.count(), line_num, diff, results[0].pk, results[0], line.strip()))

        elif results.count() > 1:
            # More than one hit. Find the best one using diff_lib
            THRESHOLD = 0.65

            diff_ratios = []
            for result in results:
                # Calculate its diff_ratio, and add it to an array
                db_case_name = str(result)
                diff = gen_diff_ratio(db_case_name, csv_case_name)
                diff_ratios.append(diff)

            # Find the max ratio, and grab the corresponding result
            max_ratio = max(diff_ratios)
            i = diff_ratios.index(max_ratio)
            if max_ratio >= THRESHOLD:
                # Update the date in the DB
                if not simulate:
                    splitDate = csv_date_published.split('-')
                    results[i].date_filed = datetime.date(int(splitDate[0]),
                        int(splitDate[1]), int(splitDate[2]))
                    results[i].save()

                # Log as appropriate
                if verbose:
                    print "Results: %d. Line number: %d. Diff_ratio: %f; Doc updated: %d: %s. Line contents: %s" % \
                        (results.count(), line_num, max_ratio, results[i].pk, results[i], line.strip())
                updated_file.write("Results: %d. Line number: %d. Diff_ratio: %f; Doc updated: %d: %s. Line contents: %s\n" % \
                    (results.count(), line_num, max_ratio, results[i].pk, results[i], line.strip()))

            else:
                # Below the threshold. Punt!
                if verbose:
                    print "Results: %d. Line number %d below threshold. Diff_ratio: %f found on %d: %s; Line contents: %s" % \
                        (results.count(), line_num, max_ratio, results[i].pk, results[i], line.strip())
                punt_file.write("Results: %d. Line number %d below threshold. Diff_ratio: %f found on %d: %s; Line contents: %s\n" % \
                    (results.count(), line_num, max_ratio, results[i].pk, results[i], line.strip()))

        # increment the line number counter
        line_num += 1
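
Distilling the date handling above into a standalone sketch (the sample value is illustrative, not taken from the CSV): the YYYY-MM-DD field is split on '-' and the pieces are passed straight to datetime.date.

import datetime

csv_date_published = "1973-01-22"   # hypothetical value standing in for a CSV field
split_date = csv_date_published.split('-')
date_filed = datetime.date(int(split_date[0]), int(split_date[1]), int(split_date[2]))
print(date_filed)                   # 1973-01-22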

Example 31

Project: socorro Source File: tcbs.py
def getListOfTopCrashersBySignature(connection, dbParams):
    """
    Answers a generator of tcbs rows
    """
    assertPairs = {
        'startDate': (datetime.date, datetime.datetime),
        'to_date': (datetime.date, datetime.datetime),
        'product': basestring,
        'version': basestring,
        'limit': int
    }

    for param in assertPairs:
        if not isinstance(dbParams[param], assertPairs[param]):
            raise BadArgumentError(type(dbParams[param]))

    order_by = 'report_count'  # default order field
    where = ['']  # trick for the later join
    if dbParams['crash_type'] != 'all':
        where.append(
            "process_type = %s" % (
                sqlutils.quote_value(dbParams['crash_type']),
            )
        )
    if dbParams['os']:
        abbreviated_os = dbParams['os'][0:3].lower()
        if abbreviated_os not in ('win', 'lin', 'mac'):
            # this check prevents possible SQL injections
            raise BadArgumentError('Invalid OS to order on')
        order_by = '%s_count' % abbreviated_os
        where.append("%s > 0" % order_by)

    where = ' AND '.join(where)

    table_to_use = 'tcbs'
    date_range_field = 'report_date'

    if dbParams['date_range_type'] == 'build':
        table_to_use = 'tcbs_build'
        date_range_field = 'build_date'

    sql = """
        WITH tcbs_r as (
        SELECT tcbs.signature_id,
                signature,
                pv.product_name,
                version_string,
                sum(report_count) as report_count,
                sum(win_count) as win_count,
                sum(lin_count) as lin_count,
                sum(mac_count) as mac_count,
                sum(hang_count) as hang_count,
                plugin_count(process_type,report_count) as plugin_count,
                content_count(process_type,report_count) as content_count,
                first_report,
                version_list,
                sum(startup_count) as startup_count,
                sum(is_gc_count) as is_gc_count
        FROM %s tcbs
            JOIN signatures USING (signature_id)
            JOIN product_versions AS pv USING (product_version_id)
            JOIN signature_products_rollup AS spr
                ON spr.signature_id = tcbs.signature_id
                AND spr.product_name = pv.product_name
        WHERE pv.product_name = %%s
            AND version_string = %%s
            AND tcbs.%s BETWEEN %%s AND %%s
            %s
        GROUP BY tcbs.signature_id, signature, pv.product_name, version_string,
             first_report, spr.version_list
        ),
        tcbs_window AS (
            SELECT tcbs_r.*,
            sum(report_count) over () as total_crashes,
                    dense_rank() over (order by report_count desc) as ranking
            FROM
                tcbs_r
        )
        SELECT signature,
                report_count,
                win_count,
                lin_count,
                mac_count,
                hang_count,
                plugin_count,
                content_count,
                first_report,
                version_list,
                %s / total_crashes::float as percent_of_total,
                startup_count / %s::float as startup_percent,
                is_gc_count,
                total_crashes::int
        FROM tcbs_window
        ORDER BY %s DESC
        LIMIT %s
    """ % (
        table_to_use,
        date_range_field,
        where,
        order_by,
        order_by,
        order_by,
        dbParams["limit"]
    )
    params = (
        dbParams['product'],
        dbParams['version'],
        dbParams['startDate'],
        dbParams['to_date'],
    )
    try:
        cursor = connection.cursor()
        return db.execute(cursor, sql, params)
    except Exception:
        connection.rollback()
        raise
    else:
        connection.commit()
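
The type assertions at the top of this function lean on datetime.datetime being a subclass of datetime.date, so a single isinstance check against the tuple accepts either. A minimal sketch of that check (the helper name is made up for illustration):

import datetime

def check_date_param(name, value):
    # datetime.datetime passes too, because it subclasses datetime.date
    if not isinstance(value, (datetime.date, datetime.datetime)):
        raise TypeError("%s must be a date or datetime, got %r" % (name, type(value)))

check_date_param("startDate", datetime.date(2013, 5, 1))
check_date_param("to_date", datetime.datetime(2013, 5, 8, 12, 30))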

Example 32

Project: traktforalfred Source File: relativedelta.py
Function: init
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()
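
Worth noting from the constructor above: because datetime.datetime subclasses datetime.date, both arguments only need to be dates, and a plain date is promoted to a midnight datetime by round-tripping through its ordinal. A small standalone sketch of that coercion:

import datetime

d = datetime.date(2003, 9, 17)
dt = datetime.datetime.fromordinal(d.toordinal())   # midnight on the same day
print(dt)                                            # 2003-09-17 00:00:00
print(isinstance(d, datetime.date), isinstance(d, datetime.datetime))   # True False
print(isinstance(dt, datetime.date))                                    # True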

Example 33

Project: django-cbv Source File: date_based.py
Function: archive_month
def archive_month(request, year, month, queryset, date_field,
        month_format='%b', template_name=None, template_loader=loader,
        extra_context=None, allow_empty=False, context_processors=None,
        template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic monthly archive view.

    Templates: ``<app_label>/<model_name>_archive_month.html``
    Context:
        date_list:
            List of days in this month with objects
        month:
            (date) this month
        next_month:
            (date) the first day of the next month, or None if the next month
            is in the future
        previous_month:
            (date) the first day of the previous month
        object_list:
            list of objects published in the given month
    """
    if extra_context is None:
        extra_context = {}
    try:
        tt = time.strptime(
            "%s-%s" % (year, month),
            '%s-%s' % ('%Y', month_format)
        )
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404

    model = queryset.model
    now = datetime.datetime.now()

    # Calculate first and last day of month, for use in a date-range lookup.
    first_day = date.replace(day=1)
    if first_day.month == 12:
        last_day = first_day.replace(year=first_day.year + 1, month=1)
    else:
        last_day = first_day.replace(month=first_day.month + 1)
    lookup_kwargs = {
        '%s__gte' % date_field: first_day,
        '%s__lt' % date_field: last_day,
    }

    # Only bother to check current date if the month isn't in the past and
    # future objects are requested.
    if last_day >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    date_list = object_list.dates(date_field, 'day')
    if not object_list and not allow_empty:
        raise Http404

    # Calculate the next month, if applicable.
    if allow_future:
        next_month = last_day
    elif last_day <= datetime.date.today():
        next_month = last_day
    else:
        next_month = None

    # Calculate the previous month
    if first_day.month == 1:
        previous_month = first_day.replace(year=first_day.year - 1, month=12)
    else:
        previous_month = first_day.replace(month=first_day.month - 1)

    if not template_name:
        template_name = "%s/%s_archive_month.html" % (
            model._meta.app_label,
            model._meta.object_name.lower()
        )
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list': date_list,
        '%s_list' % template_object_name: object_list,
        'month': date,
        'next_month': next_month,
        'previous_month': previous_month,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
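
A condensed sketch of the month-boundary logic above, outside the view (the year and month strings are illustrative URL parameters): strptime fills in day 1, and the first day of the following month becomes the exclusive upper bound of the date-range lookup.

import datetime
import time

year, month = "2007", "Aug"                       # hypothetical captured URL values
tt = time.strptime("%s-%s" % (year, month), "%Y-%b")
first_day = datetime.date(*tt[:3])                # day defaults to 1
if first_day.month == 12:
    last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
    last_day = first_day.replace(month=first_day.month + 1)
print(first_day, last_day)                        # 2007-08-01 2007-09-01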

Example 34

Project: django-report-builder Source File: mixins.py
Function: report_to_list
    def report_to_list(self, queryset, display_fields, user, property_filters=[], preview=False):
        """ Create list from a report with all data filtering.
        queryset: initial queryset to generate results
        display_fields: list of field references or DisplayField models
        user: requesting user
        property_filters: ???
        preview: return only first 50 rows
        Returns list, message in case of issues.
        """
        model_class = queryset.model

        def can_change_or_view(model):
            """ Return True iff `user` has either change or view permission
            for `model`.
            """
            try:
                model_name = model._meta.model_name
            except AttributeError:
                # Needed for Django 1.4.* (LTS).
                model_name = model._meta.module_name

            app_label = model._meta.app_label
            can_change = user.has_perm(app_label + '.change_' + model_name)
            can_view = user.has_perm(app_label + '.view_' + model_name)

            return can_change or can_view

        if not can_change_or_view(model_class):
            return [], 'Permission Denied'

        if isinstance(display_fields, list):
            # Convert list of strings to DisplayField objects.

            new_display_fields = []

            for display_field in display_fields:
                field_list = display_field.split('__')
                field = field_list[-1]
                path = '__'.join(field_list[:-1])

                if path:
                    path += '__'  # Legacy format to append a __ here.

                new_model = get_model_from_path_string(model_class, path)
                model_field = new_model._meta.get_field_by_name(field)[0]
                choices = model_field.choices
                new_display_fields.append(DisplayField(
                    path, '', field, '', '', None, None, choices, ''
                ))

            display_fields = new_display_fields

        # Build group-by field list.

        group = [df.path + df.field for df in display_fields if df.group]

        # To support group-by with multiple fields, we turn all the other
        # fields into aggregations. The default aggregation is `Max`.

        if group:
            for field in display_fields:
                if (not field.group) and (not field.aggregate):
                    field.aggregate = 'Max'

        message = ""
        objects = self.add_aggregates(queryset, display_fields)

        # Display Values

        display_field_paths = []
        property_list = {}
        custom_list = {}
        display_totals = {}

        for i, display_field in enumerate(display_fields):
            model = get_model_from_path_string(model_class, display_field.path)

            if display_field.field_type == "Invalid":
                continue

            if not model or can_change_or_view(model):
                display_field_key = display_field.path + display_field.field

                if display_field.field_type == "Property":
                    property_list[i] = display_field_key
                elif display_field.field_type == "Custom Field":
                    custom_list[i] = display_field_key
                elif display_field.aggregate == "Avg":
                    display_field_key += '__avg'
                elif display_field.aggregate == "Max":
                    display_field_key += '__max'
                elif display_field.aggregate == "Min":
                    display_field_key += '__min'
                elif display_field.aggregate == "Count":
                    display_field_key += '__count'
                elif display_field.aggregate == "Sum":
                    display_field_key += '__sum'

                if display_field.field_type not in ('Property', 'Custom Field'):
                    display_field_paths.append(display_field_key)

                if display_field.total:
                    display_totals[display_field_key] = Decimal(0)

            else:
                message += 'Error: Permission denied on access to {0}.'.format(
                    display_field.name
                )

        def increment_total(display_field_key, val):
            """ Increment display total by `val` if given `display_field_key` in
            `display_totals`.
            """
            if display_field_key in display_totals:
                if isinstance(val, bool):
                    # True: 1, False: 0
                    display_totals[display_field_key] += Decimal(val)
                elif isinstance(val, Number):
                    display_totals[display_field_key] += Decimal(str(val))
                elif val:
                    display_totals[display_field_key] += Decimal(1)

        # Select pk for primary and m2m relations in order to retrieve objects
        # for adding properties to report rows. Group-by queries do not support
        # Property nor Custom Field filters.

        if not group:
            display_field_paths.insert(0, 'pk')

            m2m_relations = []
            for position, property_path in property_list.items():
                property_root = property_path.split('__')[0]
                root_class = model_class

                try:
                    property_root_class = getattr(root_class, property_root)
                except AttributeError:  # django-hstore schema compatibility
                    continue

                if type(property_root_class) == ManyToManyDescriptor:
                    display_field_paths.insert(1, '%s__pk' % property_root)
                    m2m_relations.append(property_root)

        if group:
            values = objects.values(*group)
            values = self.add_aggregates(values, display_fields)
            filtered_report_rows = [
                [row[field] for field in display_field_paths]
                for row in values
            ]
            for row in filtered_report_rows:
                for pos, field in enumerate(display_field_paths):
                    increment_total(field, row[pos])
        else:
            filtered_report_rows = []
            values_and_properties_list = []

            values_list = objects.values_list(*display_field_paths)

            for row in values_list:
                row = list(row)
                values_and_properties_list.append(row[1:])
                obj = None  # we will get this only if needed for more complex processing
                # related_objects
                remove_row = False
                # filter properties (remove rows with excluded properties)
                for property_filter in property_filters:
                    if not obj:
                        obj = model_class.objects.get(pk=row.pop(0))
                    root_relation = property_filter.path.split('__')[0]
                    if root_relation in m2m_relations:
                        pk = row[0]
                        if pk is not None:
                            # a related object exists
                            m2m_obj = getattr(obj, root_relation).get(pk=pk)
                            val = reduce(getattr, [property_filter.field], m2m_obj)
                        else:
                            val = None
                    else:
                        if property_filter.field_type == 'Custom Field':
                            for relation in property_filter.path.split('__'):
                                if hasattr(obj, root_relation):
                                    obj = getattr(obj, root_relation)
                            val = obj.get_custom_value(property_filter.field)
                        else:
                            val = reduce(getattr, (property_filter.path + property_filter.field).split('__'), obj)
                    if property_filter.filter_property(val):
                        remove_row = True
                        values_and_properties_list.pop()
                        break
                if not remove_row:
                    for i, field in enumerate(display_field_paths[1:]):
                        increment_total(field, row[i + 1])

                    for position, display_property in property_list.items():
                        if not obj:
                            obj = model_class.objects.get(pk=row.pop(0))
                        relations = display_property.split('__')
                        root_relation = relations[0]
                        if root_relation in m2m_relations:
                            pk = row.pop(0)
                            if pk is not None:
                                # a related object exists
                                m2m_obj = getattr(obj, root_relation).get(pk=pk)
                                val = reduce(getattr, relations[1:], m2m_obj)
                            else:
                                val = None
                        else:
                            # Could error if a related field doesn't exist
                            try:
                                val = reduce(getattr, relations, obj)
                            except AttributeError:
                                val = None
                        values_and_properties_list[-1].insert(position, val)
                        increment_total(display_property, val)

                    for position, display_custom in custom_list.items():
                        if not obj:
                            obj = model_class.objects.get(pk=row.pop(0))
                        val = obj.get_custom_value(display_custom)
                        values_and_properties_list[-1].insert(position, val)
                        increment_total(display_custom, val)

                    filtered_report_rows.append(values_and_properties_list[-1])

                if preview and len(filtered_report_rows) == 50:
                    break

        # Sort results if requested.

        if hasattr(display_fields, 'filter'):
            defaults = {
                None: text_type,
                datetime.date: lambda: datetime.date(datetime.MINYEAR, 1, 1),
                datetime.datetime: lambda: datetime.datetime(datetime.MINYEAR, 1, 1),
            }

            # Order sort fields in reverse order so that ascending, descending
            # sort orders work together (based on Python's stable sort). See
            # http://stackoverflow.com/questions/6666748/ for details.

            sort_fields = display_fields.filter(sort__gt=0).order_by('-sort')
            sort_values = sort_fields.values_list('position', 'sort_reverse')

            for pos, reverse in sort_values:
                column = (row[pos] for row in filtered_report_rows)
                type_col = (type(val) for val in column if val is not None)
                field_type = next(type_col, None)
                default = defaults.get(field_type, field_type)()

                filtered_report_rows = sorted(
                    filtered_report_rows,
                    key=lambda row: self.sort_helper(row[pos], default),
                    reverse=reverse,
                )

        values_and_properties_list = filtered_report_rows

        # Build mapping from display field position to choices list.

        choice_lists = {}
        for df in display_fields:
            if df.choices and hasattr(df, 'choices_dict'):
                df_choices = df.choices_dict
                # Insert blank and None as valid choices.
                df_choices[''] = ''
                df_choices[None] = ''
                choice_lists[df.position] = df_choices

        # Build mapping from display field position to format.

        display_formats = {}

        for df in display_fields:
            if hasattr(df, 'display_format') and df.display_format:
                display_formats[df.position] = df.display_format

        def formatter(value, style):
            # Convert value to Decimal to apply numeric formats.
            try:
                value = Decimal(value)
            except Exception:
                pass

            try:
                return style.string.format(value)
            except ValueError:
                return value

        # Iterate rows and convert values by choice lists and field formats.

        final_list = []

        for row in values_and_properties_list:
            row = list(row)

            for position, choice_list in choice_lists.items():
                try:
                    row[position] = text_type(choice_list[row[position]])
                except Exception:
                    row[position] = text_type(row[position])

            for pos, style in display_formats.items():
                row[pos] = formatter(row[pos], style)

            final_list.append(row)

        values_and_properties_list = final_list

        if display_totals:
            display_totals_row = []

            fields_and_properties = list(display_field_paths[0 if group else 1:])

            for position, value in property_list.items():
                fields_and_properties.insert(position, value)

            for field in fields_and_properties:
                display_totals_row.append(display_totals.get(field, ''))

            # Add formatting to display totals.

            for pos, style in display_formats.items():
                display_totals_row[pos] = formatter(display_totals_row[pos], style)

            values_and_properties_list.append(
                ['TOTALS'] + (len(fields_and_properties) - 1) * ['']
            )
            values_and_properties_list.append(display_totals_row)

        return values_and_properties_list, message
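
One detail above worth isolating: when rows are sorted, a None in a date column is replaced by a minimal sentinel of the same type, datetime.date(datetime.MINYEAR, 1, 1), so the comparison never mixes None with real dates. A stripped-down sketch with invented row data:

import datetime

rows = [[datetime.date(2016, 3, 1)], [None], [datetime.date(2014, 7, 9)]]
default = datetime.date(datetime.MINYEAR, 1, 1)    # i.e. date(1, 1, 1)

rows.sort(key=lambda row: row[0] if row[0] is not None else default)
print(rows)   # the None row sorts first, ahead of both real dates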

Example 35

Project: imagrium Source File: test_str.py
Function: test_format
    def test_format(self):
        self.assertEqual(''.format(), '')
        self.assertEqual('a'.format(), 'a')
        self.assertEqual('ab'.format(), 'ab')
        self.assertEqual('a{{'.format(), 'a{')
        self.assertEqual('a}}'.format(), 'a}')
        self.assertEqual('{{b'.format(), '{b')
        self.assertEqual('}}b'.format(), '}b')
        self.assertEqual('a{{b'.format(), 'a{b')

        # examples from the PEP:
        import datetime
        self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
        self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
                         "My name is Fred")
        self.assertEqual("My name is {0} :-{{}}".format('Fred'),
                         "My name is Fred :-{}")

        d = datetime.date(2007, 8, 18)
        self.assertEqual("The year is {0.year}".format(d),
                         "The year is 2007")

        # classes we'll use for testing
        class C:
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec

        class D:
            def __init__(self, x):
                self.x = x
            def __format__(self, spec):
                return str(self.x)

        # class with __str__, but no __format__
        class E:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return 'E(' + self.x + ')'

        # class with __repr__, but no __format__ or __str__
        class F:
            def __init__(self, x):
                self.x = x
            def __repr__(self):
                return 'F(' + self.x + ')'

        # class with __format__ that forwards to string, for some format_spec's
        class G:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return "string is " + self.x
            def __format__(self, format_spec):
                if format_spec == 'd':
                    return 'G(' + self.x + ')'
                return object.__format__(self, format_spec)

        # class that returns a bad type from __format__
        class H:
            def __format__(self, format_spec):
                return 1.0

        class I(datetime.date):
            def __format__(self, format_spec):
                return self.strftime(format_spec)

        class J(int):
            def __format__(self, format_spec):
                return int.__format__(self * 2, format_spec)


        self.assertEqual(''.format(), '')
        self.assertEqual('abc'.format(), 'abc')
        self.assertEqual('{0}'.format('abc'), 'abc')
        self.assertEqual('{0:}'.format('abc'), 'abc')
        self.assertEqual('X{0}'.format('abc'), 'Xabc')
        self.assertEqual('{0}X'.format('abc'), 'abcX')
        self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
        self.assertEqual('{1}'.format(1, 'abc'), 'abc')
        self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
        self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
        self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
        self.assertEqual('{0}'.format(-15), '-15')
        self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
        self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
        self.assertEqual('{{'.format(), '{')
        self.assertEqual('}}'.format(), '}')
        self.assertEqual('{{}}'.format(), '{}')
        self.assertEqual('{{x}}'.format(), '{x}')
        self.assertEqual('{{{0}}}'.format(123), '{123}')
        self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
        self.assertEqual('}}{{'.format(), '}{')
        self.assertEqual('}}x{{'.format(), '}x{')

        # weird field names
        self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
        self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
        self.assertEqual("{0[ ]}".format({' ':3}), '3')

        self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
        self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
        self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
        self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
        self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
        self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
        self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')

        # strings
        self.assertEqual('{0:.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:.3s}'.format('ab'), 'ab')
        self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
        self.assertEqual('{0:.0s}'.format('abcdef'), '')
        self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
        self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
        self.assertEqual('{0:x<0s}'.format('result'), 'result')
        self.assertEqual('{0:x<5s}'.format('result'), 'result')
        self.assertEqual('{0:x<6s}'.format('result'), 'result')
        self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
        self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
        self.assertEqual('{0: <7s}'.format('result'), 'result ')
        self.assertEqual('{0:<7s}'.format('result'), 'result ')
        self.assertEqual('{0:>7s}'.format('result'), ' result')
        self.assertEqual('{0:>8s}'.format('result'), '  result')
        self.assertEqual('{0:^8s}'.format('result'), ' result ')
        self.assertEqual('{0:^9s}'.format('result'), ' result  ')
        self.assertEqual('{0:^10s}'.format('result'), '  result  ')
        self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
        self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
        self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)

        # format specifiers for user defined type
        self.assertEqual('{0:abc}'.format(C()), 'abc')

        # !r and !s coercions
        self.assertEqual('{0!s}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:15}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')

        # test fallback to object.__format__
        self.assertEqual('{0}'.format({}), '{}')
        self.assertEqual('{0}'.format([]), '[]')
        self.assertEqual('{0}'.format([1]), '[1]')
        self.assertEqual('{0}'.format(E('data')), 'E(data)')
        self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
        self.assertEqual('{0!s}'.format(G('data')), 'string is data')

        msg = 'object.__format__ with a non-empty format string is deprecated'
        with test_support.check_warnings((msg, PendingDeprecationWarning)):
            self.assertEqual('{0:^10}'.format(E('data')), ' E(data)  ')
            self.assertEqual('{0:^10s}'.format(E('data')), ' E(data)  ')
            self.assertEqual('{0:>15s}'.format(G('data')), ' string is data')

        #FIXME: not supported in Jython yet:
        if not test_support.is_jython:
            self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
                                                           month=8,
                                                           day=27)),
                             "date: 2007-08-27")

            # test deriving from a builtin type and overriding __format__
            self.assertEqual("{0}".format(J(10)), "20")


        # string format specifiers
        self.assertEqual('{0:}'.format('a'), 'a')

        # computed format specifiers
        self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello     ')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello     ')

        # test various errors
        self.assertRaises(ValueError, '{'.format)
        self.assertRaises(ValueError, '}'.format)
        self.assertRaises(ValueError, 'a{'.format)
        self.assertRaises(ValueError, 'a}'.format)
        self.assertRaises(ValueError, '{a'.format)
        self.assertRaises(ValueError, '}a'.format)
        self.assertRaises(IndexError, '{0}'.format)
        self.assertRaises(IndexError, '{1}'.format, 'abc')
        self.assertRaises(KeyError,   '{x}'.format)
        self.assertRaises(ValueError, "}{".format)
        self.assertRaises(ValueError, "{".format)
        self.assertRaises(ValueError, "}".format)
        self.assertRaises(ValueError, "abc{0:{}".format)
        self.assertRaises(ValueError, "{0".format)
        self.assertRaises(IndexError, "{0.}".format)
        self.assertRaises(ValueError, "{0.}".format, 0)
        self.assertRaises(IndexError, "{0[}".format)
        self.assertRaises(ValueError, "{0[}".format, [])
        self.assertRaises(KeyError,   "{0]}".format)
        self.assertRaises(ValueError, "{0.[]}".format, 0)
        self.assertRaises(ValueError, "{0..foo}".format, 0)
        self.assertRaises(ValueError, "{0[0}".format, 0)
        self.assertRaises(ValueError, "{0[0:foo}".format, 0)
        self.assertRaises(KeyError,   "{c]}".format)
        self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
        self.assertRaises(ValueError, "{0}}".format, 0)
        self.assertRaises(KeyError,   "{foo}".format, bar=3)
        self.assertRaises(ValueError, "{0!x}".format, 3)
        self.assertRaises(ValueError, "{0!}".format, 0)
        self.assertRaises(ValueError, "{0!rs}".format, 0)
        self.assertRaises(ValueError, "{!}".format)
        self.assertRaises(IndexError, "{:}".format)
        self.assertRaises(IndexError, "{:s}".format)
        self.assertRaises(IndexError, "{}".format)

        # issue 6089
        self.assertRaises(ValueError, "{0[0]x}".format, [None])
        self.assertRaises(ValueError, "{0[0](10)}".format, [None])

        # can't have a replacement on the field name portion
        self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)

        # exceed maximum recursion depth
        self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
        self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
                          0, 1, 2, 3, 4, 5, 6, 7)

        # string format spec errors
        self.assertRaises(ValueError, "{0:-s}".format, '')
        self.assertRaises(ValueError, format, "", "-")
        self.assertRaises(ValueError, "{0:=s}".format, '')
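
Two of the datetime.date cases above are easy to reproduce on their own: attribute access inside a replacement field, and a date subclass whose __format__ hands the spec to strftime (the subclass name here is invented, mirroring class I above):

import datetime

d = datetime.date(2007, 8, 18)
print("The year is {0.year}".format(d))          # The year is 2007

class StrftimeDate(datetime.date):               # illustrative subclass
    def __format__(self, format_spec):
        return self.strftime(format_spec)

print("{0:date: %Y-%m-%d}".format(StrftimeDate(2007, 8, 27)))   # date: 2007-08-27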

Example 36

Project: SickGear Source File: parser.py
    def _parse_string(self, name):
        if not name:
            return

        matches = []

        for regex in self.compiled_regexes:
            for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes[regex]:
                match = cur_regex.match(name)

                if not match:
                    continue

                result = ParseResult(name)
                result.which_regex = [cur_regex_name]
                result.score = 0 - cur_regex_num

                named_groups = match.groupdict().keys()

                if 'series_name' in named_groups:
                    result.series_name = match.group('series_name')
                    if result.series_name:
                        result.series_name = self.clean_series_name(result.series_name)
                        result.score += 1

                if 'series_num' in named_groups and match.group('series_num'):
                    result.score += 1

                if 'season_num' in named_groups:
                    tmp_season = int(match.group('season_num'))
                    if 'bare' == cur_regex_name and tmp_season in (19, 20):
                        continue
                    result.season_number = tmp_season
                    result.score += 1

                if 'ep_num' in named_groups:
                    ep_num = self._convert_number(match.group('ep_num'))
                    if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
                        result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
                        result.score += 1
                    else:
                        result.episode_numbers = [ep_num]
                    result.score += 1

                if 'ep_ab_num' in named_groups:
                    ep_ab_num = self._convert_number(match.group('ep_ab_num'))
                    if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
                        result.ab_episode_numbers = range(ep_ab_num,
                                                          self._convert_number(match.group('extra_ab_ep_num')) + 1)
                        result.score += 1
                    else:
                        result.ab_episode_numbers = [ep_ab_num]
                    result.score += 1

                if 'air_year' in named_groups and 'air_month' in named_groups and 'air_day' in named_groups:
                    year = int(match.group('air_year'))
                    month = int(match.group('air_month'))
                    day = int(match.group('air_day'))
                    # make an attempt to detect YYYY-DD-MM formats
                    if 12 < month:
                        tmp_month = month
                        month = day
                        day = tmp_month
                    try:
                        result.air_date = datetime.date(year, month, day)
                    except ValueError as e:
                        raise InvalidNameException(ex(e))

                if 'extra_info' in named_groups:
                    tmp_extra_info = match.group('extra_info')

                    # Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
                    if tmp_extra_info and 'season_only' == cur_regex_name and re.search(
                            r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
                        continue
                    result.extra_info = tmp_extra_info
                    result.score += 1

                if 'release_group' in named_groups:
                    result.release_group = helpers.remove_non_release_groups(match.group('release_group'))
                    result.score += 1

                if 'version' in named_groups:
                    # assigns version to anime file if detected using anime regex. Non-anime regex receives -1
                    version = match.group('version')
                    if version:
                        result.version = version
                    else:
                        result.version = 1
                else:
                    result.version = -1

                matches.append(result)

            if len(matches):
                # pick best match with highest score based on placement
                best_result = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)

                show = None
                if not self.naming_pattern:
                    # try and create a show object for this result
                    show = helpers.get_show(best_result.series_name, self.try_scene_exceptions)

                # confirm passed in show object indexer id matches result show object indexer id
                if show and not self.testing:
                    if self.showObj and show.indexerid != self.showObj.indexerid:
                        show = None
                elif not show and self.showObj:
                    show = self.showObj
                best_result.show = show

                if show and show.is_anime and 1 < len(self.compiled_regexes[1]) and 1 != regex:
                    continue

                # if this is a naming pattern test then return best result
                if not show or self.naming_pattern:
                    return best_result

                # get quality
                best_result.quality = common.Quality.nameQuality(name, show.is_anime)

                new_episode_numbers = []
                new_season_numbers = []
                new_absolute_numbers = []

                # if we have an air-by-date show then get the real season/episode numbers
                if best_result.is_air_by_date:
                    airdate = best_result.air_date.toordinal()
                    my_db = db.DBConnection()
                    sql_result = my_db.select(
                        'SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?',
                        [show.indexerid, show.indexer, airdate])

                    season_number = None
                    episode_numbers = []

                    if sql_result:
                        season_number = int(sql_result[0][0])
                        episode_numbers = [int(sql_result[0][1])]

                    if not season_number or not len(episode_numbers):
                        try:
                            lindexer_api_parms = sickbeard.indexerApi(show.indexer).api_params.copy()

                            if show.lang:
                                lindexer_api_parms['language'] = show.lang

                            t = sickbeard.indexerApi(show.indexer).indexer(**lindexer_api_parms)

                            ep_obj = t[show.indexerid].airedOn(best_result.air_date)[0]

                            season_number = int(ep_obj['seasonnumber'])
                            episode_numbers = [int(ep_obj['episodenumber'])]
                        except sickbeard.indexer_episodenotfound:
                            logger.log(u'Unable to find episode with date ' + str(best_result.air_date) + ' for show ' + show.name + ', skipping', logger.WARNING)
                            episode_numbers = []
                        except sickbeard.indexer_error as e:
                            logger.log(u'Unable to contact ' + sickbeard.indexerApi(show.indexer).name + ': ' + ex(e), logger.WARNING)
                            episode_numbers = []

                    for epNo in episode_numbers:
                        s = season_number
                        e = epNo

                        if self.convert and show.is_scene:
                            (s, e) = scene_numbering.get_indexer_numbering(show.indexerid,
                                                                           show.indexer,
                                                                           season_number,
                                                                           epNo)
                        new_episode_numbers.append(e)
                        new_season_numbers.append(s)

                elif show.is_anime and len(best_result.ab_episode_numbers) and not self.testing:
                    scene_season = scene_exceptions.get_scene_exception_by_name(best_result.series_name)[1]
                    for epAbsNo in best_result.ab_episode_numbers:
                        a = epAbsNo

                        if self.convert and show.is_scene:
                            a = scene_numbering.get_indexer_absolute_numbering(show.indexerid,
                                                                               show.indexer, epAbsNo,
                                                                               True, scene_season)

                        (s, e) = helpers.get_all_episodes_from_absolute_number(show, [a])

                        new_absolute_numbers.append(a)
                        new_episode_numbers.extend(e)
                        new_season_numbers.append(s)

                elif best_result.season_number and len(best_result.episode_numbers) and not self.testing:
                    for epNo in best_result.episode_numbers:
                        s = best_result.season_number
                        e = epNo

                        if self.convert and show.is_scene:
                            (s, e) = scene_numbering.get_indexer_numbering(show.indexerid,
                                                                           show.indexer,
                                                                           best_result.season_number,
                                                                           epNo)
                        if show.is_anime:
                            a = helpers.get_absolute_number_from_season_and_episode(show, s, e)
                            if a:
                                new_absolute_numbers.append(a)

                        new_episode_numbers.append(e)
                        new_season_numbers.append(s)

                # need to do a quick sanity check here.  It's possible that we now have episodes
                # from more than one season (by tvdb numbering), and this is just too much
                # for sickbeard, so we'd need to flag it.
                new_season_numbers = list(set(new_season_numbers))  # remove duplicates
                if 1 < len(new_season_numbers):
                    raise InvalidNameException('Scene numbering results episodes from '
                                               'seasons %s, (i.e. more than one) and '
                                               'SickGear does not support this.  '
                                               'Sorry.' % (str(new_season_numbers)))

                # I guess it's possible that we'd have duplicate episodes too, so lets
                # eliminate them
                new_episode_numbers = list(set(new_episode_numbers))
                new_episode_numbers.sort()

                # maybe even duplicate absolute numbers so why not do them as well
                new_absolute_numbers = list(set(new_absolute_numbers))
                new_absolute_numbers.sort()

                if len(new_absolute_numbers):
                    best_result.ab_episode_numbers = new_absolute_numbers

                if len(new_season_numbers) and len(new_episode_numbers):
                    best_result.episode_numbers = new_episode_numbers
                    best_result.season_number = new_season_numbers[0]

                if self.convert and show.is_scene:
                    logger.log(u'Converted parsed result %s into %s'
                               % (best_result.original_name, str(best_result).decode('utf-8', 'xmlcharrefreplace')),
                               logger.DEBUG)

                # CPU sleep
                time.sleep(cpu_presets[sickbeard.CPU_PRESET])

                return best_result
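
The air-date branch above reduces to a few lines: if the captured "month" is greater than 12, the release name is assumed to be in YYYY-DD-MM order and the fields are swapped, and datetime.date's own validation catches anything still impossible. A minimal sketch with invented capture values:

import datetime

year, month, day = 2015, 23, 4        # hypothetical regex captures, YYYY-DD-MM style
if 12 < month:
    month, day = day, month           # assume the fields were transposed
try:
    air_date = datetime.date(year, month, day)
except ValueError as e:
    raise ValueError("release name has an invalid air date: %s" % e)
print(air_date)                        # 2015-04-23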

Example 37

Project: PyClassLessons Source File: admin_list.py
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.
    """

    def link_in_col(is_first, field_name, cl):
        if cl.list_display_links is None:
            return False
        if is_first and not cl.list_display_links:
            return True
        return field_name in cl.list_display_links

    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        row_classes = ['field-%s' % field_name]
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except ObjectDoesNotExist:
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                if field_name == 'action_checkbox':
                    row_classes = ['action-checkbox']
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    allow_tags = True
                result_repr = display_for_value(value, boolean)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if allow_tags:
                    result_repr = mark_safe(result_repr)
                if isinstance(value, (datetime.date, datetime.time)):
                    row_classes.append('nowrap')
            else:
                if isinstance(f.rel, models.ManyToOneRel):
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = EMPTY_CHANGELIST_VALUE
                    else:
                        result_repr = field_val
                else:
                    result_repr = display_for_field(value, f)
                if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
                    row_classes.append('nowrap')
        if force_text(result_repr) == '':
            result_repr = mark_safe('&nbsp;')
        row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
        # If list_display_links not defined, add the link tag to the first field
        if link_in_col(first, field_name, cl):
            table_tag = 'th' if first else 'td'
            first = False

            # Display link to the result's change_view if the url exists, else
            # display just the result's representation.
            try:
                url = cl.url_for_result(result)
            except NoReverseMatch:
                link_or_text = result_repr
            else:
                url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
                # Convert the pk to something that can be used in Javascript.
                # Problem cases are long ints (23L) and non-ASCII strings.
                if cl.to_field:
                    attr = str(cl.to_field)
                else:
                    attr = pk
                value = result.serializable_value(attr)
                result_id = escapejs(value)
                link_or_text = format_html(
                    '<a href="{0}"{1}>{2}</a>',
                    url,
                    format_html(' onclick="opener.dismissRelatedLookupPopup(window, &#39;{0}&#39;); return false;"', result_id) if cl.is_popup else '',
                    result_repr)

            yield format_html('<{0}{1}>{2}</{3}>',
                              table_tag,
                              row_class,
                              link_or_text,
                              table_tag)
        else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable custom admins
            # can provide fields on a per request basis
            if (form and field_name in form.fields and not (
                    field_name == cl.model._meta.pk.name and
                    form[cl.model._meta.pk.name].is_hidden)):
                bf = form[field_name]
                result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
            yield format_html('<td{0}>{1}</td>', row_class, result_repr)
    if form and not form[cl.model._meta.pk.name].is_hidden:
        yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
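
A note on the isinstance() check against (datetime.date, datetime.time) above: because datetime.datetime is a subclass of datetime.date, full timestamps pick up the 'nowrap' row class as well as plain dates. A minimal sketch (not part of the Django source) showing which values match:

import datetime

for value in (datetime.date(2004, 1, 2),
              datetime.datetime(2004, 1, 2, 18, 10, 45),
              datetime.time(18, 10, 45),
              "plain string"):
    # True for date, datetime and time values; False for the string
    print(value, isinstance(value, (datetime.date, datetime.time)))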

Example 38

Project: sopython-site Source File: chat.py
    @classmethod
    def html_load(cls, element, room_id, ts_date=None, update=True):
        """Create a message by id and update it from scraped HTML.

        :param element: message element from Beautiful Soup
        :param room_id: needed for fetching "see full text" messages
        :param ts_date: date parsed from the page containing the message.  If None, the timestamps are assumed to have the full date, filling in missing fields with today's date.
        :param update: if False and the message already has a timestamp, return the cached instance without re-parsing.
        :return: instance
        """

        id = int(id_re.search(element['id']).group(1))
        o = cls.get_unique(id=id)

        if not update and o.ts is not None:
            return o

        o.room_id = room_id

        user_url = element.find_previous('div', class_='signature').find('a')['href']
        # don't try to re-cache existing users, since they may be loaded for multiple messages
        o.user = SEUser.se_load(ident=user_url, update=False)

        # Yam it, these are the dumbest timestamps ever.
        # Not every message in the transcript has a timestamp, so we just give all those messages the closest previous timestamp.
        # A timestamp can be:
        # hour:minute period, in which case you need the timestamp from the transcript page, or the current day if this is the starred message list
        # yst hour:minute period, in which case subtract one day
        # weekday hour:minute period, in which case treat today as the last day of the week to calculate and subtract an offset
        # month day hour:minute period, in which case you need to get the year from the transcript or the current day
        # month day 'year hour:minute period, hooray, the only thing wrong with this is the 2 digit year!
        # I know they have the full, seconds resolution, timestamp somewhere, because you can see it when hovering the timestamp in the recently starred list

        # if this is the transcript, the day was parsed and passed in, otherwise it's the chatroom and we start with the current date
        ts_date = ts_date if ts_date is not None else datetime.utcnow().date()
        # find the closest previous timestamp and parse it with a crazy regex to handle all the cases
        ts_data = ts_re.search(element.find_previous('div', class_='timestamp').string).groupdict()
        # at least there's always a time, instead of "5 minutes ago"
        hour = int(ts_data['hour'])
        minute = int(ts_data['minute'])

        if ts_data['month'] is not None:
            # there was a month, so this will replace the start date
            # if there's a year, use strptime to handle 2-digit years as sanely as possible
            # otherwise, use the date we started with to get the year
            year = datetime.strptime(ts_data['year'], '%y').year if ts_data['year'] is not None else ts_date.year
            # get a month's number by name
            month = months.index(ts_data['month'])
            day = int(ts_data['day'])
            # build the new date
            ts_date = date(year, month, day)
        elif ts_data['weekday'] is not None:
            # instead of the date, we got a day of the week in the starred list
            if ts_data['weekday'] == 'yst':
                # or even dumber, we got "yesterday"
                offset = timedelta(-1)
            else:
                # to figure out the offset for a given day relative to the current day
                # remember the days of the week start on monday and are zero based
                # go back 6 days
                # get the number for the day of the week
                # get the number for the current day of the week, treat that as the last day of the week by subtracting from 6
                # add the last day offset to the normal day number, wrapping around if we overflow the week
                offset = timedelta(-6 + ((days.index(ts_data['weekday']) + (6 - ts_date.weekday())) % 7))

            # modify today's date with the offset
            ts_date += offset

        if ts_data['period'] == 'AM' and hour == 12:
            # 12 AM is actually 0 in 24 hour time
            hour = 0
        elif ts_data['period'] == 'PM' and hour != 12:
            # hours after 12 PM are shifted up 12
            hour += 12

        # build a utc timestamp from the date and the time
        o.ts = datetime.combine(ts_date, time(hour, minute))

        if element.find(class_='partial') is not None:
            # this is a "see full text" message, load the full unrendered message
            o.content = requests.get(full_text_url.format(room_id, id)).text
            o.rendered = False
        else:
            # normal full message
            o.content = element.find('div', class_='content').decode_contents().strip()
            o.rendered = True

        stars_elem = element.find('span', class_='stars')
        o.stars = int(stars_elem.find('span', class_='times').string or 0) if stars_elem is not None else 0

        return o
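
The weekday branch above is easiest to follow with concrete numbers. A small sketch, assuming a Monday-first list of abbreviated day names like the days list the module presumably defines:

from datetime import date, timedelta

days = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']   # assumed day-name list
ts_date = date(2015, 6, 17)                                # a Wednesday, weekday() == 2

for weekday in ('mon', 'wed', 'thu'):
    offset = timedelta(-6 + ((days.index(weekday) + (6 - ts_date.weekday())) % 7))
    print(weekday, ts_date + offset)
# mon -> 2015-06-15 (two days back)
# wed -> 2015-06-17 (today is treated as the last day of the week)
# thu -> 2015-06-11 (wraps back to the previous Thursday)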

Example 39

Project: django-forms-builder Source File: forms.py
    def rows(self, csv=False):
        """
        Returns each row based on the selected criteria.
        """

        # Store the index of each field against its ID for building each
        # entry row with columns in the correct order. Also store the IDs of
        # fields with a type of FileField or Date-like for special handling of
        # their values.
        field_indexes = {}
        file_field_ids = []
        date_field_ids = []
        for field in self.form_fields:
            if self.posted_data("field_%s_export" % field.id):
                field_indexes[field.id] = len(field_indexes)
                if field.is_a(fields.FILE):
                    file_field_ids.append(field.id)
                elif field.is_a(*fields.DATES):
                    date_field_ids.append(field.id)
        num_columns = len(field_indexes)
        include_entry_time = self.posted_data("field_0_export")
        if include_entry_time:
            num_columns += 1

        # Get the field entries for the given form and filter by entry_time
        # if specified.
        model = self.fieldentry_model
        field_entries = model.objects.filter(entry__form=self.form
            ).order_by("-entry__id").select_related("entry")
        if self.posted_data("field_0_filter") == FILTER_CHOICE_BETWEEN:
            time_from = self.posted_data("field_0_from")
            time_to = self.posted_data("field_0_to")
            if time_from and time_to:
                field_entries = field_entries.filter(
                    entry__entry_time__range=(time_from, time_to))

        # Loop through each field value ordered by entry, building up each
        # entry as a row. Use the ``valid_row`` flag for marking a row as
        # invalid if it fails one of the filtering criteria specified.
        current_entry = None
        current_row = None
        valid_row = True
        for field_entry in field_entries:
            if field_entry.entry_id != current_entry:
                # New entry, write out the current row and start a new one.
                if valid_row and current_row is not None:
                    if not csv:
                        current_row.insert(0, current_entry)
                    yield current_row
                current_entry = field_entry.entry_id
                current_row = [""] * num_columns
                valid_row = True
                if include_entry_time:
                    current_row[-1] = field_entry.entry.entry_time
            field_value = field_entry.value or ""
            # Check for filter.
            field_id = field_entry.field_id
            filter_type = self.posted_data("field_%s_filter" % field_id)
            filter_args = None
            if filter_type:
                if filter_type == FILTER_CHOICE_BETWEEN:
                    f, t = "field_%s_from" % field_id, "field_%s_to" % field_id
                    filter_args = [self.posted_data(f), self.posted_data(t)]
                else:
                    field_name = "field_%s_contains" % field_id
                    filter_args = self.posted_data(field_name)
                    if filter_args:
                        filter_args = [filter_args]
            if filter_args:
                # Convert dates before checking filter.
                if field_id in date_field_ids:
                    try:
                        y, m, d = field_value.split(" ")[0].split("-")
                    except ValueError:
                        filter_args.append(field_value)
                    else:
                        dte = date(int(y), int(m), int(d))
                        filter_args.append(dte)
                else:
                    filter_args.append(field_value)
                filter_func = FILTER_FUNCS[filter_type]
                if not filter_func(*filter_args):
                    valid_row = False
            # Create download URL for file fields.
            if field_entry.value and field_id in file_field_ids:
                url = reverse("admin:form_file", args=(field_entry.id,))
                field_value = self.request.build_absolute_uri(url)
                if not csv:
                    parts = (field_value, split(field_entry.value)[1])
                    field_value = mark_safe("<a href=\"%s\">%s</a>" % parts)
            # Only use values for fields that were selected.
            try:
                current_row[field_indexes[field_id]] = field_value
            except KeyError:
                pass
        # Output the final row.
        if valid_row and current_row is not None:
            if not csv:
                current_row.insert(0, current_entry)
            yield current_row
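
The date conversion used for the "between" filter above boils down to splitting a stored "YYYY-MM-DD[ HH:MM:SS]" string and rebuilding a datetime.date so it can be compared against the from/to bounds. A minimal sketch with made-up values:

from datetime import date

field_value = "2015-02-17 10:30:00"
y, m, d = field_value.split(" ")[0].split("-")
dte = date(int(y), int(m), int(d))
print(date(2015, 1, 1) <= dte <= date(2015, 12, 31))   # True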

Example 40

Project: eden Source File: MapPlugin.py
    def render_plots(self,
                     specs,
                     width,
                     height
                     ):
        env = self.env
        DSL = env.DSL

        def generate_chart(file_path):
            time_serieses = []

            from scipy import stats
            regression_lines = []

            R = self.R
            c = R("c")
            spec_names = []
            starts = []
            ends = []
            yearly = []
            for label, spec in specs:
                query_expression = spec["query_expression"]
                expression = DSL.parse(query_expression)
                understood_expression_string = str(expression)
                spec_names.append(label)
                units = DSL.units(expression)
                unit_string = str(units)
                if units is None:
                    analysis_strings = []
                    def analysis_out(*things):
                        analysis_strings.append("".join(map(str, things)))
                    DSL.analysis(expression, analysis_out)
                    raise MeaninglessUnitsException(
                        "\n".join(analysis_strings)
                    )
                is_yearly_values = "Months(" in query_expression
                yearly.append(is_yearly_values)
                if is_yearly_values:
                    if "Prev" in query_expression:
                        # PreviousDecember handling:
                        grouping_key = "(time_period - ((time_period + 1000008 + %i +1) %% 12))" % start_month_0_indexed
                    else:
                        grouping_key = "(time_period - ((time_period + 1000008 + %i) %% 12))" % start_month_0_indexed
                else:
                    grouping_key = "time_period"
                code = DSL.R_Code_for_values(
                    expression,
                    grouping_key,
                    "place_id IN (%s)" % ",".join(map(str, spec["place_ids"]))
                )
                #print code
                values_by_time_period_data_frame = R(code)()
                data = {}
                if isinstance(
                    values_by_time_period_data_frame,
                    self.robjects.vectors.StrVector
                ):
                    raise Exception(str(values_by_time_period_data_frame))
                elif values_by_time_period_data_frame.ncol == 0:
                    pass
                else:
                    keys = values_by_time_period_data_frame.rx2("key")
                    values = values_by_time_period_data_frame.rx2("value")
                    try:
                        display_units = {
                            "Kelvin": "Celsius",
                        }[unit_string]
                    except KeyError:
                        converter = lambda x:x
                        display_units = unit_string
                    else:
                        converter = units_in_out[display_units]["out"]

                    linear_regression = R("{}")

                    previous_december_month_offset = [0,1][is_yearly_values and "Prev" in query_expression]

                    def month_number_to_float_year(month_number):
                        year, month = month_number_to_year_month(month_number+previous_december_month_offset)
                        return year + (float(month-1) / 12)

                    converted_keys = map(month_number_to_float_year, keys)
                    converted_values = map(converter, values)
                    regression_lines.append(
                        stats.linregress(converted_keys, converted_values)
                    )

                    add = data.__setitem__
                    for key, value in zip(keys, values):
                        #print key, value
                        add(key, value)
                    # assume monthly values and monthly time_period
                    start_month_number = min(data.iterkeys())
                    starts.append(start_month_number)
                    start_year, start_month = month_number_to_year_month(
                        start_month_number + previous_december_month_offset
                    )

                    end_month_number = max(data.iterkeys())
                    ends.append(end_month_number)
                    end_year, end_month = month_number_to_year_month(
                        end_month_number + previous_december_month_offset
                    )

                    values = []
                    for month_number in range(
                        start_month_number,
                        end_month_number+1,
                        [1,12][is_yearly_values]
                    ):
                        if not data.has_key(month_number):
                            values.append(None)
                        else:
                            values.append(converter(data[month_number]))

                    if is_yearly_values:
                        time_serieses.append(
                            R("ts")(
                                self.robjects.FloatVector(values),
                                start = c(start_year),
                                end = c(end_year),
                                frequency = 1
                            )
                        )
                    else:
                        time_serieses.append(
                            R("ts")(
                                self.robjects.FloatVector(values),
                                start = c(start_year, start_month),
                                end = c(end_year, end_month),
                                frequency = 12
                            )
                        )
            min_start = min(starts)
            max_end = max(ends)
            show_months = any(not is_yearly for is_yearly in yearly)
            if show_months:
                # the step below spaces out the x-axis marks sensibly based on
                # width by not marking all of them.
                ticks = (max_end - min_start) + 1
                # ticks should be made at 1,2,3,4,6,12 month intervals
                # or 1, 2, 5, 10, 20, 50 year intervals
                # depending on the usable width and the number of ticks
                # ticks should be at least 15 pixels apart
                usable_width = width - 100
                max_ticks = usable_width / 15.0
                Y = 12
                for step in [1,2,3,4,6,12,2*Y, 5*Y, 10*Y, 20*Y, 50*Y]:
                    if ticks/step <= max_ticks:
                        break

                axis_points = []
                axis_labels = []
                month_names = (
                    "Jan Feb Mar Apr May Jun "
                    "Jul Aug Sep Oct Nov Dec"
                ).split(" ")
                for month_number in range(min_start, max_end+1, step):
                    year, month = month_number_to_year_month(month_number)
                    month -= 1
                    axis_points.append(
                        year + (month / 12.0)
                    )
                    axis_labels.append(
                        "%s %i" % (month_names[month], year)
                    )
            else:
                # show only years
                axis_points = []
                axis_labels = []
                start_year, start_month = month_number_to_year_month(min_start)
                end_year, end_month = month_number_to_year_month(max_end)
                for year in range(start_year, end_year+1):
                    axis_points.append(year)
                    axis_labels.append(year)

            display_units = display_units.replace("Celsius", "\xc2\xb0Celsius")

            R.png(
                filename = file_path,
                width = width,
                height = height
            )

            plot_chart = R("""
function (
    xlab, ylab, n, names, axis_points,
    axis_labels, axis_orientation,
    plot_type,
    width, height,
    total_margin_height,
    line_interspacing,
    ...
) {
    split_names <- lapply(
        names,
        strwrap, width=(width - 100)/5
    )
    wrapped_names <- lapply(
        split_names,
        paste, collapse='\n'
    )
    legend_line_count = sum(sapply(split_names, length))
    legend_height_inches <- grconvertY(
        -(
            (legend_line_count * 11) +
            (length(wrapped_names) * 6) + 30
        ),
        "device",
        "inches"
    ) - grconvertY(0, "device", "inches")
    par(
        xpd = T,
        mai = (par()$mai + c(legend_height_inches , 0, 0, 0))
    )
    ts.plot(...,
        gpars = list(
            xlab = xlab,
            ylab = ylab,
            col = c(1:n),
            pch = c(21:25),
            type = plot_type,
            xaxt = 'n'
        )
    )
    axis(
        1,
        at = axis_points,
        labels = axis_labels,
        las = axis_orientation
    )
    legend(
        par()$usr[1],
        par()$usr[3] - (
            grconvertY(0, "device", "user") -
            grconvertY(70, "device", "user")
        ),
        wrapped_names,
        cex = 0.8,
        pt.bg = c(1:n),
        pch = c(21:25),
        bty = 'n',
        y.intersp = line_interspacing,
        text.width = 3
    )
}""" )
            for regression_line, i in zip(
                regression_lines,
                range(len(time_serieses))
            ):
                slope, intercept, r, p, stderr = regression_line
                if isnan(slope) or isnan(intercept):
                    spec_names[i] += "   {cannot calculate linear regression}"
                else:
                    if isnan(p):
                        p_str = "NaN"
                    else:
                        p_str = str(round_to_4_sd(p))
                    if isnan(stderr):
                        stderr_str = "NaN"
                    else:
                        stderr_str = str(round_to_4_sd(stderr))

                    slope_str, intercept_str, r_str = map(
                        str,
                        map(round_to_4_sd, (slope, intercept, r))
                    )

                    spec_names[i] += (
                        u"   {"
                            "y=%(slope_str)s x year %(add)s%(intercept_str)s, "
                            "r= %(r_str)s, "
                            "p= %(p_str)s, "
                            "S.E.= %(stderr_str)s"
                        "}"
                    ) % dict(
                        locals(),
                        add = [u"+ ",u""][intercept_str.startswith("-")]
                    )

            plot_chart(
                xlab = "",
                ylab = display_units,
                n = len(time_serieses),
                names = spec_names,
                axis_points = axis_points,
                axis_labels = axis_labels,
                axis_orientation = [0,2][show_months],
                plot_type= "lo"[is_yearly_values],
                width = width,
                height = height,
                # R uses Normalised Display coordinates.
                # these have been found by recursive improvement
                # they place the legend legibly. tested up to 8 lines
                total_margin_height = 150,
                line_interspacing = 1.8,
                *time_serieses
            )

            for regression_line, colour_number in zip(
                regression_lines,
                range(len(time_serieses))
            ):
                slope = regression_line[0]
                intercept = regression_line[1]
                if isnan(slope) or isnan(intercept):
                    pass
                else:
                    R.par(xpd = False)
                    R.abline(
                        intercept,
                        slope,
                        col = colour_number+1
                    )
            R("dev.off()")

            import Image, ImageEnhance

            RGBA = "RGBA"
            def reduce_opacity(image, opacity):
                """Returns an image with reduced opacity."""
                assert opacity >= 0 and opacity <= 1
                if image.mode != RGBA:
                    image = image.convert(RGBA)
                else:
                    image = image.copy()
                alpha = image.split()[3]
                alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
                image.putalpha(alpha)
                return image

            def scale_preserving_aspect_ratio(image, ratio):
                return image.resize(
                    map(int, map(ratio.__mul__, image.size))
                )

            def watermark(image, mark, position, opacity=1):
                """Adds a watermark to an image."""
                if opacity < 1:
                    mark = reduce_opacity(mark, opacity)
                if image.mode != RGBA:
                    image = image.convert(RGBA)
                # create a transparent layer the size of the
                # image and draw the watermark in that layer.
                layer = Image.new(RGBA, image.size, (0,0,0,0))
                if position == 'tile':
                    for y in range(0, image.size[1], mark.size[1]):
                        for x in range(0, image.size[0], mark.size[0]):
                            layer.paste(mark, (x, y))
                elif position == 'scale':
                    # scale, but preserve the aspect ratio
                    ratio = min(
                        float(image.size[0]) / mark.size[0],
                        float(image.size[1]) / mark.size[1]
                    )
                    w = int(mark.size[0] * ratio)
                    h = int(mark.size[1] * ratio)
                    mark = mark.resize((w, h))
                    layer.paste(
                        mark,
                        (
                            (image.size[0] - w) / 2,
                            (image.size[1] - h) / 2
                        )
                    )
                else:
                    layer.paste(mark, position)
                # composite the watermark with the layer
                return Image.composite(layer, image, layer)

            image = Image.open(file_path)
            watermark_image_path = os.path.join(
                os.path.realpath("."),
                "applications",
                current.request.application,
                "static", "img",
                "Nepal-Government-Logo.png"
            )
            watermark_image = Image.open(watermark_image_path)
            #watermark_image = scale_preserving_aspect_ratio(watermark_image, 0.5)
            watermark(image, watermark_image, 'scale', 0.05).save(file_path)

        def serialiseDate(obj):
            if isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):
                return obj.isoformat()[:19].replace("T"," ")
            else:
                raise TypeError("%r is not JSON serializable" % (obj,))

        return get_cached_or_generated_file(
            "".join((
                hashlib.md5(
                    json.dumps(
                        [specs, width, height],
                        sort_keys=True,
                        default=serialiseDate,
                    )
                ).hexdigest(),
                ".png"
            )),
            generate_chart
        )
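
The serialiseDate helper above is the standard pattern for feeding dates to json.dumps: any object the encoder cannot handle is passed to the default callback. A small self-contained sketch of the same idea:

import datetime
import json

def serialiseDate(obj):
    if isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):
        return obj.isoformat()[:19].replace("T", " ")
    raise TypeError("%r is not JSON serializable" % (obj,))

print(json.dumps({"start": datetime.date(2015, 2, 17)}, default=serialiseDate))
# {"start": "2015-02-17"}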

Example 41

Project: nsepy Source File: archives.py
Function: get_price_history_small
def get_price_history_small(stock, period = '+', start = '', end = '',
                        symbol_count = '',
                        proxies = {}):
    
    if type(start) == type(datetime.date(2000,1,1)):
        start = date_to_str(start)
    if type(end) == type(datetime.date(2000,1,1)):
        end = date_to_str(end)
    
    cell_cnt = 0
    row_cnt = 0
    
    DATE = 3
    PREV_CLOSE = 4
    OPEN_PRICE = 5
    HIGH_PRICE = 6
    LOW_PRICE = 7
    LAST_PRICE = 8
    CLOSE_PRICE = 9
    VWAP = 10
    TOTAL_Q =  11
    TURNOVER = 12
    TRADES = 13
    DELIVERABLE_Q = 14
    PERC_DELIVERABLE = 15
    
    text = __get_html_data_raw(symbol = stock, period = period, 
                               start = start, end = end,
                               symbol_count = symbol_count, proxies = proxies)
    soup = BeautifulSoup(text, 'html.parser')
    table_rows = soup.find_all(name = 'tr')
    arr_len = len(table_rows) - 1

    dates = np.array([0] * arr_len, dtype='datetime64[D]')    
    prev_close = np.zeros(arr_len)
    open_price = np.zeros(arr_len)
    high_price = np.zeros(arr_len)
    low_price  = np.zeros(arr_len)
    last_price = np.zeros(arr_len)
    close_price = np.zeros(arr_len)
    vwap = np.zeros(arr_len)
    total_q = np.zeros(arr_len)
    turnover = np.zeros(arr_len)
    trades = np.zeros(arr_len)
    deliverable_q = np.zeros(arr_len)
    perc_deliverable = np.zeros(arr_len)
    
    
    for row in table_rows:
        if row.get_text().find('Symbol') >= 0:
            continue        
        for cell in row.get_text().split('\n'):
            try:
                cell_val = float(cell.replace(',',''))
            except:
                cell_val = np.NaN
            if cell_cnt == DATE:
                dates[row_cnt] =  str_to_date(cell)
            if cell_cnt == PREV_CLOSE:
                prev_close[row_cnt] = cell_val
            if cell_cnt == OPEN_PRICE:
                open_price[row_cnt] = cell_val
            if cell_cnt == HIGH_PRICE:
                high_price[row_cnt] = cell_val
            if cell_cnt == LOW_PRICE:
                low_price[row_cnt] = cell_val
            if cell_cnt == LAST_PRICE:
                last_price[row_cnt] = cell_val
            if cell_cnt == CLOSE_PRICE:
                close_price[row_cnt] = cell_val
            if cell_cnt == VWAP:
                vwap[row_cnt] = cell_val
            if cell_cnt == TOTAL_Q:
                total_q[row_cnt] = cell_val
            if cell_cnt == TURNOVER:
                turnover[row_cnt] = cell_val * 100000.0
            if cell_cnt == TRADES:
                trades[row_cnt] = cell_val
            if cell_cnt == DELIVERABLE_Q:
                deliverable_q[row_cnt] = cell_val
            if cell_cnt == PERC_DELIVERABLE:
                perc_deliverable[row_cnt] = cell_val / 100.0
            cell_cnt += 1
        cell_cnt = 0
        row_cnt += 1
        
    df = pd.DataFrame(prev_close)
    df.columns = ['Previous']
    df['Open'] = open_price
    df['High'] = high_price
    df['Low'] = low_price
    df['Last'] = last_price
    df['Close'] = close_price
    df['VWAP'] = vwap
    df['Volume'] = total_q
    df['Turnover'] = turnover
    df['Trades'] = trades
    df['Deliverable Volume'] = deliverable_q
    df['Percentage Deliverables'] = perc_deliverable
    df.index = dates
    return df
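
The date handling above relies on the fact that datetime.date objects (here produced by str_to_date) can be assigned straight into a numpy datetime64[D] array, which then serves as the DataFrame index. A minimal sketch with hypothetical values:

import datetime
import numpy as np
import pandas as pd

dates = np.array([0] * 2, dtype='datetime64[D]')
dates[0] = datetime.date(2016, 1, 4)
dates[1] = datetime.date(2016, 1, 5)

df = pd.DataFrame({'Close': [100.5, 101.2]})
df.index = dates
print(df)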

Example 42

Project: silver Source File: test_subscription.py
    def test_subscription_billing_cycle_intervals(self):
        subscription = SubscriptionFactory.create()
        metered_feature = MeteredFeatureFactory.create()

        subscription.plan.metered_features.add(metered_feature)

        start_date = datetime.date(year=2015, month=2, day=17)

        subscription.start_date = start_date
        subscription.activate()
        subscription.save()

        with patch('silver.models.subscriptions.timezone') as mock_timezone:
            # Every month, 16 days of trial
            subscription.plan.interval = Plan.INTERVALS.MONTH
            subscription.plan.interval_count = 1
            subscription.plan.save()

            subscription.trial_end = (subscription.start_date +
                                      datetime.timedelta(days=15))
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=2, day=28)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=3, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=3, day=31)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=4, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=4, day=30)
            assert end_date == subscription.current_end_date

            # Every 2 months, 5 months of trial (2015-05-30)
            subscription.plan.interval = Plan.INTERVALS.MONTH
            subscription.plan.interval_count = 2
            subscription.plan.save()

            subscription.start_date = datetime.date(year=2014, month=12, day=31)
            subscription.trial_end = (subscription.start_date +
                                      datetime.timedelta(days=150))
            subscription.save()

            start_date = subscription.start_date
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2014, month=12, day=31)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=1, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=2, day=28)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=3, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=4, day=30)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=5, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=6, day=30)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=7, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=8, day=31)
            assert end_date == subscription.current_end_date

            # Every 2 weeks, 8 days of trial
            subscription.plan.interval = Plan.INTERVALS.WEEK
            subscription.plan.interval_count = 2
            subscription.plan.save()

            subscription.start_date = datetime.date(year=2015, month=5, day=31)
            subscription.trial_end = (subscription.start_date +
                                      datetime.timedelta(days=7))
            subscription.save()

            start_date = subscription.start_date
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=5, day=31)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=6, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=6, day=14)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2015, month=6, day=15)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=6, day=28)
            assert end_date == subscription.current_end_date

            # Every year, 3 months (90 days) of trial
            subscription.plan.interval = Plan.INTERVALS.YEAR
            subscription.plan.interval_count = 1
            subscription.plan.save()

            subscription.start_date = datetime.date(year=2015, month=2, day=2)
            subscription.trial_end = (subscription.start_date +
                                      datetime.timedelta(days=90))
            subscription.save()

            start_date = subscription.start_date
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2015, month=12, day=31)
            assert end_date == subscription.current_end_date

            start_date = datetime.date(year=2016, month=1, day=1)
            mock_timezone.now.return_value = datetime.datetime.combine(
                start_date, datetime.datetime.min.time())
            assert start_date == subscription.current_start_date

            end_date = datetime.date(year=2016, month=12, day=31)
            assert end_date == subscription.current_end_date
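
The assertions above always land on the last day of the billing interval (2015-02-28, 2015-04-30, and so on). This is not silver's implementation, just a sketch of how such month-end dates can be computed with the standard library:

import calendar
import datetime

def month_end(any_day):
    # monthrange() returns (weekday of the 1st, number of days in the month)
    last_day = calendar.monthrange(any_day.year, any_day.month)[1]
    return datetime.date(any_day.year, any_day.month, last_day)

print(month_end(datetime.date(2015, 2, 17)))   # 2015-02-28
print(month_end(datetime.date(2015, 4, 1)))    # 2015-04-30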

Example 43

Project: django-adminactions Source File: mass_update.py
def mass_update(modeladmin, request, queryset):  # noqa
    """
        mass update queryset
    """

    def not_required(field, **kwargs):
        """ force all fields as not required"""
        kwargs['required'] = False
        return field.formfield(**kwargs)

    def _doit():
        errors = {}
        updated = 0
        for record in queryset:
            for field_name, value_or_func in list(form.cleaned_data.items()):
                if callable(value_or_func):
                    old_value = getattr(record, field_name)
                    setattr(record, field_name, value_or_func(old_value))
                else:
                    setattr(record, field_name, value_or_func)
            if clean:
                record.clean()
            record.save()
            updated += 1
        if updated:
            messages.info(request, _("Updated %s records") % updated)

        if len(errors):
            messages.error(request, "%s records not updated due to errors" % len(errors))
        adminaction_end.send(sender=modeladmin.model,
                             action='mass_update',
                             request=request,
                             queryset=queryset,
                             modeladmin=modeladmin,
                             form=form,
                             errors=errors,
                             updated=updated)

    opts = modeladmin.model._meta
    perm = "{0}.{1}".format(opts.app_label, get_permission_codename('adminactions_massupdate', opts))
    if not request.user.has_perm(perm):
        messages.error(request, _('Sorry you do not have rights to execute this action'))
        return

    try:
        adminaction_requested.send(sender=modeladmin.model,
                                   action='mass_update',
                                   request=request,
                                   queryset=queryset,
                                   modeladmin=modeladmin)
    except ActionInterrupted as e:
        messages.error(request, str(e))
        return

    # Allows a custom mass update Form to be specified in the ModelAdmin
    mass_update_form = getattr(modeladmin, 'mass_update_form', MassUpdateForm)

    MForm = modelform_factory(modeladmin.model, form=mass_update_form,
                              exclude=('pk',),
                              formfield_callback=not_required)
    grouped = defaultdict(lambda: [])
    selected_fields = []
    initial = {'_selected_action': request.POST.getlist(helpers.ACTION_CHECKBOX_NAME),
               'select_across': request.POST.get('select_across') == '1',
               'action': 'mass_update'}

    if 'apply' in request.POST:
        form = MForm(request.POST)
        if form.is_valid():
            try:
                adminaction_start.send(sender=modeladmin.model,
                                       action='mass_update',
                                       request=request,
                                       queryset=queryset,
                                       modeladmin=modeladmin,
                                       form=form)
            except ActionInterrupted as e:
                messages.error(request, str(e))
                return HttpResponseRedirect(request.get_full_path())

            # need_transaction = form.cleaned_data.get('_unique_transaction', False)
            validate = form.cleaned_data.get('_validate', False)
            clean = form.cleaned_data.get('_clean', False)

            if validate:
                with compat.atomic():
                    _doit()

            else:
                values = {}
                for field_name, value in list(form.cleaned_data.items()):
                    if isinstance(form.fields[field_name], ModelMultipleChoiceField):
                        messages.error(request, "Unable to mass update a ManyToManyField without 'validate'")
                        return HttpResponseRedirect(request.get_full_path())
                    elif callable(value):
                        messages.error(request, "Unable to mass update using operators without 'validate'")
                        return HttpResponseRedirect(request.get_full_path())
                    elif field_name not in ['_selected_action', '_validate', 'select_across', 'action',
                                            '_unique_transaction', '_clean']:
                        values[field_name] = value
                queryset.update(**values)

            return HttpResponseRedirect(request.get_full_path())
    else:
        initial.update({'action': 'mass_update', '_validate': 1})
        # form = MForm(initial=initial)
        prefill_with = request.POST.get('prefill-with', None)
        prefill_instance = None
        try:
            # Gets the instance directly from the queryset for data security
            prefill_instance = queryset.get(pk=prefill_with)
        except ObjectDoesNotExist:
            pass

        form = MForm(initial=initial, instance=prefill_instance)

    for el in queryset.all()[:10]:
        for f in modeladmin.model._meta.fields:
            if f.name not in form._no_sample_for:
                if hasattr(f, 'flatchoices') and f.flatchoices:
                    grouped[f.name] = list(dict(getattr(f, 'flatchoices')).values())
                elif hasattr(f, 'choices') and f.choices:
                    grouped[f.name] = list(dict(getattr(f, 'choices')).values())
                elif isinstance(f, df.BooleanField):
                    grouped[f.name] = [True, False]
                else:
                    value = getattr(el, f.name)
                    if value is not None and value not in grouped[f.name]:
                        grouped[f.name].append(value)
                    initial[f.name] = initial.get(f.name, value)

    adminForm = helpers.AdminForm(form, modeladmin.get_fieldsets(request), {}, [], model_admin=modeladmin)
    media = modeladmin.media + adminForm.media
    dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.date) else str(obj)
    tpl = 'adminactions/mass_update.html'
    ctx = {'adminform': adminForm,
           'form': form,
           'action_short_description': mass_update.short_description,
           'title': u"%s (%s)" % (
               mass_update.short_description.capitalize(),
               smart_text(modeladmin.opts.verbose_name_plural),
           ),
           'grouped': grouped,
           'fieldvalues': json.dumps(grouped, default=dthandler),
           'change': True,
           'selected_fields': selected_fields,
           'is_popup': False,
           'save_as': False,
           'has_delete_permission': False,
           'has_add_permission': False,
           'has_change_permission': True,
           'opts': modeladmin.model._meta,
           'app_label': modeladmin.model._meta.app_label,
           # 'action': 'mass_update',
           # 'select_across': request.POST.get('select_across')=='1',
           'media': mark_safe(media),
           'selection': queryset}
    if django.VERSION[:2] > (1, 7):
        ctx.update(modeladmin.admin_site.each_context(request))
    else:
        ctx.update(modeladmin.admin_site.each_context())

    if django.VERSION[:2] > (1, 8):
        return render(request, tpl, context=ctx)
    else:
        return render_to_response(tpl, RequestContext(request, ctx))

Example 44

Project: eve-central.com Source File: market_stat.py
def plot_basket(basket_data, basket, dayrange=60, domoving=True, dobuysell=True, sellonly=False):
    days = basket_data['byday'].keys()
    days.sort()
    days = days[-dayrange:]

    fdays = []

    avg = array(zeros(len(days)), dtype=float)
    avg_s = array(zeros(len(days)), dtype=float)
    avg_b = array(zeros(len(days)), dtype=float)

    cday = 0

    for day in days:
        n = len(basket_data['byday'][day].keys())
        n += 0.0
        s = 0.0
        s_s = 0.0
        s_b = 0.0

        bail_today = False

        for type in basket_data['byday'][day].keys():
            weight = get_weight(basket_data['contents'], type)

            try:
                s += basket_data['byday'][day][type][0][0] * weight
                s_b += basket_data['byday'][day][type][1][0] * weight
                s_s += basket_data['byday'][day][type][2][0] * weight
            except:
                bail_today = True

        fdays.append(datetime.date(day.year, day.month, day.day))

        if bail_today or s < 0.05 or s_s < 0.05 or s_b < 0.05:
            if cday == 0:
                avg[cday] = 0
                avg_s[cday] = 0
                avg_b[cday] = 0
            else:
                avg[cday] = avg[cday-1]
                avg_s[cday] = avg_s[cday-1]
                avg_b[cday] = avg_b[cday-1]
            cday += 1
            continue

        avg[cday] = s/n
        avg_s[cday] = s_s/n
        avg_b[cday] = s_b/n

        cday += 1

    if domoving:
        moving5 = build_moving5(days, avg)
        moving5_b = build_moving5(days, avg_b)
        moving5_s = build_moving5(days, avg_s)

    fdays = matplotlib.dates.date2num(fdays)

    ax = pylab.subplot(111)

    if not sellonly:
        pylab.plot_date(fdays, avg, 'b-')

    if dobuysell and not sellonly:
        pylab.plot_date(fdays, avg_s, 'c-')
        pylab.plot_date(fdays, avg_b, 'r-')

    if sellonly:
        pylab.plot_date(fdays, avg_s, 'c-')
        pylab.plot_date(fdays[4:], moving5_s, 'g--')

    if domoving and not sellonly:
        pylab.plot_date(fdays[4:], moving5, 'g-')
        if dobuysell:
            pylab.plot_date(fdays[4:], moving5_s, 'g--')
            pylab.plot_date(fdays[4:], moving5_b, 'g--')

    ymin = 0
    ymax = 0
    if dobuysell and not sellonly:
        ymin = min([min(avg_s), min(avg), min(avg_b)])
        ymax = max([max(avg), max(avg_s), max(avg_b)])
    elif not sellonly:
        ymin = min(avg)
        ymax = max(avg)
    elif sellonly:
        ymin = min(avg_s)
        ymax = max(avg_s)
        print "Sellonly", ymin, ymax

    interval = None
    if dayrange == 60:
        interval = 12
    elif dayrange == 180:
        interval = 36
    else:
        interval = 6

    ax.xaxis.set_major_locator(matplotlib.dates.DayLocator(interval=interval))
    ax.xaxis.set_minor_locator(matplotlib.dates.DayLocator(interval=1))
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%D'))
    ax.autoscale_view()
    pylab.grid(True)
    pylab.ylim(ymin-(ymin/32), ymax+(ymax/32))
    pylab.ylabel("Index Price (ISK)")
    pylab.title("Market Index: "+basket+", Past " +`dayrange`+" days")

    other = ""

    if sellonly:
        other = other + "-sellonly"
    else:
        if not dobuysell:
            other = other + "-nobuysell"
        if not domoving:
            other = other + "-nomoving"
    try:
        pylab.savefig('/www/eve-central.com/static_web/reports/'+basket+'-'+`dayrange`+other+'.png')
    except:
        print "Can't save figure... skipping for now"
    pylab.close()
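
The plotting above hinges on converting datetime.date objects to matplotlib's floating-point day numbers before calling plot_date. A stripped-down sketch with made-up data (the Agg backend and output path are assumptions, chosen so the example runs headless):

import datetime
import matplotlib
matplotlib.use("Agg")          # headless backend for generating report images
import matplotlib.dates
import pylab

fdays = [datetime.date(2015, 6, d) for d in (1, 2, 3)]
fdays = matplotlib.dates.date2num(fdays)
pylab.plot_date(fdays, [1.0, 1.5, 1.2], 'b-')
pylab.savefig('/tmp/market-index-sketch.png')
pylab.close()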

Example 45

Project: ion Source File: attendance.py
@eighth_admin_required
def delinquent_students_view(request):
    lower_absence_limit = request.GET.get("lower", "")
    upper_absence_limit = request.GET.get("upper", "")

    include_freshmen = (request.GET.get("freshmen", "off") == "on")
    include_sophumores = (request.GET.get("sophumores", "off") == "on")
    include_juniors = (request.GET.get("juniors", "off") == "on")
    include_seniors = (request.GET.get("seniors", "off") == "on")

    if not request.META["QUERY_STRING"]:
        include_freshmen = True
        include_sophumores = True
        include_juniors = True
        include_seniors = True

    start_date = request.GET.get("start", "")
    end_date = request.GET.get("end", "")

    if not lower_absence_limit.isdigit():
        lower_absence_limit = ""
        lower_absence_limit_filter = 1
    else:
        lower_absence_limit_filter = lower_absence_limit

    if not upper_absence_limit.isdigit():
        upper_absence_limit = ""
        upper_absence_limit_filter = 1000
    else:
        upper_absence_limit_filter = upper_absence_limit

    try:
        start_date = datetime.strptime(start_date, "%Y-%m-%d")
        start_date_filter = start_date
    except ValueError:
        start_date = ""
        start_date_filter = date(MINYEAR, 1, 1)

    try:
        end_date = datetime.strptime(end_date, "%Y-%m-%d")
        end_date_filter = end_date
    except ValueError:
        end_date = ""
        end_date_filter = date(MAXYEAR, 12, 31)

    context = {
        "lower_absence_limit": lower_absence_limit,
        "upper_absence_limit": upper_absence_limit,
        "include_freshmen": include_freshmen,
        "include_sophumores": include_sophumores,
        "include_juniors": include_juniors,
        "include_seniors": include_seniors,
        "start_date": start_date,
        "end_date": end_date
    }

    query_params = ["lower", "upper", "freshmen", "sophumores", "juniors", "seniors", "start", "end"]

    if set(request.GET.keys()).intersection(set(query_params)):
        # attendance MUST have been taken on the activity for the absence to be valid
        non_delinquents = []
        delinquents = []
        if int(upper_absence_limit_filter) == 0 or int(lower_absence_limit_filter) == 0:
            users_with_absence = (EighthSignup.objects.filter(
                was_absent=True, scheduled_activity__attendance_taken=True, scheduled_activity__block__date__gte=start_date_filter,
                scheduled_activity__block__date__lte=end_date_filter).values("user").annotate(absences=Count("user")).filter(absences__gte=1)
                .values("user", "absences").order_by("user"))

            uids_with_absence = [row["user"] for row in users_with_absence]
            all_students = User.objects.get_students().values_list("id")
            uids_all_students = [row[0] for row in all_students]
            uids_without_absence = set(uids_all_students) - set(uids_with_absence)
            users_without_absence = User.objects.filter(id__in=uids_without_absence).order_by("id")
            non_delinquents = []
            for usr in users_without_absence:
                non_delinquents.append({"absences": 0, "user": usr})

            logger.debug(non_delinquents)

        if int(upper_absence_limit_filter) > 0:
            delinquents = (EighthSignup.objects.filter(
                was_absent=True, scheduled_activity__attendance_taken=True, scheduled_activity__block__date__gte=start_date_filter,
                scheduled_activity__block__date__lte=end_date_filter).values("user").annotate(absences=Count("user"))
                .filter(absences__gte=lower_absence_limit_filter,
                        absences__lte=upper_absence_limit_filter).values("user", "absences").order_by("user"))

            user_ids = [d["user"] for d in delinquents]
            delinquent_users = User.objects.filter(id__in=user_ids).order_by("id")
            for index, user in enumerate(delinquent_users):
                delinquents[index]["user"] = user
            logger.debug(delinquents)

            delinquents = list(delinquents)

        delinquents += non_delinquents

        def filter_by_grade(delinquent):
            grade = delinquent["user"].grade.number
            include = False
            if include_freshmen:
                include |= (grade == 9)
            if include_sophumores:
                include |= (grade == 10)
            if include_juniors:
                include |= (grade == 11)
            if include_seniors:
                include |= (grade == 12)
            return include

        delinquents = list(filter(filter_by_grade, delinquents))
        # most absences at top
        delinquents = sorted(delinquents, key=lambda x: (-1 * x["absences"], x["user"].last_name))

        logger.debug(delinquents)
    else:
        delinquents = None

    context["delinquents"] = delinquents

    if request.resolver_match.url_name == "eighth_admin_view_delinquent_students":
        context["admin_page_title"] = "Delinquent Students"
        return render(request, "eighth/admin/delinquent_students.html", context)
    else:
        response = http.HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = "attachment; filename=\"delinquent_students.csv\""

        writer = csv.writer(response)
        writer.writerow(["Start Date", "End Date", "Absences", "Last Name", "First Name", "Student ID", "Grade", "Counselor", "TJ Email",
                         "Other Email"])

        for delinquent in delinquents:
            row = []
            row.append(str(start_date).split(" ", 1)[0])
            row.append(str(end_date).split(" ", 1)[0])
            row.append(delinquent["absences"])
            row.append(delinquent["user"].last_name)
            row.append(delinquent["user"].first_name)
            row.append(delinquent["user"].student_id)
            row.append(delinquent["user"].grade.number)
            counselor = delinquent["user"].counselor
            row.append(counselor.last_name if counselor else "")
            row.append("{}".format(delinquent["user"].tj_email))
            row.append(delinquent["user"].emails[0] if delinquent["user"].emails and len(delinquent["user"].emails) > 0 else "")
            writer.writerow(row)

        return response
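
The try/except blocks above fall back to date(MINYEAR, 1, 1) and date(MAXYEAR, 12, 31) so that an unparsable input widens the filter to the broadest possible range. A minimal sketch of the same pattern:

from datetime import MINYEAR, MAXYEAR, date, datetime

def parse_or_default(value, default):
    try:
        return datetime.strptime(value, "%Y-%m-%d")
    except ValueError:
        return default

start = parse_or_default("2015-02-17", date(MINYEAR, 1, 1))
end = parse_or_default("not a date", date(MAXYEAR, 12, 31))
print(start, end)   # 2015-02-17 00:00:00 9999-12-31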

Example 46

Project: django-dbsettings Source File: tests.py
    def test_settings(self):
        "Make sure settings groups are initialized properly"

        # Settings already in the database are available immediately
        self.assertEqual(Populated.settings.boolean, True)
        self.assertEqual(Populated.settings.integer, 42)
        self.assertEqual(Populated.settings.string, 'Ni!')
        self.assertEqual(Populated.settings.list_semi_colon, ['[email protected]', '[email protected]', '[email protected]'])
        self.assertEqual(Populated.settings.list_comma, ['[email protected]', '[email protected]', '[email protected]'])
        self.assertEqual(Populated.settings.date, datetime.date(2012, 6, 28))
        self.assertEqual(Populated.settings.time, datetime.time(16, 19, 17))
        self.assertEqual(Populated.settings.datetime, datetime.datetime(2012, 6, 28, 16, 19, 17))

        # Module settings are kept separate from model settings
        self.assertEqual(module_settings.boolean, False)
        self.assertEqual(module_settings.integer, 14)
        self.assertEqual(module_settings.string, 'Module')
        self.assertEqual(module_settings.list_semi_colon, ['[email protected]', '[email protected]', '[email protected]'])
        self.assertEqual(module_settings.list_comma, ['[email protected]', '[email protected]', '[email protected]'])
        self.assertEqual(module_settings.date, datetime.date(2011, 5, 27))
        self.assertEqual(module_settings.time, datetime.time(15, 18, 16))
        self.assertEqual(module_settings.datetime, datetime.datetime(2011, 5, 27, 15, 18, 16))

        # Settings can be added together
        self.assertEqual(Combined.settings.boolean, False)
        self.assertEqual(Combined.settings.integer, 1138)
        self.assertEqual(Combined.settings.string, 'THX')
        self.assertEqual(Combined.settings.enabled, True)
        self.assertEqual(Combined.settings.list_semi_colon, ['[email protected]', '[email protected]', '[email protected]'])
        self.assertEqual(Combined.settings.list_comma, ['[email protected]', '[email protected]', '[email protected]'])
        self.assertEqual(Combined.settings.date, datetime.date(2010, 4, 26))
        self.assertEqual(Combined.settings.time, datetime.time(14, 17, 15))
        self.assertEqual(Combined.settings.datetime, datetime.datetime(2010, 4, 26, 14, 17, 15))

        # Settings not in the database use empty defaults
        self.assertEqual(Unpopulated.settings.boolean, False)
        self.assertEqual(Unpopulated.settings.integer, None)
        self.assertEqual(Unpopulated.settings.string, '')
        self.assertEqual(Unpopulated.settings.list_semi_colon, [])
        self.assertEqual(Unpopulated.settings.list_comma, [])

        # ...Unless a default parameter was specified, then they use that
        self.assertEqual(Defaults.settings.boolean, True)
        self.assertEqual(Defaults.settings.boolean_false, False)
        self.assertEqual(Defaults.settings.integer, 1)
        self.assertEqual(Defaults.settings.string, 'default')
        self.assertEqual(Defaults.settings.list_semi_colon, ['one', 'two'])
        self.assertEqual(Defaults.settings.list_comma, ['one', 'two'])
        self.assertEqual(Defaults.settings.date, datetime.date(2012, 3, 14))
        self.assertEqual(Defaults.settings.time, datetime.time(12, 3, 14))
        self.assertEqual(Defaults.settings.datetime, datetime.datetime(2012, 3, 14, 12, 3, 14))

        # Settings should be retrieved in the order of definition
        self.assertEqual(Populated.settings.keys(),
                         ['boolean', 'integer', 'string', 'list_semi_colon',
                          'list_comma', 'date', 'time', 'datetime'])
        self.assertEqual(Combined.settings.keys(),
                         ['boolean', 'integer', 'string', 'list_semi_colon',
                          'list_comma', 'date', 'time', 'datetime', 'enabled'])

        # Values should be coerced to the proper Python types
        self.assertTrue(isinstance(Populated.settings.boolean, bool))
        self.assertTrue(isinstance(Populated.settings.integer, int))
        self.assertTrue(isinstance(Populated.settings.string, six.string_types))

        # Settings cannot be accessed from model instances, only from the model class
        self.assertRaises(AttributeError, lambda: Populated().settings)
        self.assertRaises(AttributeError, lambda: Unpopulated().settings)

        # Updates are reflected in the live settings
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'boolean', True)
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'integer', 13)
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'string', 'Friday')
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'list_semi_colon',
                                  '[email protected];[email protected]')
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'list_comma',
                                  '[email protected],[email protected]')
        # For date/time you can specify a string (as above) or a proper object
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'date',
                                  datetime.date(1912, 6, 23))
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'time',
                                  datetime.time(1, 2, 3))
        loading.set_setting_value(MODULE_NAME, 'Unpopulated', 'datetime',
                                  datetime.datetime(1912, 6, 23, 1, 2, 3))

        self.assertEqual(Unpopulated.settings.boolean, True)
        self.assertEqual(Unpopulated.settings.integer, 13)
        self.assertEqual(Unpopulated.settings.string, 'Friday')
        self.assertEqual(Unpopulated.settings.list_semi_colon, ['[email protected]', '[email protected]'])
        self.assertEqual(Unpopulated.settings.list_comma, ['[email protected]', '[email protected]'])
        self.assertEqual(Unpopulated.settings.date, datetime.date(1912, 6, 23))
        self.assertEqual(Unpopulated.settings.time, datetime.time(1, 2, 3))
        self.assertEqual(Unpopulated.settings.datetime, datetime.datetime(1912, 6, 23, 1, 2, 3))

        # Updating settings with defaults
        loading.set_setting_value(MODULE_NAME, 'Defaults', 'boolean', False)
        self.assertEqual(Defaults.settings.boolean, False)
        loading.set_setting_value(MODULE_NAME, 'Defaults', 'boolean_false', True)
        self.assertEqual(Defaults.settings.boolean_false, True)

        # Updating blankable settings
        self.assertEqual(Blankable.settings.string, '')
        loading.set_setting_value(MODULE_NAME, 'Blankable', 'string', 'Eli')
        self.assertEqual(Blankable.settings.string, 'Eli')
        loading.set_setting_value(MODULE_NAME, 'Blankable', 'string', '')
        self.assertEqual(Blankable.settings.string, '')

        # And they can be modified in-place
        Unpopulated.settings.boolean = False
        Unpopulated.settings.integer = 42
        Unpopulated.settings.string = 'Caturday'
        Unpopulated.settings.date = datetime.date(1939, 9, 1)
        Unpopulated.settings.time = '03:47:00'
        Unpopulated.settings.datetime = datetime.datetime(1939, 9, 1, 3, 47, 0)
        # Test correct stripping while we're at it.
        Unpopulated.settings.list_semi_colon = '[email protected]; [email protected]'
        Unpopulated.settings.list_comma = '[email protected] ,[email protected]'
        self.assertEqual(Unpopulated.settings.boolean, False)
        self.assertEqual(Unpopulated.settings.integer, 42)
        self.assertEqual(Unpopulated.settings.string, 'Caturday')
        self.assertEqual(Unpopulated.settings.list_semi_colon, ['[email protected]', '[email protected]'])
        self.assertEqual(Unpopulated.settings.list_comma, ['[email protected]', '[email protected]'])
        self.assertEqual(Unpopulated.settings.date, datetime.date(1939, 9, 1))
        self.assertEqual(Unpopulated.settings.time, datetime.time(3, 47, 0))
        self.assertEqual(Unpopulated.settings.datetime, datetime.datetime(1939, 9, 1, 3, 47, 0))
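
The test above relies on the settings layer coercing stored values to proper Python types, and the comment notes that date/time values may be passed either as strings or as objects. A minimal standalone sketch of that kind of coercion (an illustration only, not the project's actual implementation; to_date is a hypothetical helper):

import datetime

def to_date(value):
    """Return a datetime.date from a date/datetime object or an ISO 'YYYY-MM-DD' string."""
    if isinstance(value, datetime.datetime):
        # datetime is a subclass of date, so check it first and drop the time part
        return value.date()
    if isinstance(value, datetime.date):
        return value
    return datetime.datetime.strptime(value, "%Y-%m-%d").date()

assert to_date("1912-06-23") == datetime.date(1912, 6, 23)
assert to_date(datetime.date(1939, 9, 1)) == datetime.date(1939, 9, 1)
assert to_date(datetime.datetime(1939, 9, 1, 3, 47)) == datetime.date(1939, 9, 1)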

Example 47

Project: silver Source File: test_subscription.py
    def test_subscription_mf_units_log_intervals(self):
        subscription = SubscriptionFactory.create()
        metered_feature = MeteredFeatureFactory.create()

        subscription.plan.metered_features.add(metered_feature)

        subscription.start_date = datetime.date(year=2015, month=2, day=17)
        subscription.activate()
        subscription.save()

        # Every month, 16 days of trial
        subscription.plan.interval = Plan.INTERVALS.MONTH
        subscription.plan.interval_count = 1
        subscription.plan.save()

        subscription.trial_end = (subscription.start_date +
                                  datetime.timedelta(days=15))

        start_date = subscription.start_date
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=2, day=17))

        end_date = datetime.date(year=2015, month=2, day=28)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=2, day=23))

        start_date = datetime.date(year=2015, month=3, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=3, day=1))

        end_date = datetime.date(year=2015, month=3, day=4)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=3, day=1))

        start_date = datetime.date(year=2015, month=3, day=5)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=3, day=5))

        end_date = datetime.date(year=2015, month=3, day=31)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=3, day=22))

        start_date = datetime.date(year=2015, month=4, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=4, day=5))

        end_date = datetime.date(year=2015, month=4, day=30)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=4, day=22))

        # Every 2 months, 5 months of trial (2015-05-30)
        subscription.plan.interval = Plan.INTERVALS.MONTH
        subscription.plan.interval_count = 2
        subscription.plan.save()

        subscription.start_date = datetime.date(year=2014, month=12, day=31)
        subscription.trial_end = (subscription.start_date +
                                  datetime.timedelta(days=150))
        subscription.save()

        start_date = datetime.date(year=2014, month=12, day=31)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2014, month=12, day=31))

        end_date = datetime.date(year=2014, month=12, day=31)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2014, month=12, day=31))

        start_date = datetime.date(year=2015, month=1, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=1, day=1))

        end_date = datetime.date(year=2015, month=1, day=31)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=1, day=1))

        start_date = datetime.date(year=2015, month=3, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=3, day=23))

        end_date = datetime.date(year=2015, month=4, day=30)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=4, day=30))

        start_date = datetime.date(year=2015, month=5, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=5, day=23))

        end_date = datetime.date(year=2015, month=5, day=30)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=5, day=30))

        start_date = datetime.date(year=2015, month=6, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=6, day=1))

        end_date = datetime.date(year=2015, month=6, day=30)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=6, day=1))

        # Every 2 weeks, 8 days of trial
        subscription.plan.interval = Plan.INTERVALS.WEEK
        subscription.plan.interval_count = 2
        subscription.plan.save()

        subscription.start_date = datetime.date(year=2015, month=5, day=31)
        subscription.trial_end = (subscription.start_date +
                                  datetime.timedelta(days=7))
        subscription.save()

        start_date = datetime.date(year=2015, month=5, day=31)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=5, day=31))

        end_date = datetime.date(year=2015, month=5, day=31)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=5, day=31))

        start_date = datetime.date(year=2015, month=6, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=6, day=1))

        end_date = datetime.date(year=2015, month=6, day=7)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=6, day=1))

        start_date = datetime.date(year=2015, month=6, day=8)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=6, day=8))

        end_date = datetime.date(year=2015, month=6, day=14)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=6, day=8))

        start_date = datetime.date(year=2015, month=6, day=15)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=6, day=15))

        end_date = datetime.date(year=2015, month=6, day=28)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=6, day=28))

        # Every year, 3 months (90 days) of trial
        subscription.plan.interval = Plan.INTERVALS.YEAR
        subscription.plan.interval_count = 1
        subscription.plan.save()

        subscription.start_date = datetime.date(year=2015, month=2, day=2)
        subscription.trial_end = (subscription.start_date +
                                  datetime.timedelta(days=90))
        subscription.save()

        start_date = subscription.start_date
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=2, day=2)
        )

        end_date = datetime.date(year=2015, month=5, day=3)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=2, day=2))

        start_date = datetime.date(year=2015, month=5, day=4)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2015, month=5, day=4))

        end_date = datetime.date(year=2015, month=12, day=31)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2015, month=5, day=5))

        start_date = datetime.date(year=2016, month=1, day=1)
        assert start_date == subscription.bucket_start_date(
            reference_date=datetime.date(year=2016, month=1, day=1))

        end_date = datetime.date(year=2016, month=12, day=31)
        assert end_date == subscription.bucket_end_date(
            reference_date=datetime.date(year=2016, month=12, day=31))
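
The monthly assertions above (e.g. the 2015-03-01 .. 2015-03-31 bucket) come down to snapping a reference date to the boundaries of its calendar month. A rough standalone sketch of that computation with datetime.date and calendar.monthrange (an illustration only, not silver's bucket_start_date/bucket_end_date implementation; month_bucket is a hypothetical helper):

import calendar
import datetime

def month_bucket(reference_date):
    """Return (first_day, last_day) of the calendar month containing reference_date."""
    last_day = calendar.monthrange(reference_date.year, reference_date.month)[1]
    return reference_date.replace(day=1), reference_date.replace(day=last_day)

assert month_bucket(datetime.date(2015, 3, 22)) == (
    datetime.date(2015, 3, 1), datetime.date(2015, 3, 31))
assert month_bucket(datetime.date(2015, 4, 22)) == (
    datetime.date(2015, 4, 1), datetime.date(2015, 4, 30))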

Example 48

Project: django-admin-timeline Source File: views.py
@csrf_exempt
@never_cache
@staff_member_required
def log(request, template_name=TEMPLATE_NAME, \
        template_name_ajax=TEMPLATE_NAME_AJAX):
    """
    Get a number of log entries. Serves both non-AJAX and AJAX driven requests.

    Since entries are broken down per day and the timeline uses AJAX-driven
    infinite scroll, we want to avoid duplicated date headers. To do so, we
    always pass a variable named "last_date" when making another request to
    our main AJAX-driven view. The scenario looks like this:

    The initial timeline is rendered as normal HTML (a non-AJAX request) from
    a list of log entries. We also send the date of the last element to the
    context as "last_date", which is used as the initial value of a global
    JavaScript variable. That date is later sent to the AJAX-driven view and
    used when rendering (the "render_to_string" method). After rendering the
    HTML to send back, we take the date of the last element and return it
    along with the rendered HTML in the JSON response. When the JSON response
    is received, the above-mentioned global JavaScript variable is updated
    with the value given.

    :param request: django.http.HttpRequest
    :param template_name: str
    :param template_name_ajax: str
    :return: django.http.HttpResponse

    This view accepts the following POST variables (all optional).
    :param page: int - Page number to get.
    :param user_id: int - If set, used to filter the user by.
    :param last_date: str - Example value "2012-05-24".
    :param start_date: str - If set, used as a start date to filter the actions
        with. Example value "2012-05-24".
    :param end_date: str - If set, used as an end date to filter the actions
        with. Example value "2012-05-24".

    NOTE: If the filtering gets too complicated, we need forms to validate
    and process the POST data.
    """
    def _get_date_from_string(s):
        """
        Get a date from the given string.

        :param s: str - date in "YYYY-MM-DD" string format
        :return: datetime.date, or an empty string if parsing fails
        """
        try:
            return datetime.date(*map(lambda x: int(x), s.split("-")))
        except Exception as e:
            return ""

    try:
        page = int(request.POST.get('page', 1))
        if page < 1:
            page = 1
    except Exception as e:
        page = 1

    users = []
    content_types = []
    filter_form = None

    if 'POST' == request.method:
        post = dict(request.POST)
        if 'users[]' in post:
            post['users'] = post.pop('users[]')
        if 'content_types[]' in post:
            post['content_types'] = post.pop('content_types[]')

        filter_form = FilterForm(post)
        if filter_form.is_valid():
            users = filter_form.cleaned_data['users']
            content_types = filter_form.cleaned_data['content_types']
        else:
            pass # Anything to do here?
    else:
        filter_form = FilterForm()

    # Some kind of a pagination
    start = (page - 1) * NUMBER_OF_ENTRIES_PER_PAGE
    end = page * NUMBER_OF_ENTRIES_PER_PAGE

    # Getting admin log entries, taking the page number into consideration.
    log_entries = LogEntry.objects.all().select_related('content_type', 'user')

    start_date = _get_date_from_string(request.POST.get('start_date'))
    end_date = _get_date_from_string(request.POST.get('end_date'))

    if start_date:
        log_entries = log_entries.filter(action_time__gte=start_date) # TODO

    if end_date:
        log_entries = log_entries.filter(action_time__lte=end_date) # TODO

    # If users given, filtering by users
    if users:
        log_entries = log_entries.filter(user__id__in=users)

    # If content types given, filtering by content types
    if content_types:
        log_entries = log_entries.filter(content_type__id__in=content_types)

    # Applying limits / freezing the queryset
    log_entries = log_entries[start:end]

    if log_entries:
        last_date = date_format(
            log_entries[len(log_entries) - 1].action_time, "Y-m-d"
            )
    else:
        last_date = request.POST.get('last_date', None)

    # Using different template for AJAX driven requests
    if request.is_ajax():
        # Context to render the AJAX driven HTML with
        context = {
            'admin_log': log_entries,
            'number_of_entries_per_page': NUMBER_OF_ENTRIES_PER_PAGE,
            'page': page,
            'last_date': request.POST.get('last_date', None),
            'SINGLE_LOG_ENTRY_DATE_FORMAT': SINGLE_LOG_ENTRY_DATE_FORMAT,
            'LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT': \
                LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT
        }

        # Rendering HTML for an AJAX driven request
        html = render_to_string(
            template_name_ajax,
            context,
            context_instance=RequestContext(request)
        )

        # Context to send back to user in a JSON response
        context = {
            'html': html,
            'last_date': last_date,
            'success': 1 if len(log_entries) else 0
        }
        return HttpResponse(json.dumps(context))

    # Context for a non-AJAX request
    context = {
        'admin_log': log_entries,
        'number_of_entries_per_page': NUMBER_OF_ENTRIES_PER_PAGE,
        'page': page,
        'last_date': last_date,
        'start_date': date_format(start_date, "Y-m-d") if start_date else "",
        'end_date': date_format(end_date, "Y-m-d") if end_date else "",
        'users': [int(u) for u in users],
        'content_types': [int(ct) for ct in content_types],
        'filter_form': filter_form,
        'SINGLE_LOG_ENTRY_DATE_FORMAT': SINGLE_LOG_ENTRY_DATE_FORMAT,
        'LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT': \
            LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT,
        'title': _("Timeline") # For template breadcrumbs, etc.
    }

    return render_to_response(
        template_name, context, context_instance=RequestContext(request)
        )
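
The _get_date_from_string helper above builds a datetime.date by unpacking a "YYYY-MM-DD" string into the constructor. A standalone illustration of that trick next to the stricter strptime equivalent (the strptime variant is shown only for comparison and is not part of django-admin-timeline):

import datetime

s = "2012-05-24"
d1 = datetime.date(*map(int, s.split("-")))             # unpack year, month, day
d2 = datetime.datetime.strptime(s, "%Y-%m-%d").date()   # same result, stricter parsing
assert d1 == d2 == datetime.date(2012, 5, 24)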

Example 49

Project: pymo Source File: test_str.py
Function: test_format
    def test_format(self):
        self.assertEqual(''.format(), '')
        self.assertEqual('a'.format(), 'a')
        self.assertEqual('ab'.format(), 'ab')
        self.assertEqual('a{{'.format(), 'a{')
        self.assertEqual('a}}'.format(), 'a}')
        self.assertEqual('{{b'.format(), '{b')
        self.assertEqual('}}b'.format(), '}b')
        self.assertEqual('a{{b'.format(), 'a{b')

        # examples from the PEP:
        import datetime
        self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
        self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
                         "My name is Fred")
        self.assertEqual("My name is {0} :-{{}}".format('Fred'),
                         "My name is Fred :-{}")

        d = datetime.date(2007, 8, 18)
        self.assertEqual("The year is {0.year}".format(d),
                         "The year is 2007")

        # classes we'll use for testing
        class C:
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec

        class D:
            def __init__(self, x):
                self.x = x
            def __format__(self, spec):
                return str(self.x)

        # class with __str__, but no __format__
        class E:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return 'E(' + self.x + ')'

        # class with __repr__, but no __format__ or __str__
        class F:
            def __init__(self, x):
                self.x = x
            def __repr__(self):
                return 'F(' + self.x + ')'

        # class with __format__ that forwards to string, for some format_spec's
        class G:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return "string is " + self.x
            def __format__(self, format_spec):
                if format_spec == 'd':
                    return 'G(' + self.x + ')'
                return object.__format__(self, format_spec)

        # class that returns a bad type from __format__
        class H:
            def __format__(self, format_spec):
                return 1.0

        class I(datetime.date):
            def __format__(self, format_spec):
                return self.strftime(format_spec)

        class J(int):
            def __format__(self, format_spec):
                return int.__format__(self * 2, format_spec)


        self.assertEqual(''.format(), '')
        self.assertEqual('abc'.format(), 'abc')
        self.assertEqual('{0}'.format('abc'), 'abc')
        self.assertEqual('{0:}'.format('abc'), 'abc')
        self.assertEqual('X{0}'.format('abc'), 'Xabc')
        self.assertEqual('{0}X'.format('abc'), 'abcX')
        self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
        self.assertEqual('{1}'.format(1, 'abc'), 'abc')
        self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
        self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
        self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
        self.assertEqual('{0}'.format(-15), '-15')
        self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
        self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
        self.assertEqual('{{'.format(), '{')
        self.assertEqual('}}'.format(), '}')
        self.assertEqual('{{}}'.format(), '{}')
        self.assertEqual('{{x}}'.format(), '{x}')
        self.assertEqual('{{{0}}}'.format(123), '{123}')
        self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
        self.assertEqual('}}{{'.format(), '}{')
        self.assertEqual('}}x{{'.format(), '}x{')

        # weird field names
        self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
        self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
        self.assertEqual("{0[ ]}".format({' ':3}), '3')

        self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
        self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
        self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
        self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
        self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
        self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
        self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')

        # strings
        self.assertEqual('{0:.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:.3s}'.format('ab'), 'ab')
        self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
        self.assertEqual('{0:.0s}'.format('abcdef'), '')
        self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
        self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
        self.assertEqual('{0:x<0s}'.format('result'), 'result')
        self.assertEqual('{0:x<5s}'.format('result'), 'result')
        self.assertEqual('{0:x<6s}'.format('result'), 'result')
        self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
        self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
        self.assertEqual('{0: <7s}'.format('result'), 'result ')
        self.assertEqual('{0:<7s}'.format('result'), 'result ')
        self.assertEqual('{0:>7s}'.format('result'), ' result')
        self.assertEqual('{0:>8s}'.format('result'), '  result')
        self.assertEqual('{0:^8s}'.format('result'), ' result ')
        self.assertEqual('{0:^9s}'.format('result'), ' result  ')
        self.assertEqual('{0:^10s}'.format('result'), '  result  ')
        self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
        self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
        self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)

        # format specifiers for user defined type
        self.assertEqual('{0:abc}'.format(C()), 'abc')

        # !r and !s coercions
        self.assertEqual('{0!s}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:15}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')

        # test fallback to object.__format__
        self.assertEqual('{0}'.format({}), '{}')
        self.assertEqual('{0}'.format([]), '[]')
        self.assertEqual('{0}'.format([1]), '[1]')
        self.assertEqual('{0}'.format(E('data')), 'E(data)')
        self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
        self.assertEqual('{0!s}'.format(G('data')), 'string is data')

        msg = 'object.__format__ with a non-empty format string is deprecated'
        with test_support.check_warnings((msg, PendingDeprecationWarning)):
            self.assertEqual('{0:^10}'.format(E('data')), ' E(data)  ')
            self.assertEqual('{0:^10s}'.format(E('data')), ' E(data)  ')
            self.assertEqual('{0:>15s}'.format(G('data')), ' string is data')

        self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
                                                       month=8,
                                                       day=27)),
                         "date: 2007-08-27")

        # test deriving from a builtin type and overriding __format__
        self.assertEqual("{0}".format(J(10)), "20")


        # string format specifiers
        self.assertEqual('{0:}'.format('a'), 'a')

        # computed format specifiers
        self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello     ')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello     ')

        # test various errors
        self.assertRaises(ValueError, '{'.format)
        self.assertRaises(ValueError, '}'.format)
        self.assertRaises(ValueError, 'a{'.format)
        self.assertRaises(ValueError, 'a}'.format)
        self.assertRaises(ValueError, '{a'.format)
        self.assertRaises(ValueError, '}a'.format)
        self.assertRaises(IndexError, '{0}'.format)
        self.assertRaises(IndexError, '{1}'.format, 'abc')
        self.assertRaises(KeyError,   '{x}'.format)
        self.assertRaises(ValueError, "}{".format)
        self.assertRaises(ValueError, "{".format)
        self.assertRaises(ValueError, "}".format)
        self.assertRaises(ValueError, "abc{0:{}".format)
        self.assertRaises(ValueError, "{0".format)
        self.assertRaises(IndexError, "{0.}".format)
        self.assertRaises(ValueError, "{0.}".format, 0)
        self.assertRaises(IndexError, "{0[}".format)
        self.assertRaises(ValueError, "{0[}".format, [])
        self.assertRaises(KeyError,   "{0]}".format)
        self.assertRaises(ValueError, "{0.[]}".format, 0)
        self.assertRaises(ValueError, "{0..foo}".format, 0)
        self.assertRaises(ValueError, "{0[0}".format, 0)
        self.assertRaises(ValueError, "{0[0:foo}".format, 0)
        self.assertRaises(KeyError,   "{c]}".format)
        self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
        self.assertRaises(ValueError, "{0}}".format, 0)
        self.assertRaises(KeyError,   "{foo}".format, bar=3)
        self.assertRaises(ValueError, "{0!x}".format, 3)
        self.assertRaises(ValueError, "{0!}".format, 0)
        self.assertRaises(ValueError, "{0!rs}".format, 0)
        self.assertRaises(ValueError, "{!}".format)
        self.assertRaises(IndexError, "{:}".format)
        self.assertRaises(IndexError, "{:s}".format)
        self.assertRaises(IndexError, "{}".format)

        # issue 6089
        self.assertRaises(ValueError, "{0[0]x}".format, [None])
        self.assertRaises(ValueError, "{0[0](10)}".format, [None])

        # can't have a replacement on the field name portion
        self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)

        # exceed maximum recursion depth
        self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
        self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
                          0, 1, 2, 3, 4, 5, 6, 7)

        # string format spec errors
        self.assertRaises(ValueError, "{0:-s}".format, '')
        self.assertRaises(ValueError, format, "", "-")
        self.assertRaises(ValueError, "{0:=s}".format, '')

Example 50

Project: coursys Source File: faculty_test_data.py
Function: personal_data
    def personal_data(self):
        # get the objects that should already be there
        greg = Person.objects.get(userid='ggbaker')
        diana = Person.objects.get(userid='diana')
        brad = Person.objects.get(userid='bbart')
        tony = Person.objects.get(userid='dixon')
        danyu = Person.objects.get(userid='dzhao')
        farid = Person.objects.get(userid='mfgolnar')
        phillip = Person.objects.get(userid='phillip')
        editor = tony

        cmpt = Unit.objects.get(slug='cmpt')
        ensc = Unit.objects.get(slug='ensc')
        mse = Unit.objects.get(slug='mse')
        phil = Unit.objects.get(slug='phil')
        fas = Unit.objects.get(slug='fas')

        # create basic roles
        Role.objects.get_or_create(person=greg, unit=cmpt, role='FAC')
        Role.objects.get_or_create(person=brad, unit=cmpt, role='FAC')
        r,_ = Role.objects.get_or_create(person=diana, unit=cmpt, role='FAC')
        r.gone = True
        r.save()
        Role.objects.get_or_create(person=tony, unit=cmpt, role='FAC')
        Role.objects.get_or_create(person=tony, unit=ensc, role='FAC')
        Role.objects.get_or_create(person=farid, unit=mse, role='FAC')
        Role.objects.get_or_create(person=phillip, unit=phil, role='FAC')
        Role.objects.get_or_create(person=tony, unit=cmpt, role='ADMN')
        set_privacy_signed(tony)
        Role.objects.get_or_create(person=danyu, unit=fas, role='ADMN')
        set_privacy_signed(danyu)

        # create some events

        for person in [tony, diana, greg]:
            # appointment
            e, h = event_get_or_create(person=person, unit=cmpt, event_type='APPOINT', start_date=date(2000,9,1),
                                    status='A')
            e.config = {'spousal_hire': False, 'leaving_reason': 'HERE'}
            h.save(editor=editor)
            appt = e

            # teaching load
            e, h = event_get_or_create(person=person, unit=cmpt, event_type='NORM_TEACH', start_date=date(2000,9,1),
                                    status='A')
            e.config = {'load': 2}
            h.save(editor=editor)

            # annual salary updates
            for year in range(2000, 2014):
                e, h = event_get_or_create(person=person, unit=cmpt, event_type='SALARY', start_date=date(year,9,1),
                                        status='A')
                e.config = {'rank': 'SLEC' if year>2005 else 'LECT',
                            'step': year-1999,
                            'base_salary': 60000 + (year-2000)*2000,
                            'add_salary': 1000 if year>2005 else 0,
                            'add_pay': 500 if year<2005 else 0,
                            }
                h.save(editor=editor)

        # teaching credits
        e, h = event_get_or_create(person=greg, unit=cmpt, event_type='TEACHING', start_date=date(2012,9,1),
                                end_date=date(2012,12,31), status='A')
        e.config = {'category': 'RELEASE', 'teaching_credits': '1', 'reason': "We just couldn't say no!"}
        h.save(editor=editor)

        e, h = event_get_or_create(person=greg, unit=cmpt, event_type='TEACHING', start_date=date(2013,9,1),
                                end_date=date(2014,4,30), status='NA',
                                comments='Note that this is one teaching credit spread across two semesters.')
        e.config = {'category': 'BUYOUT', 'teaching_credits': '1/2', 'reason': "Somebody gave money."}
        h.save(editor=editor)

        # admin position
        e, h = event_get_or_create(person=greg, unit=cmpt, event_type='ADMINPOS', start_date=date(2008,9,1),
                                end_date=date(2010,8,31), status='A')
        e.config = {'position': 'UGRAD_DIRECTOR', 'teaching_credit': '1/2'}
        h.save(editor=editor)

        # admin position in other unit
        e, h = event_get_or_create(person=greg, unit=ensc, event_type='ADMINPOS', start_date=date(2010,9,1),
                                end_date=date(2011,8,31), status='A')
        e.config = {'position': 'UGRAD_DIRECTOR', 'teaching_credit': '0'}
        h.save(editor=editor)

        # a memo
        mt = MemoTemplate.objects.filter(event_type='APPOINT')[0]
        m, _ = Memo.objects.get_or_create(career_event=appt, unit=cmpt, sent_date=date(1999,8,15), to_lines='Greg Baker',
                cc_lines='The FAS Dean\nVancouver Sun', from_person=tony, from_lines='Tony Dixon, CMPT',
                subject='Appointment as lecturer', template=mt, created_by=tony)
        m.memo_text = ("We are pleased to appoint Gregory Baker to a new job.\n\n" +
                "Because we are so excited to hire him, we will be throwing a party. Date to be announced.")
        m.save()

        # some leaves etc to demo salary/fallout
        e, h = event_get_or_create(person=diana, unit=cmpt, event_type='LEAVE', start_date=date(2014,1,1),
                                end_date=date(2014,12,31), status='A')
        e.config = {'reason': 'MEDICAL', 'leave_fraction': '1/2', 'teaching_load_decrease': 1, 'teaching_credits': 0}
        h.save(editor=editor)

        e, h = event_get_or_create(person=tony, unit=cmpt, event_type='STUDYLEAVE', start_date=date(2013,9,1),
                                end_date=date(2014,8,31), status='A')
        e.config = {'pay_fraction': '4/5', 'teaching_decrease': 2, 'study_leave_credits': 24, 'credits_forward': 0}
        h.save(editor=editor)

        e, h = event_get_or_create(person=tony, unit=cmpt, event_type='FELLOW', start_date=date(2012,1,1),
                                end_date=None, status='A')
        e.config = {'position': 'BBYM', 'add_salary': 0, 'add_pay': 10000, 'teaching_credit': 0}
        h.save(editor=editor)


        # out-of-unit events: Dean's office staff should see MSE stuff
        e, h = event_get_or_create(person=farid, unit=mse, event_type='SALARY', start_date=date(2000,9,1),
                                status='A')
        e.config = {'step': 7,
                    'base_salary': 100000,
                    'add_salary': 17,
                    'add_pay': '6.50',
                    'rank': 'FULL',
                    }
        h.save(editor=editor)

        e, h = event_get_or_create(person=farid, unit=mse, event_type='NORM_TEACH', start_date=date(2000,9,1),
                                status='A')
        e.config = {'load': 1}
        h.save(editor=editor)

        # out-of-unit events: nobody should be seeing PHIL events
        e, h = event_get_or_create(person=phillip, unit=phil, event_type='SALARY', start_date=date(2000,9,1),
                                status='A')
        e.config = {'step': 7,
                    'base_salary': 1000000,
                    'add_salary': 17,
                    'add_pay': '6.50',
                    }
        h.save(editor=editor)

        e, h = event_get_or_create(person=phillip, unit=phil, event_type='NORM_TEACH', start_date=date(2000,9,1),
                                status='A')
        e.config = {'load': 2}
        h.save(editor=editor)


        # some grants
        tg, _ = TempGrant.objects.get_or_create(label="Cukierman startup", initial=4000, project_code='13-12345',
                                             creator=danyu)
        tg.config['cur_month'] = 500
        tg.config['ytd_actual'] = 1000
        tg.config['cur_balance'] = 1500
        tg.save()

        g, _ = Grant.objects.get_or_create(title="Baker startup grant", label='Baker startup', unit=cmpt,
                                        project_code='13-23456', start_date=date(2000,9,1), initial=4000, overhead=0)
        go, _ = GrantOwner.objects.get_or_create(grant=g, person=greg)
        go.save()

        gb, _ = GrantBalance.objects.get_or_create(grant=g, date=date(2000,9,30), balance=4000, actual=0, month=0)
        gb.save()
        gb, _ = GrantBalance.objects.get_or_create(grant=g, date=date(2001,9,10), balance=3000, actual=1000, month=250)
        gb.save()
        gb, _ = GrantBalance.objects.get_or_create(grant=g, date=date(2002,9,10), balance=0, actual=3000, month=0)
        gb.save()
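
The fixture above leans on the datetime.date constructor for recurring and ranged dates (an annual salary event each September and semester-long teaching events). A tiny standalone illustration of those constructions (illustration only, not part of the coursys fixture):

from datetime import date

# One salary event start date per year, as in the range(2000, 2014) loop above
annual_starts = [date(year, 9, 1) for year in range(2000, 2014)]
assert annual_starts[0] == date(2000, 9, 1)
assert annual_starts[-1] == date(2013, 9, 1)

# A semester-long range like the 2012 teaching-credit event
fall_2012 = (date(2012, 9, 1), date(2012, 12, 31))
assert (fall_2012[1] - fall_2012[0]).days == 121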