datetime.datetime.today

Here are examples of the Python API datetime.datetime.today taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

200 Examples (page 7)
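
Before the project examples, a minimal standalone sketch of the call itself: datetime.datetime.today() returns a naive datetime for the current local date and time (effectively the same as datetime.datetime.now() without a timezone argument), and most of the examples below either compare that value against stored datetimes or offset it with a timedelta.

import datetime

today = datetime.datetime.today()       # naive local date and time
print(today)                            # e.g. 2016-05-12 14:03:27.123456

# datetime.date.today() returns only the calendar date
print(datetime.date.today())            # e.g. 2016-05-12

# timedelta arithmetic is the pattern used throughout the examples below
week_ago = today - datetime.timedelta(days=7)
print(today > week_ago)                 # True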

Example 51

Project: stopstalk-deployment
Source File: problems.py
def trending():
    """
        Show trending problems globally and among friends
        @ToDo: Needs lot of comments explaining the code
    """

    stable = db.submission

    today = datetime.datetime.today()
    # Consider submissions only after PAST_DAYS(customizable)
    # for trending problems
    start_date = str(today - datetime.timedelta(days=current.PAST_DAYS))
    query = (stable.time_stamp >= start_date)
    last_submissions = db(query).select(stable.problem_name,
                                        stable.problem_link,
                                        stable.user_id,
                                        stable.custom_user_id)

    if auth.is_logged_in():
        friends, cusfriends = utilities.get_friends(session.user_id)

        # The Original IDs of duplicate custom_friends
        custom_friends = []
        for cus_id in cusfriends:
            if cus_id[1] is None:
                custom_friends.append(cus_id[0])
            else:
                custom_friends.append(cus_id[1])

    problems_dict = {}
    friends_problems_dict = {}
    for submission in last_submissions:
        plink = submission.problem_link
        pname = submission.problem_name
        uid = submission.user_id
        cid = submission.custom_user_id

        # @ToDo: Improve this code
        if problems_dict.has_key(plink):
            problems_dict[plink]["total_submissions"] += 1
        else:
            problems_dict[plink] = {"name": pname,
                                    "total_submissions": 1,
                                    "users": set([]),
                                    "custom_users": set([])}

        if auth.is_logged_in() and \
           ((uid and uid in friends) or \
            (cid and cid in custom_friends)):

            if friends_problems_dict.has_key(plink):
                friends_problems_dict[plink]["total_submissions"] += 1
            else:
                friends_problems_dict[plink] = {"name": pname,
                                                "total_submissions": 1,
                                                "users": set([]),
                                                "custom_users": set([])}
            if uid:
                friends_problems_dict[plink]["users"].add(uid)
            else:
                friends_problems_dict[plink]["custom_users"].add(cid)

        if uid:
            problems_dict[plink]["users"].add(uid)
        else:
            problems_dict[plink]["custom_users"].add(cid)

    # Sort the rows according to the number of users
    # who solved the problem in last PAST_DAYS
    custom_compare = lambda x: (len(x[1]["users"]) + \
                                len(x[1]["custom_users"]),
                                x[1]["total_submissions"])

    global_trending = sorted(problems_dict.items(),
                             key=custom_compare,
                             reverse=True)

    global_table = _render_trending("Trending Globally",
                                    global_trending[:current.PROBLEMS_PER_PAGE],
                                    "Users")
    if auth.is_logged_in():
        friends_trending = sorted(friends_problems_dict.items(),
                                  key=custom_compare,
                                  reverse=True)

        friend_table = _render_trending("Trending among friends",
                                        friends_trending[:current.PROBLEMS_PER_PAGE],
                                        "Friends")

        div = DIV(DIV(friend_table, _class="col s6"),
                  DIV(global_table, _class="col s6"),
                  _class="row col s12")
    else:
        div = DIV(global_table, _class="center")

    return dict(div=div)
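
The date logic at the heart of Example 51, isolated as a small sketch; PAST_DAYS stands in for the project's configurable current.PAST_DAYS and the value 7 is only a placeholder:

import datetime

PAST_DAYS = 7  # placeholder for current.PAST_DAYS
today = datetime.datetime.today()
start_date = str(today - datetime.timedelta(days=PAST_DAYS))
# start_date is then used as the lower bound of the time_stamp query,
# e.g. db(db.submission.time_stamp >= start_date)
print(start_date)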

Example 52

Project: sponge
Source File: repo.py
def rebalance_sync_schedule(errors=None):
    repoapi = RepositoryAPI()
    repos = get_repos()

    # get a list of sync frequencies
    syncgroups = dict()  # dict of sync time -> [groups]
    default = None
    for ckey, sync in config.list(filter=dict(name__startswith="sync_frequency_")).items():
        group = ckey.replace("sync_frequency_", "")
        if sync is None:
            logger.error("Sync frequency for %s is None, skipping" % group)
            continue
        synctime = 60 * 60 * int(sync)
        if "group" == "default":
            default = synctime
        else:
            try:
                syncgroups[synctime].append(group)
            except KeyError:
                syncgroups[synctime] = [group]

    # divide the repos up by sync time and sort them by inheritance,
    # reversed, to ensure that children get synced before parents and
    # a package doesn't just go straight to the final child
    cycles = dict() # dict of repo -> sync time
    for repo in repos.values():
        cycles[repo['id']] = default
        for synctime, groups in syncgroups.items():
            if (set(groups) & set(repo['groupid']) and
                (cycles[repo['id']] is None or
                 synctime > cycles[repo['id']])):
                cycles[repo['id']] = synctime

    # finally, build a dict of sync time -> [repos]
    syncs = dict()
    for repoid, synctime in cycles.items():
        if synctime is None:
            continue
        try:
            syncs[synctime].append(repos[repoid])
        except KeyError:
            syncs[synctime] = [repos[repoid]]

    for synctime, syncrepos in syncs.items():
        syncrepos = sort_repos_by_ancestry(syncrepos)
        syncrepos.reverse()

        # we count the total number of packages in all repos, and
        # divide them evenly amongst the timespan allotted.  It's
        # worth noting that we count clones just the same as we count
        # "regular" repos, because it's createrepo, not the sync, that
        # really takes a lot of time and memory.
        pkgs = 0
        for repo in syncrepos:
            if repo['package_count'] < 10:
                # we still have to run createrepo even if there are
                # very few (or no!) packages, so count very small
                # repos as 10 packages
                pkgs += 10
            else:
                pkgs += repo['package_count']
    
        try:
            pkgtime = float(synctime) / pkgs
        except ZeroDivisionError:
            pkgtime = 1
            logger.debug("Allowing %s seconds per package" % pkgtime)

        # find tomorrow morning at 12:00 am
        tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
        start = datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day)

        if errors is None:
            errors = []

        for repo in syncrepos:
            iso8601_start = format_iso8601_datetime(start)
            iso8601_interval = \
                format_iso8601_interval(datetime.timedelta(seconds=synctime))
            logger.debug("Scheduling %s to start at %s, sync every %s" %
                         (repo['id'], iso8601_start, iso8601_interval))
            schedule = parse_interval_schedule(iso8601_interval,
                                               iso8601_start,
                                               None)

            try:
                repoapi.change_sync_schedule(repo['id'],
                                             dict(schedule=schedule,
                                                  options=dict()))
                reload_repo(repo['id'])
            except ServerRequestError, err:
                errors.append("Could not set schedule for %s: %s" %
                              (repo['id'], err[1]))
            
            start += datetime.timedelta(seconds=int(pkgtime *
                                                    repo['package_count']))
    return not errors
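
The "tomorrow morning at 12:00 am" computation in Example 52 reduces to this sketch: add one day to today() and rebuild a datetime from just the year, month and day, which drops the time-of-day component:

import datetime

tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
start = datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day)
print(start)   # e.g. 2016-05-13 00:00:00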

Example 53

Project: goristock
Source File: goristock.py
  def __init__(self, stock_no, data_num = 75, debug=0):
    """ stock_no: Stock no.
        data_num: Default fetch numbers. (Default is 75)
        debug: For debug to print some info about data solution. (Default is 0)

        stock_no: Stock code.
        data_num: Default number of records to fetch (trading days, default 75).
        debug: For debugging; prints related debug info. 0: off (default), 1: on.

        property:
          self.raw_data = [list]    closing prices, [old -> new]
          self.stock_name = str()   name of the stock
          self.stock_no = str()     code of the stock
          self.data_date = [list]   trading dates, [old -> new]
          self.stock_range = [list] price change (rise/fall)
          self.stock_vol = [list]   trading volume
          self.stock_open = [list]  opening prices
          self.stock_h = [list]     daily highs
          self.stock_l = [list]     daily lows
    """
    self.raw_data = []
    self.stock_name = ''
    self.stock_no = stock_no
    self.data_date = []
    self.stock_range = []
    self.stock_vol = []
    self.stock_open = []
    self.stock_h = []
    self.stock_l = []
    starttime = 0
    self.debug = debug

    try:
      while len(self.raw_data) < data_num:
        # start fetch data.
        self.csv_read = self.fetch_data(stock_no, datetime.today() - timedelta(days = 30 * starttime), starttime)
        try:
          result = self.list_data(self.csv_read)
        except:
          # On the first day of a month the fetch may return no data.
          if starttime == 0:
            starttime += 1
            self.csv_read = self.fetch_data(stock_no, datetime.today() - timedelta(days = 30 * starttime), starttime)
            result = self.list_data(self.csv_read)
          logging.info('In first day of months %s' % stock_no)

        self.raw_data = result['stock_price'] + self.raw_data
        self.data_date = result['data_date'] + self.data_date
        self.stock_name = result['stock_name']
        self.stock_range = result['stock_range'] + self.stock_range
        self.stock_vol = result['stock_vol'] + self.stock_vol
        self.stock_open = result['stock_open'] + self.stock_open
        self.stock_h = result['stock_h'] + self.stock_h
        self.stock_l = result['stock_l'] + self.stock_l
        starttime += 1
    except:
      logging.info('Data not enough! %s' % stock_no)

    logging.info('Fetch %s' % stock_no)

Example 54

Project: python-ach
Source File: builder.py
    def add_batch(self, std_ent_cls_code, batch_entries=None,
                  credits=True, debits=False, eff_ent_date=None,
                  company_id=None):
        """
        Use this to add batches to the file. For valid std_ent_cls_codes see:
        http://en.wikipedia.org/wiki/Automated_Clearing_House#SEC_codes
        """
        if batch_entries is None:
            batch_entries = list()

        entry_desc = self.get_entry_desc(std_ent_cls_code)

        batch_count = len(self.batches) + 1

        if not eff_ent_date:
            eff_ent_date = datetime.today() + timedelta(days=1)

        if credits and debits:
            serv_cls_code = '200'
        elif credits:
            serv_cls_code = '220'
        elif debits:
            serv_cls_code = '225'

        batch_header = BatchHeader(
            serv_cls_code=serv_cls_code,
            batch_id=batch_count,
            company_id=company_id or self.settings['company_id'],
            std_ent_cls_code=std_ent_cls_code,
            entry_desc=entry_desc,
            desc_date='',
            eff_ent_date=eff_ent_date.strftime('%y%m%d'),  # YYMMDD
            orig_stat_code='1',
            orig_dfi_id=self.settings['immediate_dest'][:8],
            company_name=self.settings['immediate_org_name']
        )

        entries = list()
        entry_counter = 1

        for record in batch_entries:

            entry = EntryDetail(std_ent_cls_code)

            entry.transaction_code = record.get('type')
            entry.recv_dfi_id = record.get('routing_number')

            if len(record['routing_number']) < 9:
                entry.calc_check_digit()
            else:
                entry.check_digit = record['routing_number'][8]

            entry.dfi_acnt_num = record['account_number']
            entry.amount = int(round(float(record['amount']) * 100))
            entry.ind_name = record['name'].upper()[:22]
            entry.trace_num = self.settings['immediate_dest'][:8] \
                + entry.validate_numeric_field(entry_counter, 7)

            entries.append((entry, record.get('addenda', [])))
            entry_counter += 1

        self.batches.append(FileBatch(batch_header, entries))
        self.set_control()

Example 55

Project: daywatch
Source File: tests.py
    def create_run_data(self, site):
        """Given a site, create runs and error logs every day for the last ten
        days.
        """

        def create_test_error(time, category):
            return ErrorLog.objects.create(site=site,
                                           date_time=time,
                                           category=category,
                                           error_level=STATUS_UNKNOWN)

        def generate_categories(categories):
            out = []
            for category in categories:
                out += [category] * random.randint(1, 10)
            return out

        def rand_int():
            return int(random.randint(10, 100))

        objects = []

        today = datetime.today()
        for hour in [today - timedelta(days=n) for n in range(0, 24)]:
            start = hour
            end = hour
            run = Run.objects.create(site=site,
                                     start=start,
                                     end=end,
                                     offers=rand_int())
            categories = generate_categories(["Database", "Category",
                                              "Unknown", "HTML", "Offer",
                                              "Merchant"])
            errors = [create_test_error(start, cat)
                      for cat in categories]

            random.shuffle(errors)

            # To minimize the overhead of SQL inserts, objects are added to a
            # list and then shipped to the database all at once
            objects += [run] + errors

        print "Saving objects"
        for obj in objects:
            obj.save()

Example 56

Project: galah
Source File: create_assignment_csv.py
def _create_assignment_csv(csv_id, requester, assignment):
    csv_id = ObjectId(csv_id)

    csv_file = temp_directory = ""

    # Find any expired archives and remove them
    deleted_files = []
    for i in CSV.objects(expires__lt = datetime.datetime.today()):
        deleted_files.append(i.file_location)

        if i.file_location:
            try:
                os.remove(i.file_location)
            except OSError as e:
                logger.warning(
                    "Could not remove expired csv file at %s: %s.",
                    i.file_location, str(e)
                )

        i.delete()

    if deleted_files:
        logger.info("Deleted csv files %s.", str(deleted_files))

    # This is the CSV object that will be added to the database
    new_csv = CSV(
        id = csv_id,
        requester = requester
    )

    temp_directory = csv_file = None
    try:
        assn = Assignment.objects.get(id = ObjectId(assignment))

        # Grab all student users for this class.
        users = list(
            User.objects(
                account_type = "student",
                classes = assn.for_class
            )
        )

        # Form the query
        query = {
            "assignment": ObjectId(assignment),
            "most_recent": True,
            "user__in": [i.id for i in users]
        }

        # Grab the most recent submissions from each user.
        submissions = list(Submission.objects(**query))

        # Create the actual csv file.
        csv_file = open(os.path.join(config["CSV_DIRECTORY"], str(csv_id)), "w")

        for i in submissions:
            score = "None"
            if i.test_results:
                test_result = TestResult.objects.get(id = i.test_results)
                score = str(test_result.score)

            print >> csv_file, "%s,%s,%s" % \
                (i.user, score, i.timestamp.strftime("%Y-%m-%d-%H-%M-%S"))

        csv_file.close()

        new_csv.file_location = os.path.join(config["CSV_DIRECTORY"], str(csv_id))

        new_csv.expires = \
            datetime.datetime.today() + config["TEACHER_CSV_LIFETIME"]

        new_csv.save(force_insert = True)
    except Exception as e:
        new_csv.file_location = None
        os.remove(os.path.join(config["CSV_DIRECTORY"], str(csv_id)))

        new_csv.error_string = str(e)
        new_csv.save(force_insert = True)

        raise

Example 57

Project: galah
Source File: create_gradebook_csv.py
def _create_gradebook_csv(csv_id, requester, class_id, fill=0):
    csv_id = ObjectId(csv_id)

    csv_file = temp_directory = ""

    # Find any expired archives and remove them
    deleted_files = []
    for i in CSV.objects(expires__lt = datetime.datetime.today()):
        deleted_files.append(i.file_location)

        if i.file_location:
            try:
                os.remove(i.file_location)
            except OSError as e:
                logger.warning(
                    "Could not remove expired csv file at %s: %s.",
                    i.file_location, str(e)
                )

        i.delete()

    if deleted_files:
        logger.info("Deleted csv files %s.", str(deleted_files))

    # This is the CSV object that will be added to the database
    new_csv = CSV(
        id = csv_id,
        requester = requester
    )

    temp_directory = csv_file = None
    try:
        # Create the actual csv file.
        csv_file = open(os.path.join(config["CSV_DIRECTORY"], str(csv_id)), "w")

        the_class = Class.objects.get(id = ObjectId(class_id))

        # Grab all assignments in this class
        assns = list(
            Assignment.objects(for_class = the_class.id)
        )

        print >> csv_file, "%s,%s" % \
            ("Username", ",".join('"{0}"'.format(i.name) for i in assns))

        # Grab all student users for this class.
        users = list(
            User.objects(
                account_type = "student",
                classes = the_class.id
            )
        )

        assn_ids = [i.id for i in assns]
        for user in users:
            # Query for user's most recent submissions in the known assignments
            query = {
                "assignment__in": assn_ids,
                "most_recent": True,
                "user": user.id
            }

            submissions = list(Submission.objects(**query))

            # Initialize each assignment score to empty at first.
            assn_to_score = OrderedDict((i, str(fill)) for i in assn_ids)

            # Go through submissions, associating scores with assignment
            for sub in submissions:
                if sub.test_results:
                    test_result = TestResult.objects.get(id = sub.test_results)
                    if test_result.score is not None:
                        assn_to_score[sub.assignment] = str(test_result.score)

            # Write gradebook results to csv file.
            print >> csv_file, "%s,%s" % \
                (user.email, ",".join(assn_to_score.values()))

        csv_file.close()

        new_csv.file_location = os.path.join(config["CSV_DIRECTORY"], str(csv_id))

        new_csv.expires = \
            datetime.datetime.today() + config["TEACHER_CSV_LIFETIME"]

        new_csv.save(force_insert = True)
    except Exception as e:
        new_csv.file_location = None
        os.remove(os.path.join(config["CSV_DIRECTORY"], str(csv_id)))

        new_csv.error_string = str(e)
        new_csv.save(force_insert = True)

        raise

Example 58

Project: galah
Source File: zip_bulk_submissions.py
def _zip_bulk_submissions(archive_id, requester, assignment, email = ""):
    archive_id = ObjectId(archive_id)

    archive_file = temp_directory = ""

    # Find any expired archives and remove them
    deleted_files = []
    for i in Archive.objects(expires__lt = datetime.datetime.today()):
        deleted_files.append(i.file_location)

        if i.file_location:
            try:
                os.remove(i.file_location)
            except OSError as e:
                logger.warning(
                    "Could not remove expired archive at %s: %s.",
                    i.file_location, str(e)
                )

        i.delete()

    if deleted_files:
        logger.info("Deleted archives %s.", str(deleted_files))

    # This is the archive object we will eventually add to the database
    new_archive = Archive(
        id = archive_id,
        requester = requester,
        archive_type = "assignment_package"
    )

    temp_directory = archive_file = None
    try:
        # Form the query
        query = {"assignment": ObjectId(assignment)}

        # Only mention email in the query if it's not None or the empty
        # string, otherwise mongo will look for submissions that list the
        # user as None or the empty string (which should be exactly none of
        # the submissions in the system).
        if email:
            query["user"] = email
        else:
            # Otherwise, we need to be careful not to get teacher/TA submissions.
            assn = Assignment.objects.get(id = ObjectId(assignment))
            students = User.objects(
                account_type="student",
                classes = assn.for_class
            )
            query["user__in"] = [i.id for i in students]

        # Grab all the submissions
        submissions = list(Submission.objects(**query))

        if not submissions:
            logger.info("No submissions found matching query.")
            return

        # Organize all the submissions by user name, as this will closely
        # match the structure of the archive we will build.
        submission_map = {}
        for i in submissions:
            if i.user in submission_map:
                submission_map[i.user].append(i)
            else:
                submission_map[i.user] = [i]

        # Create a temporary directory we will create our archive in.
        temp_directory = tempfile.mkdtemp()

        # Create our directory tree. Instead of making new folders for each
        # submission and copying the user's files over however, we will
        # create symlinks to save space and time.
        for user, user_submissions in submission_map.items():
            # Create a directory for the user
            os.makedirs(os.path.join(temp_directory, user))

            # Create symlinks for all his submissions. Each symlink is
            # named after the submission date.
            for i in user_submissions:
                time_stamp = i.timestamp.strftime("%Y-%m-%d-%H-%M-%S")
                symlink_path = \
                    os.path.join(temp_directory, user, time_stamp)

                # In the highly unlikely event that two of the same user's
                # submissions have the same exact time stamp, we'll need to
                # add a marker to the end of the timestamp.
                marker = 0
                while os.path.exists(symlink_path +
                        ("-%d" % marker if marker > 0 else "")):
                    marker += 1

                if marker > 0:
                    symlink_path += "-%d" % marker

                original_path = i.getFilePath()

                # Detect if the submission's files are still on the filesystem
                if os.path.isdir(original_path):
                    # Create a symlink pointing to the actual submission
                    # directory with the name we generated
                    os.symlink(original_path, symlink_path)
                else:
                    # Create an empty text file marking the fact that a
                    # submissions existed but is no longer available.
                    open(symlink_path, "w").close()

        # Create the actual archive file.
        # TODO: Create it in galah's /var/ directory
        file_descriptor, archive_file = tempfile.mkstemp(suffix = ".zip")
        os.close(file_descriptor)

        # Run zip and do the actual archiving. Will block until it's finished.
        zipdir(temp_directory, archive_file)

        new_archive.file_location = archive_file

        new_archive.expires = \
            datetime.datetime.today() + config["TEACHER_ARCHIVE_LIFETIME"]

        new_archive.save(force_insert = True)
    except Exception as e:
        # If we created a temporary archive file we need to delete it.
        new_archive.file_location = None
        if archive_file:
            os.remove(archive_file)

        new_archive.error_string = str(e)
        new_archive.save(force_insert = True)

        raise
    finally:
        if temp_directory:
            shutil.rmtree(temp_directory)

Example 59

Project: galah
Source File: _browse_assignments.py
@app.route("/assignments")
@account_type_required(("student", "teacher", "teaching_assistant"))
def browse_assignments():
    # Grab all the current user's classes
    classes = Class.objects(id__in = current_user.classes).only("name")

    # Get the current time so we don't have to do it over and over again.
    now = datetime.datetime.today()

    if "show_all" in request.args:
        assignments = list(Assignment.objects(
            Q(for_class__in = current_user.classes) &
            (Q(hide_until = None) | Q(hide_until__lt = now))
        ).only("name", "due", "due_cutoff", "for_class"))
    else:
        personal_deadlines = (current_user.personal_deadline.items() +
            current_user.personal_due_date.items())

        # Figure out which assignments the user has personal deadline
        # extensions for first.
        due_date_exceptions = set()
        for k, v in personal_deadlines:
            if v > now - datetime.timedelta(weeks = 1):
                due_date_exceptions.add(ObjectId(k))
        due_date_exceptions = list(due_date_exceptions)

        assignments = list(Assignment.objects(
            Q(for_class__in = current_user.classes) &
            (Q(due__gt = now - datetime.timedelta(weeks = 1)) |
             Q(due_cutoff__gt = now - datetime.timedelta(weeks = 1)) |
             Q(id__in = due_date_exceptions)) &
            (Q(hide_until = None) | Q(hide_until__lt = now))
        ).only("name", "due", "due_cutoff", "for_class"))

    assignments = [i for i in assignments if
            not i.hide_until or i.hide_until < now]

    # Get the number of assignments that we could have gotten if we didn't
    # limit based on due date.
    all_assignments_count = Assignment.objects(
        Q(for_class__in = current_user.classes) &
        (Q(hide_until = None) | Q(hide_until__lt = now))
    ).count()

    submissions = list(Submission.objects(
            user = current_user.email,
            assignment__in = [i.id for i in assignments],
            most_recent = True
    ))

    # Add a property to all the assignments so the template can display their
    # respective class easier. Additionally, add a plain text version of the
    # due date
    for i in assignments:
        try:
            i.class_name = next((j.name for j in classes if j.id == i.for_class))
        except StopIteration:
            logger.error(
                "Assignment with id %s references non-existant class with id "
                "%s." % (str(i.id, i.for_class))
            )

            i.class_name = "DNE"

        i.apply_personal_deadlines(current_user)

        # Figure out the status messages that we want to display to the user.
        submitted = next((j for j in submissions if j.assignment == i.id), None)
        i.submitted = submitted
        i.status = i.status_color = None
        if submitted:
            i.status = (
                "You made a submission " +
                create_time_element(submitted.timestamp, now)
            )

            i.status_color = "#84B354"
        elif now < i.due:
            i.status = "You have not submitted yet"

            i.status_color = "#877150"
        elif now > i.due and i.due_cutoff and now > i.due_cutoff:
            i.status = "You have not submitted yet, and it is too late to do so"

            i.status_color = "#E9A400"
        elif now > i.due:
            i.status = "You have not submitted yet!"

            i.status_color = "#FB4313"

    # Sort the assignments by due_cutoff or due date if due_cutoff is not
    # assigned.
    assignments.sort(
        key = lambda i: i.due_cutoff if i.due_cutoff else i.due,
        reverse = True
    )

    return render_template(
        "assignments.html",
        assignments = assignments,
        hidden_assignments = -1 if "show_all" in request.args
                else all_assignments_count - len(assignments),
        create_time_element = create_time_element
    )
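
Example 59 calls datetime.datetime.today() once, stores it as now, and reuses that single instant for every comparison; the one-week listing window is plain timedelta arithmetic, sketched here with a hypothetical due date:

import datetime

now = datetime.datetime.today()
week_ago = now - datetime.timedelta(weeks=1)

due = datetime.datetime(2016, 5, 10, 23, 59)   # hypothetical assignment due date
print(due > week_ago)                          # listed if it was due within the last week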

Example 60

Project: galah
Source File: _upload_submission.py
@app.route("/assignments/<assignment_id>/upload", methods = ["POST"])
@account_type_required(("student", "teacher", "teaching_assistant"))
def upload_submission(assignment_id):
    # Figure out which assignment the user asked for.
    try:
        id = ObjectId(assignment_id)
        assignment = Assignment.objects.get(id = id)
    except (InvalidId, Assignment.DoesNotExist) as e:
        logger.info("Could not retrieve assignment: %s", str(e))

        abort(404)

    # Figure out where we should redirect the user to once we're done.
    redirect_to = request.args.get("next") or request.referrer

    if not is_url_on_site(app, redirect_to):
        # Default going back to the assignment screen
        redirect_to = url_for(
            "view_assignment",
            assignment_id = assignment_id
        )

    assignment.apply_personal_deadlines(current_user)

    # Check if the assignment's cutoff date has passed
    if assignment.due_cutoff and \
            assignment.due_cutoff < datetime.datetime.today():
        logger.info("Submission rejected, cutoff date has already passed.")

        flash(
            "The cutoff date has already passed, your submission was not "
            "accepted.", category = "error"
        )

        return redirect(redirect_to)

    form = SimpleArchiveForm()
    if not form.validate_on_submit():
        logger.info(
            "Submission rejected due to internal validation problem."
        )

        flash(
            "Submission rejected due to internal validation problem. Try "
            "again.", category = "error"
        )

        return redirect(redirect_to)

    if not [i for i in form.archive.entries if i.data.filename]:
        logger.info("Submission rejected. User did not submit any files.")

        flash("You did not submit any files.", category = "error")

        return redirect(redirect_to)

    new_submission = Submission(
        assignment = id,
        user = current_user.id,
        timestamp = datetime.datetime.now(),
        test_type = "final" if form.marked_as_final.data else "public",
        most_recent = True
    )
    new_submission.id = ObjectId()

    logger.info(str(new_submission.to_dict()))

    # Craft a unique directory path where we will store the new submission. We
    # are guaranteed an ObjectId is unique. However we are not guaranteed that
    # we will have the proper permissions or that we will be able to make the
    # directory, so this step could fail.
    new_submission.testables = new_submission.getFilePath()
    os.makedirs(new_submission.testables)

    # Save each file the user uploaded into the submissions directory
    for i in form.archive.entries:
        if not i.data.filename:
            continue

        #  Figure out where we want to save the user's file
        file_path = os.path.join(
            new_submission.testables, secure_filename(i.data.filename)
        )

        # Do the actual saving
        i.data.save(file_path)

    new_submission.uploaded_filenames.extend(
        secure_filename(i.data.filename) for i in form.archive.entries
            if i.data.filename
    )

    logger.info(
        "Succesfully uploaded a new submission (id = %s) with files %s.",
        str(new_submission.id),
        str(new_submission.uploaded_filenames)
    )

    # The old "most_recent" submission is no longer the most recent.
    Submission.objects(
        user = current_user.email,
        assignment = id,
        most_recent = True
    ).update(
        multi = False,
        unset__most_recent = 1
    )

    if assignment.test_harness:
        new_submission.test_request_timestamp = datetime.datetime.now()
        logger.info("Sent test request to shepherd for %s" % \
                        str(new_submission.id))

    new_submission.save()

    # Tell shepherd to start running tests if there is a test_harness.
    if assignment.test_harness:
        send_test_request(config["PUBLIC_SOCKET"], new_submission.id)

    # Communicate to the next page what submission was just added.
    flash(str(new_submission.id), category = "new_submission")

    flash(
        "Successfully uploaded %s %s." %
            (
                plural_if("file", len(new_submission.uploaded_filenames)),
                pretty_list(new_submission.uploaded_filenames)
            ),
        category = "message"
    )

    # Everything seems to have gone well
    return redirect(redirect_to)

Example 61

Project: theconversation
Source File: posts.py
    def get(self, day="today", page=1, sort_by="hot"):
        view = "list"
        sort_by = self.get_argument('sort_by', sort_by)
        page = abs(int(self.get_argument('page', page)))
        per_page = abs(int(self.get_argument('per_page', '20')))
        msg = self.get_argument('msg', '')
        slug = self.get_argument('slug', '')
        new_post = None
        if slug:
            new_post = postsdb.get_post_by_slug(slug)

        featured_posts = postsdb.get_featured_posts(1)
        posts = []
        post = {}
        hot_tags = tagsdb.get_hot_tags()

        is_today = False
        if day == "today":
            is_today = True
            day = datetime.datetime.today()
        else:
            day = datetime.datetime.strptime(day, "%Y-%m-%d")

        show_day_permalink = True
        infinite_scroll = False
        if self.request.path == ('/'):
            show_day_permalink = False
            infinite_scroll = True

        is_blacklisted = False
        if self.current_user:
            is_blacklisted = self.is_blacklisted(self.current_user)

        posts = postsdb.get_hot_posts_24hr(day)
        previous_day_posts = postsdb.get_hot_posts_24hr(datetime.datetime.now() - datetime.timedelta(hours=24))

        #midpoint = (len(posts) - 1) / 2
        # midpoint determines where post list breaks from size=md to size=sm
        midpoint = 7
        hot_posts_past_week = postsdb.get_hot_posts_past_week()

        self.vars.update({
          'is_today': is_today,
          'view': view,
          'msg': msg,
          'posts': posts,
          'previous_day_posts': previous_day_posts,
          'hot_posts_past_week': hot_posts_past_week,
          'featured_posts': featured_posts,
          'post': post,
          #'featured_posts': featured_posts,
          'is_blacklisted': is_blacklisted,
          'tags': hot_tags,
          'day': day,
          'show_day_permalink': show_day_permalink,
          'infinite_scroll': infinite_scroll,
          'midpoint': midpoint,
          'new_post': new_post,
          'datetime': datetime
        })
        self.render('post/lists_posts.html', **self.vars)

Example 62

Project: netimpair
Source File: netimpair.py
    def netem(
            self,
            loss_ratio=0,
            loss_corr=0,
            dup_ratio=0,
            delay=0,
            jitter=0,
            delay_jitter_corr=0,
            reorder_ratio=0,
            reorder_corr=0,
            toggle=None):
        '''Enable packet loss.'''
        if toggle is None:
            toggle = [1000000]
        self._check_call(
            'tc qdisc add dev {0} parent 1:3 handle 30: netem'.format(
                self.nic))
        while toggle:
            impair_cmd = 'tc qdisc change dev {0} parent 1:3 handle 30: ' \
                'netem loss {1}% {2}% duplicate {3}% delay {4}ms {5}ms {6}% ' \
                'reorder {7}% {8}%'.format(
                    self.nic, loss_ratio, loss_corr, dup_ratio, delay, jitter,
                    delay_jitter_corr, reorder_ratio, reorder_corr)
            print('Setting network impairment:')
            print(impair_cmd)
            # Set network impairment
            self._check_call(impair_cmd)
            print(
                'Impairment timestamp: {0}'.format(
                    datetime.datetime.today()))
            time.sleep(toggle.pop(0))
            if not toggle:
                return
            self._check_call(
                'tc qdisc change dev {0} parent 1:3 handle 30: netem'.format(
                    self.nic))
            print(
                'Impairment stopped timestamp: {0}'.format(
                    datetime.datetime.today()))
            time.sleep(toggle.pop(0))

Example 63

Project: viaduct
Source File: user.py
@blueprint.route('/users/view/', methods=['GET'])
@blueprint.route('/users/view/<int:user_id>', methods=['GET'])
@login_required
def view_single(user_id=None):
    if user_id is None:
        if current_user.is_authenticated:
            return redirect(url_for('user.view_single',
                                    user_id=current_user.id))
        return redirect(url_for('user.view'))

    can_read = False
    can_write = False

    # Only logged in users can view profiles
    if current_user.is_anonymous:
        return abort(403)
    # Unpaid members cannot view other profiles
    if current_user.id != user_id and not current_user.has_payed:
        return abort(403)
    # A user can always view his own profile
    if current_user.id == user_id:
        can_write = True
        can_read = True
    # group rights
    if ModuleAPI.can_read('user'):
        can_read = True
    if ModuleAPI.can_write('user'):
        can_write = True
        can_read = True

    user = User.query.get_or_404(user_id)
    user.avatar = UserAPI.avatar(user)
    user.groups = UserAPI.get_groups_for_user_id(user)

    user.groups_amount = user.groups.count()

    if "gravatar" in user.avatar:
        user.avatar = user.avatar + "&s=341"

    # Get all activity entrees from these forms, order by start_time of
    # activity.
    activities = Activity.query.join(CustomForm).join(CustomFormResult).\
        filter(CustomFormResult.owner_id == user_id and
               CustomForm.id == CustomFormResult.form_id and
               Activity.form_id == CustomForm.id)

    user.activities_amount = activities.count()

    new_activities = activities\
        .filter(Activity.end_time > datetime.today()).distinct()\
        .order_by(Activity.start_time)
    old_activities = activities\
        .filter(Activity.end_time < datetime.today()).distinct()\
        .order_by(Activity.start_time.desc())

    return render_template('user/view_single.htm', user=user,
                           new_activities=new_activities,
                           old_activities=old_activities,
                           can_read=can_read,
                           can_write=can_write)
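
Example 63 splits activities into upcoming and past by comparing Activity.end_time against datetime.today() in two query filters; the same split expressed over plain datetimes (the end times here are hypothetical):

from datetime import datetime, timedelta

end_times = [datetime.today() + timedelta(days=3),   # still running -> "new"
             datetime.today() - timedelta(days=3)]   # already ended -> "old"

new_activities = [t for t in end_times if t > datetime.today()]
old_activities = [t for t in end_times if t < datetime.today()]
print(len(new_activities), len(old_activities))      # 1 1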

Example 64

Project: reggata
Source File: operations.py
    @staticmethod
    def addUntrackedFile(session, item, repoBasePath, srcAbsPath, dstRelPath, userLogin):
        assert not hlp.is_none_or_empty(srcAbsPath)
        assert dstRelPath is not None
        #NOTE: If dstRelPath is an empty string it means the root of repository

        srcAbsPath = os.path.normpath(srcAbsPath)
        if not os.path.isabs(srcAbsPath):
            raise ValueError("srcAbsPath='{}' must be an absolute path.".format(srcAbsPath))

        if not os.path.exists(srcAbsPath):
            raise ValueError("srcAbsPath='{}' must point to an existing file.".format(srcAbsPath))

        if os.path.isabs(dstRelPath):
            raise ValueError("dstRelPath='{}' must be a relative to repository root path, but it is absolute."
                             .format(dstRelPath))

        dstRelPath = hlp.removeTrailingOsSeps(dstRelPath)
        dstRelPath = os.path.normpath(dstRelPath)
        dstAbsPath = os.path.abspath(os.path.join(repoBasePath, dstRelPath))
        dstAbsPath = os.path.normpath(dstAbsPath)
        if srcAbsPath != dstAbsPath and os.path.exists(dstAbsPath):
            raise ValueError("{} should not point to an existing file.".format(dstAbsPath))

        dataRef = session.query(db.DataRef).filter(
            db.DataRef.url_raw==hlp.to_db_format(dstRelPath)).first()
        if dataRef is not None:
            raise err.DataRefAlreadyExistsError("DataRef instance with url='{}' "
                                               "is already in database. ".format(dstRelPath))

        item.data_ref = db.DataRef(objType=db.DataRef.FILE, url=dstRelPath)
        item.data_ref.user_login = userLogin
        item.data_ref.size = os.path.getsize(srcAbsPath)
        item.data_ref.hash = hlp.computeFileHash(srcAbsPath)
        item.data_ref.date_hashed = datetime.datetime.today()
        session.add(item.data_ref)
        item.data_ref_id = item.data_ref.id
        session.flush()

        #Now it's time to COPY physical file to the repository
        if srcAbsPath != dstAbsPath:
            try:
                head, _tail = os.path.split(dstAbsPath)
                os.makedirs(head)
            except:
                pass
            shutil.copy(srcAbsPath, dstAbsPath)

Example 65

Project: vnpy
Source File: strategyEngine.py
    def __init__(self, eventEngine, mainEngine):
        """Constructor"""
        self.__eventEngine = eventEngine
        self.mainEngine = mainEngine
        
        # Get a datetime object representing today
        t = datetime.today()
        self.today = t.replace(hour=0, minute=0, second=0, microsecond=0)
        
        # Dictionary holding all order data
        self.__dictOrder = {}
        
        # Dictionary of strategy objects
        # key is the strategy name
        # value is the strategy object
        self.dictStrategy = {}
        
        # Dictionary mapping contract symbols to strategy objects
        # key is the contract symbol
        # value is the list of strategies trading that contract
        self.__dictSymbolStrategy = {}
        
        # Dictionary mapping order references to strategy objects
        # key is the order reference
        # value is the strategy object
        self.__dictOrderRefStrategy = {}
        
        # Dictionary mapping contract symbols to related stop orders
        # key is the contract symbol
        # value is the list of stop orders for that contract
        self.__dictStopOrder = {}
        
        # MongoDB database related
        self.__mongoConnected = False
        self.__mongoConnection = None
        self.__mongoTickDB = None
        
        # Call setup functions
        self.__connectMongo()
        self.__registerEvent()

Example 66

Project: VIP
Source File: mcmc_sampling.py
def mcmc_negfc_sampling(cubes, angs, psfn, ncomp, plsc, initial_state,
                        fwhm=4, annulus_width=3, aperture_radius=4, cube_ref=None, 
                        svd_mode='lapack', scaling='temp-mean', fmerit='sum',
                        collapse='median', nwalkers=1000, bounds=None, a=2.0,
                        burnin=0.3, rhat_threshold=1.01, rhat_count_threshold=1,
                        niteration_min=0, niteration_limit=1e02, 
                        niteration_supp=0, check_maxgap=1e04, nproc=1, 
                        output_file=None, display=False, verbose=True, save=False):
    """ Runs an affine invariant mcmc sampling algorithm in order to determine
    the position and the flux of the planet using the 'Negative Fake Companion'
    technique. The result of this procedure is a chain with the samples from the
    posterior distributions of each of the 3 parameters.
    
    This technique can be summarized as follows:
    
    1)  We inject a negative fake companion (one candidate) at a given 
        position and characterized by a given flux, both close to the expected 
        values.
    2)  We run PCA on an full annulus which pass through the initial guess, 
        regardless of the position of the candidate.
    3)  We extract the intensity values of all the pixels contained in a 
        circular aperture centered on the initial guess.
    4)  We calculate the function of merit. The associated chi^2 is given by
        chi^2 = sum(|I_j|) where j \in {1,...,N} with N the total number of 
        pixels contained in the circular aperture.        
    The steps 1) to 4) are looped. At each iteration, the candidate model 
    parameters are defined by the emcee Affine Invariant algorithm. 
    
    Parameters
    ----------  
    cubes: str or numpy.array
        The relative path to the cube of fits images OR the cube itself.
    angs: str or numpy.array
        The relative path to the parallactic angle fits image or the angs itself.
    psfn: str or numpy.array
        The relative path to the instrumental PSF fits image or the PSF itself.
        The PSF must be centered and the flux in a 1*FWHM aperture must equal 1.
    ncomp: int
        The number of principal components.        
    plsc: float
        The platescale, in arcsec per pixel.  
    annulus_width: float, optional
        The width in pixel of the annulus on which the PCA is performed.
    aperture_radius: float, optional
        The radius of the circular aperture.        
    nwalkers: int optional
        The number of Goodman & Weare 'walkers'.
    initial_state: numpy.array
        The first guess for the position and flux of the planet, respectively.
        Each walker will start in a small ball around this preferred position.
    cube_ref : array_like, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.
        'randsvd' is not recommended for the negative fake companion technique.
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With 
        "temp-mean" then temporal px-wise mean subtraction is done and with 
        "temp-standard" temporal mean centering plus scaling to unit variance 
        is done. 
    fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close in
        companions sitting on top of speckle noise.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    bounds: numpy.array or list, default=None, optional
        The prior knowledge on the model parameters. If None, large bounds will 
        be automatically estimated from the initial state.
    a: float, default=2.0
        The proposal scale parameter. See notes.
    burnin: float, default=0.3
        The fraction of a walker which is discarded.
    rhat_threshold: float, default=1.01
        The Gelman-Rubin threshold used for the test for nonconvergence.   
    rhat_count_threshold: int, optional
        The Gelman-Rubin test must be satisfied 'rhat_count_threshold' times in
        a row before claiming that the chain has converged.        
    niteration_min: int, optional
        Steps per walker lower bound. The simulation will run at least this
        number of steps per walker.
    niteration_limit: int, optional
        Steps per walker upper bound. If the simulation runs up to 
        'niteration_limit' steps without having reached the convergence 
        criterion, the run is stopped.
    niteration_supp: int, optional
        Number of iterations to run after having "reached the convergence".     
    check_maxgap: int, optional
        Maximum number of steps per walker between two Gelman-Rubin test.
    nproc: int, optional
        The number of processes to use for parallelization. 
    output_file: str
        The name of the output file which contains the MCMC results
        (if save is True).
    display: boolean
        If True, the walk plot is displayed at each evaluation of the Gelman-
        Rubin test.
    verbose: boolean
        Display information in the shell.
    save: boolean
        If True, the MCMC results are pickled.
                    
    Returns
    -------
    out : numpy.array
        The MCMC chain.         
        
    Notes
    -----
    The parameter 'a' must be > 1. For more theoretical information concerning
    this parameter, see Goodman & Weare, 2010, Comm. App. Math. Comp. Sci., 
    5, 65, Eq. [9] p70.
    
    The parameter 'rhat_threshold' can be a numpy.array with individual 
    threshold value for each model parameter.
    """ 
    if verbose:
        start_time = timeInit()
        print "        MCMC sampler for the NEGFC technique       "
        print sep

    # If required, create the output folder.
    if save:    
        if not os.path.exists('results'):
            os.makedirs('results')
        
        if output_file is None:
            datetime_today = datetime.datetime.today()
            output_file = str(datetime_today.year)+str(datetime_today.month)+\
                          str(datetime_today.day)+'_'+str(datetime_today.hour)+\
                          str(datetime_today.minute)+str(datetime_today.second)            
        
        if not os.path.exists('results/'+output_file):
            os.makedirs('results/'+output_file)

            
    # #########################################################################
    # If required, one opens the source files
    # #########################################################################
    if isinstance(cubes,str) and isinstance(angs,str):
        if angs is None:
            cubes, angs = open_adicube(cubes, verbose=False)
        else:
            cubes = open_fits(cubes)
            angs = open_fits(angs, verbose=False)    
        
        if isinstance(psfn,str):
            psfn = open_fits(psfn)
        
        if verbose:
            print 'The data has been loaded. Let\'s continue!'
    
    # #########################################################################
    # Initialization of the variables
    # #########################################################################    
    dim = 3 # There are 3 model parameters, resp. the radial and angular 
            # position of the planet and its flux.
    
    itermin = niteration_min
    limit = niteration_limit    
    supp = niteration_supp
    maxgap = check_maxgap
    initial_state = np.array(initial_state)
    
    if itermin > limit:
        itermin = 0
        print("'niteration_min' must be < 'niteration_limit'.")
        
    fraction = 0.3
    geom = 0
    lastcheck = 0
    konvergence = np.inf
    rhat_count = 0
        
    chain = np.empty([nwalkers,1,dim])
    isamples = np.empty(0)
    pos = initial_state + np.random.normal(0,1e-01,(nwalkers,3))
    nIterations = limit + supp
    rhat = np.zeros(dim)  
    stop = np.inf
    

    if bounds is None:
        bounds = [(initial_state[0]-annulus_width/2.,initial_state[0]+annulus_width/2.), #radius
                  (initial_state[1]-10,initial_state[1]+10), #angle
                  (0,2*initial_state[2])] #flux
    
    sampler = emcee.EnsembleSampler(nwalkers,dim,lnprob,a,
                                    args =([bounds, cubes, angs, plsc, psfn,
                                            fwhm, annulus_width, ncomp,
                                            aperture_radius, initial_state,
                                            cube_ref, svd_mode, scaling, fmerit,
                                            collapse]),
                                    threads=nproc)
    
    duration_start = datetime.datetime.now()
    start = datetime.datetime.now()

    # #########################################################################
    # Affine Invariant MCMC run
    # ######################################################################### 
    if verbose:
        print ''
        print 'Start of the MCMC run ...'
        print 'Step  |  Duration/step (sec)  |  Remaining Estimated Time (sec)'
                             
    for k, res in enumerate(sampler.sample(pos,iterations=nIterations,
                                           storechain=True)):
        elapsed = (datetime.datetime.now()-start).total_seconds()
        if verbose:
            if k == 0:
                q = 0.5
            else:
                q = 1
            print '{}\t\t{:.5f}\t\t\t{:.5f}'.format(k,elapsed*q,elapsed*(limit-k-1)*q)
            
        start = datetime.datetime.now()

        # ---------------------------------------------------------------------        
        # Store the state manually in order to handle with dynamical sized chain.
        # ---------------------------------------------------------------------    
        ## Check if the size of the chain is long enough.
        s = chain.shape[1]
        if k+1 > s: #if not, one doubles the chain length
            empty = np.zeros([nwalkers,2*s,dim])
            chain = np.concatenate((chain,empty),axis=1)
        ## Store the state of the chain
        chain[:,k] = res[0]
        
        
        # ---------------------------------------------------------------------
        # If k meets the criterion, one tests the non-convergence.
        # ---------------------------------------------------------------------              
        criterion = np.amin([ceil(itermin*(1+fraction)**geom),\
                            lastcheck+floor(maxgap)])
   
        if k == criterion:
            if verbose:
                print ''
                print '   Gelman-Rubin statistic test in progress ...' 
            
            geom += 1
            lastcheck = k
            if display:
                showWalk(chain)
                
            if save:
                import pickle                                    
                
                with open('results/'+output_file+'/'+output_file+'_temp_k{}'.format(k),'wb') as fileSave:
                    myPickler = pickle.Pickler(fileSave)
                    myPickler.dump({'chain':sampler.chain, 
                                    'lnprob':sampler.lnprobability, 
                                    'AR':sampler.acceptance_fraction})
                
            ## We only test the rhat if we have reached the minimum number of steps.
            if (k+1) >= itermin and konvergence == np.inf:
                threshold0 = int(floor(burnin*k))
                threshold1 = int(floor((1-burnin)*k*0.25))

                # We calculate the rhat for each model parameter.
                for j in range(dim):
                    part1 = chain[:,threshold0:threshold0+threshold1,j].reshape((-1))
                    part2 = chain[:,threshold0+3*threshold1:threshold0+4*threshold1,j].reshape((-1))
                    series = np.vstack((part1,part2))
                    rhat[j] = gelman_rubin(series)   
                if verbose:    
                    print '   r_hat = {}'.format(rhat)
                    print '   r_hat <= threshold = {}'.format(rhat <= rhat_threshold)
                    print ''
                # We test the rhat.
                if (rhat <= rhat_threshold).all(): #and rhat_count < rhat_count_threshold: 
                    rhat_count += 1
                    if rhat_count < rhat_count_threshold:
                        print("Gelman-Rubin test OK {}/{}".format(rhat_count,rhat_count_threshold))
                    elif rhat_count >= rhat_count_threshold:
                        print '... ==> convergence reached'
                        konvergence = k
                        stop = konvergence + supp                       
                #elif (rhat <= rhat_threshold).all() and rhat_count >= rhat_count_threshold:
                #    print '... ==> convergence reached'
                #    konvergence = k
                #    stop = konvergence + supp
                else:
                    rhat_count = 0

        if (k+1) >= stop: #Then we have reached the maximum number of steps for our Markov chain.
            print 'We break the loop because we have reached convergence'
            break
      
    if k == nIterations-1:
        print("We have reached the limit number of steps without having converged")
            
    # #########################################################################
    # Construction of the independent samples
    # ######################################################################### 
            
    temp = np.where(chain[0,:,0] == 0.0)[0]
    if len(temp) != 0:
        idxzero = temp[0]
    else:
        idxzero = chain.shape[1]
    
    # cast to int so the value can be used as a slice index
    idx = int(np.amin([np.floor(2e05/nwalkers), np.floor(0.1*idxzero)]))
    if idx == 0:
        isamples = chain[:, 0:idxzero, :]
    else:
        isamples = chain[:, idxzero-idx:idxzero, :]

    if save:
        import pickle
        
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        input_parameters = {j : values[j] for j in args[1:]}        
        
        output = {'isamples':isamples,
                  'chain': chain_zero_truncated(chain),
                  'input_parameters': input_parameters,
                  'AR': sampler.acceptance_fraction,
                  'lnprobability': sampler.lnprobability}
                  
        with open('results/'+output_file+'/MCMC_results','wb') as fileSave:
            myPickler = pickle.Pickler(fileSave)
            myPickler.dump(output)
        
        print ''        
        print("The file MCMC_results has been stored in the folder {}".format('results/'+output_file+'/'))

    if verbose:
        timing(start_time)
                                    
    return chain_zero_truncated(chain)    
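
For reference, start_time is presumably captured near the top of this function with datetime.datetime.today(), and timing() reports the elapsed wall-clock time. A minimal, self-contained sketch of that pattern (timing here is a stand-in helper, not a library function):

import datetime

def timing(start_time):
    # Stand-in helper: report elapsed wall-clock time since start_time.
    print("Running time: {}".format(datetime.datetime.today() - start_time))

start_time = datetime.datetime.today()
# ... the long-running sampling loop would go here ...
timing(start_time)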

Example 67

Project: cobra
Source File: DashboardController.py
View license
@web.route(ADMIN_URL + "/graph_lines", methods=['POST'])
def graph_lines():
    # everyday vulns count
    # everyday scan count
    if not ValidateClass.check_login():
        return redirect(ADMIN_URL + '/index')
    show_all = request.form.get("show_all")
    if show_all:
        days = 15 - 1
        vuls = list()
        scans = list()
        labels = list()
        # get vulns count
        end_date = datetime.datetime.today()
        start_date = datetime.date.today() - datetime.timedelta(days=days)
        start_date = datetime.datetime.combine(start_date, datetime.datetime.min.time())

        d = start_date
        while d < end_date:
            all_vuls = db.session.query(
                func.count("*").label('counts')
            ).filter(
                and_(CobraResults.created_at >= d, CobraResults.created_at <= d + datetime.timedelta(1))
            ).all()
            vuls.append(all_vuls[0][0])
            labels.append(d.strftime("%Y%m%d"))
            d += datetime.timedelta(1)

        # get scan count
        d = start_date
        while d < end_date:
            t = int(time.mktime(d.timetuple()))
            all_scans = db.session.query(
                func.count("*").label("counts")
            ).filter(
                and_(CobraTaskInfo.time_start >= t, CobraTaskInfo.time_start <= t + 3600 * 24)
            ).all()
            scans.append(all_scans[0][0])
            d += datetime.timedelta(1)

        return jsonify(labels=labels, vuls=vuls, scans=scans)

    else:
        start_time_stamp = request.form.get("start_time_stamp")[:10]
        end_time_stamp = request.form.get("end_time_stamp")[:10]

        labels = list()
        vuls = list()
        scans = list()

        start_date = datetime.datetime.fromtimestamp(int(start_time_stamp[:10]))
        end_date = datetime.datetime.fromtimestamp(int(end_time_stamp[:10]))

        # get vulns count
        d = start_date
        while d < end_date:
            t = end_date if d + datetime.timedelta(1) > end_date else d + datetime.timedelta(1)

            all_vuls = db.session.query(
                func.count("*").label('counts')
            ).filter(
                and_(CobraResults.created_at >= d, CobraResults.created_at <= t)
            ).all()

            labels.append(d.strftime("%Y%m%d"))
            vuls.append(all_vuls[0][0])
            d += datetime.timedelta(1)

        # get scans count
        d = start_date
        while d < end_date:
            t_end_date = end_date if d + datetime.timedelta(1) > end_date else d + datetime.timedelta(1)
            t_start_date = time.mktime(d.timetuple())
            t_end_date = time.mktime(t_end_date.timetuple())

            all_scans = db.session.query(
                func.count("*").label("counts")
            ).filter(
                and_(CobraTaskInfo.time_start >= t_start_date, CobraTaskInfo.time_start <= t_end_date)
            ).all()
            scans.append(all_scans[0][0])
            d += datetime.timedelta(1)

        return jsonify(labels=labels, vuls=vuls, scans=scans)
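
The point of interest here is the day-window setup: datetime.date.today() minus N days is combined with datetime.datetime.min.time() to get midnight N days ago, while datetime.datetime.today() marks the open end of the range. A minimal sketch of the same loop with the database queries left out:

import datetime

days = 15 - 1  # a 15-day window, counting today
end_date = datetime.datetime.today()
start_date = datetime.datetime.combine(
    datetime.date.today() - datetime.timedelta(days=days),
    datetime.datetime.min.time())  # midnight, `days` days ago

d = start_date
while d < end_date:
    window_end = d + datetime.timedelta(days=1)
    # a real implementation would count rows with d <= created_at < window_end here
    print(d.strftime("%Y%m%d"))
    d = window_end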

Example 68

Project: Sick-Beard-TPB
Source File: properFinder.py
View license
    def _getProperList(self):

        propers = {}

        # for each provider get a list of the propers
        for curProvider in providers.sortedProviderList():

            if not curProvider.isActive():
                continue

            search_date = datetime.datetime.today() - datetime.timedelta(days=2)

            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
            try:
                curPropers = curProvider.findPropers(search_date)
            except exceptions.AuthException, e:
                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
                continue

            # if they haven't been added by a different provider then add the proper to the list
            for x in curPropers:
                name = self._genericName(x.name)

                if not name in propers:
                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                    x.provider = curProvider
                    propers[name] = x

        # take the list of unique propers and get it sorted by
        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
        finalPropers = []

        for curProper in sortedPropers:

            # parse the file name
            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(curProper.name)
            except InvalidNameException:
                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
                continue

            if not parse_result.episode_numbers:
                logger.log(u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode", logger.DEBUG)
                continue

            # populate our Proper instance
            if parse_result.air_by_date:
                curProper.season = -1
                curProper.episode = parse_result.air_date
            else:
                curProper.season = parse_result.season_number if parse_result.season_number != None else 1
                curProper.episode = parse_result.episode_numbers[0]
            curProper.quality = Quality.nameQuality(curProper.name)

            # for each show in our list
            for curShow in sickbeard.showList:

                if not parse_result.series_name:
                    continue

                genericName = self._genericName(parse_result.series_name)

                # get the scene name masks
                sceneNames = set(show_name_helpers.makeSceneShowSearchStrings(curShow))

                # for each scene name mask
                for curSceneName in sceneNames:

                    # if it matches
                    if genericName == self._genericName(curSceneName):
                        logger.log(u"Successful match! Result " + parse_result.series_name + " matched to show " + curShow.name, logger.DEBUG)

                        # set the tvdbid in the db to the show's tvdbid
                        curProper.tvdbid = curShow.tvdbid

                        # since we found it, break out
                        break

                # if we found something in the inner for loop break out of this one
                if curProper.tvdbid != -1:
                    break

            if curProper.tvdbid == -1:
                continue

            if not show_name_helpers.filterBadReleases(curProper.name):
                logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, ignoring it", logger.DEBUG)
                continue

            # if we have an air-by-date show then get the real season/episode numbers
            if curProper.season == -1 and curProper.tvdbid:
                showObj = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
                if not showObj:
                    logger.log(u"This should never have happened, post a bug about this!", logger.ERROR)
                    raise Exception("BAD STUFF HAPPENED")

                tvdb_lang = showObj.lang
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                if tvdb_lang and not tvdb_lang == 'en':
                    ltvdb_api_parms['language'] = tvdb_lang

                try:
                    t = tvdb_api.Tvdb(**ltvdb_api_parms)
                    epObj = t[curProper.tvdbid].airedOn(curProper.episode)[0]
                    curProper.season = int(epObj["seasonnumber"])
                    curProper.episodes = [int(epObj["episodenumber"])]
                except tvdb_exceptions.tvdb_episodenotfound:
                    logger.log(u"Unable to find episode with date " + str(curProper.episode) + " for show " + parse_result.series_name + ", skipping", logger.WARNING)
                    continue

            # check if we actually want this proper (if it's the right quality)
            sqlResults = db.DBConnection().select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [curProper.tvdbid, curProper.season, curProper.episode])
            if not sqlResults:
                continue
            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != curProper.quality:
                continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if curProper.tvdbid != -1 and (curProper.tvdbid, curProper.season, curProper.episode) not in map(operator.attrgetter('tvdbid', 'season', 'episode'), finalPropers):
                logger.log(u"Found a proper that we need: " + str(curProper.name))
                finalPropers.append(curProper)

        return finalPropers
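
datetime.datetime.today() appears in this method only to build a two-day cutoff for the proper search. Reduced to that idea alone, with a made-up releases list standing in for the provider results, the pattern is roughly:

import datetime

# Only consider releases published within the last two days.
search_date = datetime.datetime.today() - datetime.timedelta(days=2)

releases = [
    {"name": "Show.S01E01.PROPER", "date": datetime.datetime.today() - datetime.timedelta(hours=5)},
    {"name": "Show.S01E02.PROPER", "date": datetime.datetime.today() - datetime.timedelta(days=7)},
]

recent = [r for r in releases if r["date"] >= search_date]
print([r["name"] for r in recent])  # only the 5-hour-old release survives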

Example 69

Project: drydrop
Source File: drydrop_handler.py
View license
    def system_dispatch(self):
        import logging
        import drydrop.app as app
        import datetime
        from drydrop.lib.utils import import_module
        from drydrop.app.core.appceptions import PageException

        # match internal route
        self.mapper.environ = self.request.environ
        controller = self.mapper.match(self.request.path)
        if controller == None:
            return False

        logging.debug("System: dispatching %s to %s", self.request.path, controller)

        # find the controller class
        action = controller['action']
        name = controller['controller']
        mod = import_module('drydrop.app.controllers.%s' % name)
        klass = "%sController" % name.capitalize()
        controller_class = mod.__dict__[klass]

        # add the route information as request parameters
        for param, value in controller.iteritems():
            self.request.GET[param] = value

        # instantiate controller
        controller_instance = controller_class(self.request, self.response, self)

        # get controller's methods
        before_method = controller_instance.__getattribute__('before_action')
        action_method = controller_instance.__getattribute__(action)
        after_method = controller_instance.__getattribute__('after_action')

        # see http://code.google.com/p/googleappengine/issues/detail?id=732
        self.response.headers['Cache-Control'] = "no-cache"
        expires = datetime.datetime.today() + datetime.timedelta(0, -1)
        self.response.headers['Expires'] = expires.strftime('%a, %d %b %Y %H:%M:%S GMT')
        
        # call action methods
        try:
            before_result = before_method()
            if before_result:
                return
            
            action_result = action_method()
            if action_result:
                return
        
            after_result = after_method()
            if after_result:
                return
        except PageException:
            pass 
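
The datetime usage in this dispatcher is the cache-busting Expires header: datetime.datetime.today() plus a negative timedelta yields a moment one second in the past, which is then formatted as an HTTP date. A standalone sketch (response_headers is just a dict here; note that today() returns local time, so a UTC-based timestamp would match the GMT suffix more precisely):

import datetime

response_headers = {"Cache-Control": "no-cache"}

# One second in the past, so caches treat the response as already expired.
expires = datetime.datetime.today() + datetime.timedelta(0, -1)
response_headers["Expires"] = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")

print(response_headers["Expires"])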

Example 70

Project: datafeed
Source File: test_imiguserver.py
View license
    @patch.object(ImiguHandler, 'get_snapshot_index')
    def test_fix_report_when_archive(self, mock_index):
        # set to after hours: 15:30 implicates error data
        # some datafeed still sending data even market was closed.
        day = datetime.datetime.today()
        ts = time.mktime((day.year, day.month, day.day,
                          15, 30, 0, 0, 0, 0))
        mock_index.return_value = (ts, 360)
        
        r = {
            'amount': 84596203520.0,
            'close': 2856.9899999999998,
            'high': 2880.5599999999999,
            'low': 2851.9499999999998,
            'name': u'\u4e0a\u8bc1\u6307\u6570',
            'open': 2868.73,
            'preclose': 2875.8600000000001,
            'price': 2856.9899999999998,
            'symbol': 'SH000001',
            'time': '2010-12-08 14:02:57',
            'timestamp': 1291788177,
            'volume': 75147848.0
            }
        

        r['timestamp'] = ts
        r['time'] = str(datetime.datetime.fromtimestamp(ts))

        data = {'SH000001': r}

        import zlib
        import marshal
        data = zlib.compress(marshal.dumps(data))
        
        request = Request(None, 'put_reports', data)
        self.application(request)

        close_time = time.mktime((day.year, day.month, day.day,
                                  15, 0, 0, 0, 0, 0))
        
        request = Request(None, 'archive_minute', data)
        self.application(request)
        
        r = self.application.dbm.get_report('SH000001')
        self.assertEqual(r['timestamp'], close_time)
        self.assertEqual(r['open'], 2868.73)
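
Here datetime.datetime.today() only supplies the current year, month and day, which are combined with a fixed intraday time (15:30) through time.mktime to fake an after-hours timestamp. That construction on its own:

import datetime
import time

day = datetime.datetime.today()

# Epoch timestamp for 15:30:00 local time on today's date.
ts = time.mktime((day.year, day.month, day.day, 15, 30, 0, 0, 0, 0))

print(ts, datetime.datetime.fromtimestamp(ts))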

Example 71

Project: backintime
Source File: test_takeSnapshot.py
View license
    @patch('time.sleep') # speed up unittest
    def test_takeSnapshot(self, sleep):
        now = datetime.today() - timedelta(minutes = 6)
        sid1 = snapshots.SID(now, self.cfg)

        self.assertListEqual([True, False], self.sn.takeSnapshot(sid1, now, [(self.include.name, 0),]))
        self.assertTrue(sid1.exists())
        self.assertTrue(sid1.canOpenPath(os.path.join(self.include.name, 'foo', 'bar', 'baz')))
        self.assertTrue(sid1.canOpenPath(os.path.join(self.include.name, 'test')))
        self.assertTrue(sid1.canOpenPath(os.path.join(self.include.name, 'file with spaces')))
        for f in ('config',
                  'fileinfo.bz2',
                  'info',
                  'takesnapshot.log.bz2'):
            self.assertTrue(os.path.exists(sid1.path(f)), msg = 'file = {}'.format(f))

        for f in ('failed',
                  'save_to_continue'):
            self.assertFalse(os.path.exists(sid1.path(f)), msg = 'file = {}'.format(f))

        # second takeSnapshot which should not create a new snapshot as nothing
        # has changed
        now = datetime.today() - timedelta(minutes = 4)
        sid2 = snapshots.SID(now, self.cfg)

        self.assertListEqual([False, False], self.sn.takeSnapshot(sid2, now, [(self.include.name, 0),]))
        self.assertFalse(sid2.exists())

        # third takeSnapshot
        self.remount()
        with open(os.path.join(self.include.name, 'lalala'), 'wt') as f:
            f.write('asdf')

        now = datetime.today() - timedelta(minutes = 2)
        sid3 = snapshots.SID(now, self.cfg)

        self.assertListEqual([True, False], self.sn.takeSnapshot(sid3, now, [(self.include.name, 0),]))
        self.assertTrue(sid3.exists())
        self.assertTrue(sid3.canOpenPath(os.path.join(self.include.name, 'lalala')))
        inode1 = self.getInode(sid1)
        inode3 = self.getInode(sid3)
        self.assertEqual(inode1, inode3)

        # fourth takeSnapshot: force creating a new snapshot even if nothing
        # has changed
        self.cfg.setTakeSnapshotRegardlessOfChanges(True)
        now = datetime.today()
        sid4 = snapshots.SID(now, self.cfg)

        self.assertListEqual([True, False], self.sn.takeSnapshot(sid4, now, [(self.include.name, 0),]))
        self.assertTrue(sid4.exists())
        self.assertTrue(sid4.canOpenPath(os.path.join(self.include.name, 'foo', 'bar', 'baz')))
        self.assertTrue(sid4.canOpenPath(os.path.join(self.include.name, 'test')))
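
This test presumably imports the class directly (from datetime import datetime, timedelta), so datetime.today() is the same call as datetime.datetime.today() elsewhere on this page; the snapshot IDs are simply stamped a few minutes apart. A stripped-down sketch of that spacing:

from datetime import datetime, timedelta

# Three timestamps a few minutes apart, oldest first, the way the test
# spaces out its snapshot IDs.
timestamps = [datetime.today() - timedelta(minutes=m) for m in (6, 4, 2)]

for ts in timestamps:
    print(ts.strftime("%Y%m%d-%H%M%S"))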

Example 72

View license
  def __init__( self, **kwargs):
    """ Set up our data structures and determine whether we're in
        live or simulated mode.
        
        time_str : (default "5min") time-frame to analyze on ... this
                   controls the length of each "bar" or period, can be
                   any pandas-recognized string, (10s, 10min, 1h, 1d, etc)
        live : live or simulated mode (whether or not to read from
               filename or from the web), defaults to False (simulated)
        filename : name of log file to read in simulated mode ... interpreted
                   as ./logs/filename ... file must be in this dir
        warp : whether or not to use our timedelta or just next value
               for each update() ... so we can do all calculations
               as fast as possible, defaults to False ("realtime")
        debug : whether or not to spit out debugging info
        sample_secs : if in warp-mode, N-seconds to sample on (the shorter
                      N, the more often we are "checking" the price and
                      the more iterations it will take to complete a series)
        instant : (default False) Setting this to true will make Data
                  send the lastprice series to the Coins to calculate all
                  in faster, one-pass mode
        ltc_opts : dict structure on what to do with LTC data ... see coin for
              options from kwargs (default is same as GOX...)
              Here's an example of a fully loaded options dict
              {  "debug": False,
                 "relative": False,
                 "calc_rolling": False,
                 "rolling": { self.time_str : {  5: pd.DataFrame(), 
                                                25: pd.DataFrame(), 
                                                50: pd.DataFrame() } },
                 "calc_mid": False,
                 "calc_ohlc": True,
                 "ohlc": { self.time_str : pd.DataFrame()  },
                 "calc_indicators": True,
                 "indicators":{ "RSI"  : { "data": pd.DataFrame(), "n":14 },
                                "ROC"  : { "data": pd.DataFrame(), "n":20 },
                                "AMA"  : { "data": pd.DataFrame(), "n":10, "fn":2.5, "sn":30 },
                                "CCI"  : { "data": pd.DataFrame(), "n":20 },
                                "FRAMA": { "data": pd.DataFrame(), "n":10 },
                                "RVI2" : { "data": pd.DataFrame(), "n":14, "s":10 },
                                "MACD" : { "data": pd.DataFrame(), "f":12, "s":26, "m":9 },
                                "ADX"  : { "data": pd.DataFrame(), "n":14 },
                                "ELI"  : { "data": pd.DataFrame(), "n":14 },
                                "TMI"  : { "data": pd.DataFrame(), "nb":10, "nf":5} }
                 "calc_std": True,
                 "std": { 10: pd.DataFrame(), 50: pd.DataFrame(), 100: pd.DataFrame() },
                 "calc_crt": True,
                 "crt": { 1: pd.DataFrame(), 2: pd.DataFrame(),
                          3: pd.DataFrame(), 5: pd.DataFrame(),
                          8: pd.DataFrame() },
                 "instant": False,
                 "time_str": self.time_str }
        gox_opts : dict structure on what to do with GOX BTC data ... see coin for
              options from kwargs (default: everything disabled but OHLC ... )
               { "debug": False,
                 "relative": False,
                 "calc_rolling": False,
                 "rolling": False,
                 "calc_mid": False,
                 "calc_ohlc": True,
                 "ohlc": { self.time_str : pd.DataFrame() },
                 "calc_indicators": False,
                 "calc_std": False,
                 "std": False,
                 "calc_crt": False,
                 "crt": False,
                 "instant": False,
                 "time_str": self.time_str }
        pickled_data : (default False) if this is set to a data structure,
                       from pickle'd pandas csv data structure, it'll take
                       it from here instead of from disk. Faster on multiple
                       iterations.
        verbose : (default False) whether or not to print out shit
    """
    self.live = kwargs.get("live", False)
    self.filename = kwargs.get("filename", "test.csv")
    self.warp = kwargs.get( "warp", True)
    self._debug = kwargs.get( "debug", False)
    self.sample_secs = kwargs.get( "sample_secs", 5)
    self.instant = kwargs.get( "instant", False)
    self.time_str = kwargs.get( "time_str", "5min")
    self.verbose = kwargs.get( "verbose", False)
    # default LTC options
    def_ltc =  { "debug": False,
                 "relative": False,
                 "calc_rolling": False,
                 "rolling": False,
                 "calc_mid": False,
                 "calc_ohlc": True,
                 "ohlc": { self.time_str : pd.DataFrame() },
                 "calc_indicators": False,
                 "indicators": False,
                 "calc_std": False,
                 "std": False,
                 "calc_crt": False,
                 "crt": False,
                 "instant": False,
                 "time_str": self.time_str }
    self.ltc_opts = kwargs.get( "ltc_opts", def_ltc)
    # default gox options
    def_gox = { "debug": False,
                 "relative": False,
                 "calc_rolling": False,
                 "rolling": False,
                 "calc_mid": False,
                 "calc_ohlc": True,
                 "ohlc": { self.time_str : pd.DataFrame() },
                 "calc_indicators": False,
                 "indicators": False,
                 "calc_std": False,
                 "std": False,
                 "calc_crt": False,
                 "crt": False,
                 "instant": False,
                 "time_str": self.time_str }
    self.gox_opts = kwargs.get( "gox_opts", def_gox)
    self.pickled_data = kwargs.get( "pickled_data", False)
    
    if self.verbose:
      print "[*]", "Online" if self.live else "Offline", "mode initiated"
      print "[*]", "Simulated" if not self.warp else "Speed", "mode initiated"
    
    # if we're running simulated, set up price logs so we can query them
    # in realtime as if they were actual price changes
    if self.live == False:
      # did we supply a pre-parsed pandas CSV data struct?
      if self.pickled_data != False:
        if self.verbose:
          print "[*]", "Loading supplied pickle!"
        data = self.pickled_data
      # nope ... load from disk!
      else:
        # loading from CSV takes a long time, lets prepare a pickle of the
        # loaded CSV if we haven't already done so, if we have then load it
        filename_pick = os.path.realpath( os.path.join( "logs", self.filename+".pickle"))
        if os.path.exists( filename_pick):
          if self.verbose:
            print "[*]", "Loading csv pickle from %s" % filename_pick
          f = open( filename_pick, "rb")
          data = cPickle.load( f)
          f.close()
        else:
          filename_csv = os.path.realpath( os.path.join( "logs", self.filename))
          if self.verbose: print "[*] Loading %s" % filename_csv
          data = pl2.load2( filename_csv)
          if self.verbose: print "[*] Generating pickle for next time to %s" % filename_pick
          f = open( filename_pick, "wb")
          cPickle.dump( data, f)
          f.close()

      # load our time-series dataframe from csv using pandas library
      self._gox_offline = data["gox"]
      self._ltc_offline = data["ltc"]
      self._ltc_depth_offline = data["ltc_depth"]
      
      # if we're running in non-simulated offline mode, where we just
      # want to run through our historical price data as quickly as
      # possible, then we build a range of dates that we will walk through
      if self.warp == True:
        # get our start and end points in our timerange
        start = max( [ self._gox_offline.index[0], self._ltc_offline.index[0]])
        end = max( [ self._gox_offline.index[-1], self._ltc_offline.index[-1]])
        
        # our list of total dates to run through
        # jump to N-seconds intervals (self.sample_secs)
        if self.verbose:
          print "[*]","Building daterange"
        self.logrange = self._daterange( start, end, self.sample_secs)
        
        # we're going to need to iterate through this one at a time ...
        # get new values, calculate indicators, train, repeat, so we'll
        # need to keep track of where we are
        self.logrange_n = 0
        if self.verbose:
          print "[*] Dates from", start, "to", end

      # otherwise we pretend we're live (slow so we can watch it IRT)
      else:
        # find out which has the earliest starting date. We will use
        # this to calculate our timedelta. In the future when we want
        # to check the price, we will use this delta compared to current
        # time to grab the proper simulated price
        # (we use max here so we don't get any initial NaN prices if possible)
        self.delta = datetime.datetime.today() - max( [ self._gox_offline.index[0], 
                                                        self._ltc_offline.index[0]])

        if self.verbose: print "[*] Timedelta: %s" % self.delta
        
    #####################################
    #                                   #
    #            C O I N S              #
    #                                   #
    ##################################### 

    # prepare instant if necessary
    if self.instant:
      # seed prices with midprice
      if self.ltc_opts["calc_mid"]:
        filename = os.path.realpath( os.path.join( "logs", 
                     self.filename+".midprices.pickle"))
        # if midprices pickle doesn't exist, we need to generate it ... this is slow as fuck
        # so we really want to have this preloaded
        if os.path.exists( filename):
          if self.verbose: print "[*]", "Loading midprices from %s" % filename
          f = open( filename, "rb")
          bas = cPickle.load( f)
        else:
          if self.verbose: print "[*]","Calculating midprices ..."
          bas = [ pl2.bid_ask(self._ltc_depth_offline.ix[i][0], 
                  avg=True) for i in xrange( len( self._ltc_depth_offline))]
          f = open( filename, "wb")
          if self.verbose: print "[*]", "Saving midprices to %s" % filename
          cPickle.dump( bas, f)
        self.ltc_opts["instant"] = pd.DataFrame( {"lastprice":bas}, 
                                     index=[self._ltc_depth_offline.index])
      # otherwise hand it lastprice
      else:
        self.ltc_opts["instant"] = self._ltc_offline

    self.ltc = Coin( debug=self.ltc_opts["debug"],
                     relative=self.ltc_opts["relative"],
                     calc_rolling=self.ltc_opts["calc_rolling"],
                     rolling=self.ltc_opts["rolling"],
                     calc_mid=self.ltc_opts["calc_mid"], 
                     calc_ohlc=self.ltc_opts["calc_ohlc"],
                     ohlc=self.ltc_opts["ohlc"], 
                     calc_indicators=self.ltc_opts["calc_indicators"],
                     indicators=self.ltc_opts["indicators"],
                     calc_std=self.ltc_opts["calc_std"], 
                     std=self.ltc_opts["std"],
                     calc_crt=self.ltc_opts["calc_crt"], 
                     crt=self.ltc_opts["crt"],
                     instant=self.ltc_opts["instant"], 
                     time_str=self.ltc_opts["time_str"],
                     verbose=self.verbose)

    # for gox, all I want to calculate is the EMA of the last prices ...
    # I chose last price, not mid, because I think that a lot of people
    # are trading based on the last price ticker, not where the market
    # really is.
    # prepare instant if necessary
    if self.instant:
      # seed prices with midprice
      if self.gox_opts["calc_mid"]:
        if self.verbose: print "[*]","Calculating midprices ..."
        bas = [ pl2.bid_ask(self._gox_depth_offline.ix[i][0], avg=True) for i in xrange( len( self._gox_depth_offline))]
        self.gox_opts["instant"] = pd.DataFrame( {"lastprice":bas}, index=[self._gox_depth_offline.index])
      # otherwise hand it lastprice
      else:
        self.gox_opts["instant"] = self._gox_offline

    self.gox = Coin( debug=self.gox_opts["debug"], 
                     relative=self.gox_opts["relative"],
                     calc_rolling=self.gox_opts["calc_rolling"], 
                     rolling=self.gox_opts["rolling"],
                     calc_mid=self.gox_opts["calc_mid"], 
                     calc_ohlc=self.gox_opts["calc_ohlc"],
                     ohlc=self.gox_opts["ohlc"], 
                     calc_indicators=self.gox_opts["calc_indicators"],
                     indicators=self.gox_opts["indicators"],
                     calc_std=self.gox_opts["calc_std"], 
                     std=self.gox_opts["std"],
                     calc_crt=self.gox_opts["calc_crt"], 
                     crt=self.gox_opts["crt"],
                     instant=self.gox_opts["instant"], 
                     time_str=self.gox_opts["time_str"],
                     verbose=self.verbose)
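
Buried in this constructor is the replay trick described in the docstring: when warp is off, the offset between now and the first logged timestamp is stored once (self.delta = datetime.datetime.today() - ...), so later wall-clock times can be shifted back into the historical log. A self-contained sketch of just that setup, using a made-up log start:

import datetime

# First timestamp present in the historical log (made-up example value).
log_start = datetime.datetime(2013, 4, 1, 9, 30, 0)

# Offset between "now" and the start of the log; computing it once at start-up
# lets every later query be translated from wall-clock time into log time.
delta = datetime.datetime.today() - log_start
print("Replay offset:", delta)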

Example 73

View license
  def update( self):
    """ Grab most recent prices from on/offline and append them to
        our exchange data structures.
    """
    #######################################################
    #               -- SIMULATION MODE --                 #
    #######################################################
    # simulation mode. pull most recent price from our logs and
    # append if different
    if self.live == False:
      #######################################################
      #          -- REAL TIME SIMULATION MODE --            #
      #######################################################
      # if warp is false, we will pretend this is realtime and
      # grab prices from our logs using our timedelta
      if self.warp == False:
        # calculate our timedelta from NOW!!
        adjusted_t = datetime.datetime.today() - self.delta

        # Get our last prices from the logs
        last_gox , last_ltc , last_ltc_depth = self._offline_prices( adjusted_t)

        # make sure we got a timeseries object back, otherwise we
        # hit the end of the log
        if( type(last_gox) != pd.Series or
            type(last_ltc) != pd.Series or
            type(last_ltc_depth) != pd.Series):
          if self.verbose: print "[!]", "End of log."
          return False
        # we have values, so add them to each coin
        else:
          # give coins new price changes ... them bitches'll do the rest
          self.gox.add( last_gox[0], last_gox.name)
          # bid-ask avg for LTC only
          ba = pl2.bid_ask( last_ltc_depth[0])
          self.ltc.add( last_ltc[0], last_ltc.name, ba=ba)
          return True
      #######################################################
      #               -- FAST MODE --                       #
      #######################################################
      # otherwise, we'll grab our next price from the index
      else:
        # r we about to do something stupid? (hit end of the fucking log)
        if self.logrange_n >= len(self.logrange):
          if self.verbose: print "[!]", "End of log."
          return False
        # NO!
        else:
          # get our next date in our time index & grab the prices
          t = self.logrange[self.logrange_n]
          if self._debug:
            print "\n_update"
            print "t:", t
            print "logrange:", self.logrange_n

          last_gox, last_ltc, last_ltc_depth = self._offline_prices( t)
          # get LTC market data (bid ask)
          ba = pl2.bid_ask( last_ltc_depth[0])
          
          # upd8 fuk'n coinz
          if self._debug:
            print "\n_update"
            print "\nltc"
            print "last_ltc:", last_ltc[0], last_ltc.name
            print "ba:", ba
          
          self.ltc.add( last_ltc[0], last_ltc.name, ba=ba)
          if self._debug:
            print "\ngox"
            print "last_gox:", last_gox[0], last_gox.name
          self.gox.add( last_gox[0], last_gox.name)

          # increment for the next fucking time
          self.logrange_n += 1
          return True
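
update() is the other half of that replay trick: each call computes adjusted_t = datetime.datetime.today() - self.delta and looks up the simulated price as of that historical instant. A rough sketch with a plain sorted list standing in for the original _offline_prices() lookup:

import bisect
import datetime

# Made-up historical price log: (timestamp, price) pairs, sorted by time.
log = [
    (datetime.datetime(2013, 4, 1, 9, 30), 100.0),
    (datetime.datetime(2013, 4, 1, 9, 35), 101.5),
    (datetime.datetime(2013, 4, 1, 9, 40), 99.8),
]

delta = datetime.datetime.today() - log[0][0]

def price_asof(t):
    # Return the last logged price at or before t, or None before the log starts.
    i = bisect.bisect_right([ts for ts, _ in log], t)
    return log[i - 1][1] if i else None

adjusted_t = datetime.datetime.today() - delta
print(adjusted_t, price_asof(adjusted_t))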

Example 74

Project: combined-pvalues
Source File: pipeline.py
View license
def pipeline(col_num, step, dist, acf_dist, prefix, threshold, seed,
        bed_files, mlog=True, region_filter_p=1, region_filter_n=None,
        genome_control=False, db=None, use_fdr=True):
    sys.path.insert(0, op.join(op.dirname(__file__), ".."))
    from cpv import acf, slk, fdr, peaks, region_p, stepsize, filter
    from cpv._common import genome_control_adjust, genomic_control, bediter
    import operator


    if step is None:
        step = min(acf_dist, stepsize.stepsize(bed_files, col_num))
        print >>sys.stderr, "calculated stepsize as: %i" % step

    lags = range(1, acf_dist, step)
    lags.append(lags[-1] + step)

    prefix = prefix.rstrip(".")
    putative_acf_vals = acf.acf(bed_files, lags, col_num, simple=False,
                                mlog=mlog)
    acf_vals = []
    # go out to max requested distance but stop once an autocorrelation
    # < 0.05 is added.
    for a in putative_acf_vals:
        # a is ((lmin, lmax), (corr, N))
        # this heuristic seems to work. stop just above the 0.08 correlation
        # lag.
        if a[1][0] < 0.04 and len(acf_vals) > 2: break
        acf_vals.append(a)
        if a[1][0] < 0.04 and len(acf_vals): break

    # save the arguments that this was called with.
    with open(prefix + ".args.txt", "w") as fh:
        print >>fh, " ".join(sys.argv[1:]) + "\n"
        import datetime
        print >>fh, "date: %s" % datetime.datetime.today()
        from .__init__ import __version__
        print >>fh, "version:", __version__

    with open(prefix + ".acf.txt", "w") as fh:
        acf_vals = acf.write_acf(acf_vals, fh)
        print >>sys.stderr, "wrote: %s" % fh.name
    print >>sys.stderr, "ACF:\n", open(prefix + ".acf.txt").read()

    spvals, opvals = [], []
    with ts.nopen(prefix + ".slk.bed.gz", "w") as fhslk:
        fhslk.write('#chrom\tstart\tend\tp\tregion-p\n')
        for row in slk.adjust_pvals(bed_files, col_num, acf_vals):
            fhslk.write("%s\t%i\t%i\t%.4g\t%.4g\n" % row)
            opvals.append(row[-2])
            spvals.append(row[-1])

    print >>sys.stderr, "# original lambda: %.2f" % genomic_control(opvals)
    del opvals

    gc_lambda = genomic_control(spvals)
    print >>sys.stderr, "wrote: %s with lambda: %.2f" % (fhslk.name, gc_lambda)

    if genome_control:
        fhslk = ts.nopen(prefix + ".slk.gc.bed.gz", "w")
        adj = genome_control_adjust([d['p'] for d in bediter(prefix + ".slk.bed.gz", -1)])
        for i, line in enumerate(ts.nopen(prefix + ".slk.bed.gz")):
            print >>fhslk, "%s\t%.5g" % (line.rstrip("\r\n"), adj[i])

        fhslk.close()
        print >>sys.stderr, "wrote: %s" % fhslk.name

    with ts.nopen(prefix + ".fdr.bed.gz", "w") as fh:
        fh.write('#chrom\tstart\tend\tp\tregion-p\tregion-q\n')
        for bh, l in fdr.fdr(fhslk.name, -1):
            fh.write("%s\t%.4g\n" % (l.rstrip("\r\n"), bh))
        print >>sys.stderr, "wrote: %s" % fh.name
    fregions = prefix + ".regions.bed.gz"
    with ts.nopen(fregions, "w") as fh:
        list(peaks.peaks(prefix + ".fdr.bed.gz", -1 if use_fdr else -2, threshold, seed,
            dist, fh, operator.le))
    n_regions = sum(1 for _ in ts.nopen(fregions))
    print >>sys.stderr, "wrote: %s (%i regions)" % (fregions, n_regions)
    if n_regions == 0:
        sys.exit()

    with ts.nopen(prefix + ".regions-p.bed.gz", "w") as fh:
        N = 0
        fh.write("#chrom\tstart\tend\tmin_p\tn_probes\tz_p\tz_sidak_p\n")
        # use -2 for original, uncorrected p-values in slk.bed
        for region_line, slk_p, slk_sidak_p, sim_p in region_p.region_p(
                               prefix + ".slk.bed.gz",
                               prefix + ".regions.bed.gz", -2,
                               step):
            fh.write("%s\t%.4g\t%.4g\n" % (region_line, slk_p, slk_sidak_p))
            fh.flush()
            N += int(slk_sidak_p < 0.05)
        print >>sys.stderr, "wrote: %s, (regions with corrected-p < 0.05: %i)" \
                % (fh.name, N)

    regions_bed = fh.name
    #if all(h in header for h in ('t', 'start', 'end')):
    if region_filter_n is None: region_filter_n = 0
    with ts.nopen(prefix + ".regions-t.bed", "w") as fh:
        N = 0
        for i, toks in enumerate(filter.filter(bed_files[0],
            regions_bed, p_col_name=col_num)):
            if i == 0: toks[0] = "#" + toks[0]
            else:
                if float(toks[6]) > region_filter_p: continue
                if int(toks[4]) < region_filter_n: continue
                #if region_filter_t and "/" in toks[7]:
                #    # t-pos/t-neg. if the lower one is > region_filter_t?
                #    vals = map(int, toks[7].split("/"))
                #    if min(vals) > region_filter_t: continue

                N += 1
            print >>fh, "\t".join(toks)
        print >>sys.stderr, ("wrote: %s, (regions with region-p "
                            "< %.3f and n-probes >= %i: %i)") \
                % (fh.name, region_filter_p, region_filter_n, N)

    try:
        from cpv import manhattan
        regions = manhattan.read_regions(fh.name)

        manhattan.manhattan(prefix + ".slk.bed.gz", 3, prefix.rstrip(".") + ".manhattan.png",
                         False, ['#959899', '#484B4C'], "", False, None,
                         regions=regions, bonferonni=False)
    except ImportError:
        pass # they dont have matplotlib


    if db is not None:
        from cruzdb import Genome
        g = Genome(db)
        lastf = fh.name
        with open(prefix + ".anno.%s.bed" % db, "w") as fh:
            fh.write('#')
            g.annotate(lastf, ("refGene", "cpgIslandExt"), out=fh,
                    feature_strand=True, parallel=len(spvals) > 500)
        print >>sys.stderr, "wrote: %s annotated with %s" % (fh.name, db)

Example 75

Project: django-timepiece
Source File: views.py
View license
    def get_context_data(self, **kwargs):
        context = super(ProjectTimesheet, self).get_context_data(**kwargs)
        project = self.object
        year_month_form = YearMonthForm(self.request.GET or None)
        if self.request.GET and year_month_form.is_valid():
            from_date, to_date = year_month_form.save()
        else:
            date = utils.add_timezone(datetime.datetime.today())
            from_date = utils.get_month_start(date).date()
            to_date = from_date + relativedelta(months=1)
        entries_qs = Entry.objects
        entries_qs = entries_qs.timespan(from_date, span='month').filter(
            project=project
        )
        extra_values = ('start_time', 'end_time', 'comments', 'seconds_paused',
                        'id', 'location__name', 'project__name',
                        'activity__name', 'status')

        month_entries = entries_qs.date_trunc('month', extra_values).order_by('start_time')
        if month_entries:
            format_totals(month_entries, "hours")

        total = entries_qs.aggregate(hours=Sum('hours'))['hours']
        if total:
            total = "{0:.2f}".format(total)
        user_entries = entries_qs.order_by().values('user__first_name', 'user__last_name')
        user_entries = user_entries.annotate(sum=Sum('hours')).order_by('-sum')
        if user_entries:
            format_totals(user_entries)
        activity_entries = entries_qs.order_by().values('activity__name')
        activity_entries = activity_entries.annotate(sum=Sum('hours')).order_by('-sum')
        if activity_entries:
            format_totals(activity_entries)

        context.update({
            'project': project,
            'year_month_form': year_month_form,
            'from_date': from_date,
            'to_date': to_date - relativedelta(days=1),
            'entries': month_entries,
            'total': total,
            'user_entries': user_entries,
            'activity_entries': activity_entries,
        })
        return context
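
When the year/month form is empty, the view falls back to the current month: datetime.datetime.today() is localized, truncated to the first of the month, and relativedelta(months=1) supplies the exclusive end. A sketch of that fallback without the Django-specific helpers (utils.add_timezone and utils.get_month_start belong to the project; relativedelta comes from the third-party dateutil package, and timezone handling is skipped here):

import datetime
from dateutil.relativedelta import relativedelta

today = datetime.datetime.today()

# First day of the current month, and the first day of the next month.
from_date = today.date().replace(day=1)
to_date = from_date + relativedelta(months=1)

print(from_date, "to", to_date - relativedelta(days=1))  # inclusive display range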

Example 76

View license
    def export_to_csv(self, model_name):
        self.header('Exporting models ...')

        today = datetime.datetime.today()
        model = get_model('calaccess_campaign_browser', model_name)

        fieldnames = [f.name for f in model._meta.fields] + [
            'committee_name', 'filer_name', 'filer_id', 'filer_id_raw']

        relation_names = [f.name for f in model._meta.fields] + [
            'committee__name',
            'committee__filer__name',
            'committee__filer__id',
            'committee__filer__filer_id_raw'
        ]

        filename = '{}-{}-{}-{}.csv'.format(
            today.year,
            today.month,
            today.day,
            model_name.lower()
        )
        filepath = os.path.join(self.data_dir, filename)

        self.header('  Exporting {} model ...'.format(model_name.capitalize()))

        with open(filepath, 'wb') as csvfile:
            writer = csv.writer(csvfile, delimiter="\t")
            writer.writerow(fieldnames)

            if model_name != 'summary':
                for cycle in Cycle.objects.all():
                    self.log('    Looking at cycle {} ...'.format(cycle.name))
                    rows = model.objects.filter(cycle=cycle)\
                        .exclude(is_duplicate=True)\
                        .values_list(*relation_names)

                    if not rows:
                        self.failure('      No data for {}'.format(cycle.name))

                    else:
                        rows = self.encoded(rows)
                        writer.writerows(rows)
                        self.success('      Added {} {} data'.format(
                            cycle.name, model_name))
            else:
                rows = self.encoded(model.objects.values_list())
                writer.writerows(rows)

        self.success('  Exported {}!'.format(model_name.capitalize()))
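
datetime.datetime.today() here just feeds a date-stamped export filename of the form YYYY-M-D-model.csv. The naming logic in isolation (note that month and day are not zero-padded with this format; today.strftime('%Y-%m-%d') would pad them):

import datetime
import os

today = datetime.datetime.today()
model_name = "Contribution"  # made-up model name

filename = '{}-{}-{}-{}.csv'.format(
    today.year, today.month, today.day, model_name.lower())
filepath = os.path.join("/tmp", filename)

print(filepath)  # e.g. /tmp/2016-7-4-contribution.csv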

Example 77

Project: c2c-rd-addons
Source File: c2c_budget_item.py
View license
    def __compute_real_sum(self, cr, uid, ids, field_names, arg=None, context=None,
                  query='', query_params=''):
        """ compute the balance for the provided
        budget_item_ids
        Arguments:
        `ids`: account ids
        `field_names`: the fields to compute (a list of any of
                       'balance', 'debit' and 'credit')
        `arg`: unused fields.function stuff
        `query`: additional query filter (as a string)
        `query_params`: parameters for the provided query string
                        (__compute will handle their escaping) as a
                        tuple
        """
        mapping = {
            'balance_real': "sum(credit) - sum(debit) as balance_real" ,
        }

        #get all the necessary accounts
        children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)

        #compute for each account the balance/debit/credit from the move lines
        accounts = {}
        if children_and_consolidated:
            # FIXME allow only fy and period filters
            # remove others filters from context or raise error
            #aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)

            #wheres = [""]
            #if query.strip():
            #    wheres.append(query.strip())
            #if aml_query.strip():
            #    wheres.append(aml_query.strip())
            #filters = " AND ".join(wheres)
            #filters = ' AND period_id in ( select id from account_period where fiscalyear_id = %s ) ' % context.get('fiscalyear', False) 
            if context.get('periods', False):
                periods = context.get('periods', False)
            else:
               # default if started without form
               date = time.strftime('%Y-%m-%d')
               date2a = datetime.datetime.today() + relativedelta(months=+1)
               date2 = date2a.strftime('%Y-%m-%d')
               fiscalyear_pool = self.pool.get('account.fiscalyear')
               fy_id = fiscalyear_pool.search(cr, uid, [('date_start','<=',date), ('date_stop','>=',date)])
               period_pool = self.pool.get('account.period')
               periods = period_pool.search(cr, uid, [('fiscalyear_id','in',fy_id), ('date_stop','<=',date2)])

            # FIXME - tuple must not return ',' if only one period is available - period_id in ( p,) should be period_id in ( p )
            filters = ' AND period_id in (%s) ' % (','.join(map(str,periods)) )
            # IN might not work ideally in case there are too many
            # children_and_consolidated, in that case join on a
            # values() e.g.:
            # SELECT l.account_id as id FROM account_move_line l
            # INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
            # ON l.account_id = tmp.id
            # or make _get_children_and_consol return a query and join on that
            if not query_params:
                 query_params = 'null'
            request = ("SELECT i.id as id, " +\
                       ', '.join(map(mapping.__getitem__, field_names)) +
                       " FROM account_account_period_sum l," \
                       "      c2c_budget_item i," \
                       "      c2c_budget_item_account_rel r " \
                       " WHERE l.account_id = r.account_id " \
                       "   AND i.id = r.budget_item_id " \
             #          "   AND i.id IN (%s) " \
                            + filters +
                       " GROUP BY i.id") #% (query_params)
            #params = (tuple(children_and_consolidated),) + query_params
            self._logger.error('children and consolidated FGF:  %s/ %s', children_and_consolidated, query_params)
            self._logger.error('children and consolidated FGF:  %s/ %s ', ', '.join(map(str,children_and_consolidated)), query_params)
            params = (', '.join(map(str,children_and_consolidated))) 
            cr.execute(request, params)
            #                          'Status: %s'%cr.statusmessage)

            for res in cr.dictfetchall():
                accounts[res['id']] = res

            # consolidate accounts with direct children
            children_and_consolidated.reverse()
            brs = list(self.browse(cr, uid, children_and_consolidated, context=context))

            sums = {}
            currency_obj = self.pool.get('res.currency')
            while brs:
                current = brs[0]
#                can_compute = True
#                for child in current.children_ids:
#                    if child.id not in sums:
#                        can_compute = False
#                        try:
#                            brs.insert(0, brs.pop(brs.index(child)))
#                        except ValueError:
#                            brs.insert(0, child)
#                if can_compute:
                brs.pop(0)
                for fn in field_names:
                    sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
                    for child in current.children_ids:
                        if child.company_id.currency_id.id == current.company_id.currency_id.id:
                            #FIXME Data error ?
                            try:
                               sums[current.id][fn] += sums[child.id][fn]
                            except:
                               print ' sums[current.id][fn] += sums[child.id][fn]'
                        else:
                            sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
            res = {}
            null_result = dict((fn, 0.0) for fn in field_names)
            for id in ids:
                res[id] = sums.get(id, null_result)
            return res
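
The date handling in this method reduces to two values: today as a %Y-%m-%d string for the fiscal-year lookup, and datetime.datetime.today() + relativedelta(months=+1) as the upper bound on the accounting periods considered. That fragment on its own (relativedelta presumably comes from the third-party dateutil package, as is usual in OpenERP modules):

import datetime
import time
from dateutil.relativedelta import relativedelta

date = time.strftime('%Y-%m-%d')  # today, used for the fiscal-year lookup
date2 = (datetime.datetime.today() + relativedelta(months=+1)).strftime('%Y-%m-%d')

print(date, "->", date2)  # periods ending up to one month from today are selected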

Example 78

Project: c2c-rd-addons
Source File: c2c_budget_item.py
View license
    def __compute_budget_sum(self, cr, uid, ids, field_names, arg=None, context=None,
                  query='', query_params=()):
        """ compute the balance for the provided
        budget_item_ids
        Arguments:
        `ids`: account ids
        `field_names`: the fields to compute (a list of any of
                       'balance', 'debit' and 'credit')
        `arg`: unused fields.function stuff
        `query`: additional query filter (as a string)
        `query_params`: parameters for the provided query string
                        (__compute will handle their escaping) as a
                        tuple
        """
        mapping = {
            'balance_budget': "sum(amount) as balance_budget" ,
        }

        #get all the necessary accounts
        children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)

        #compute for each account the balance/debit/credit from the move lines
        accounts = {}
        if children_and_consolidated:
            # FIXME allow only fy and period filters
            # remove others filters from context or raise error
            #aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)

            #wheres = [""]
            #if query.strip():
            #    wheres.append(query.strip())
            #if aml_query.strip():
            #    wheres.append(aml_query.strip())
            #filters = " AND ".join(wheres)
            #filters = ' AND period_id in ( select id from account_period where fiscalyear_id = %s ) ' % context.get('fiscalyear', False)
            if context.get('periods', False):
                periods = context.get('periods', False)
            else:
               # default if started without form
               date = time.strftime('%Y-%m-%d')
               date2a = datetime.datetime.today() + relativedelta(months=+1)
               date2 = date2a.strftime('%Y-%m-%d')
               #date2 = (datetime.today() + relativedelta(months=+1)).strftime('%Y-%m-%d')
               #date2 = (datetime.today() + relativedelta(years=-1)).strftime('%Y-%m-%d')
               fiscalyear_pool = self.pool.get('account.fiscalyear')
               fy_id = fiscalyear_pool.search(cr, uid, [('date_start','<=',date), ('date_stop','>=',date)])
               period_pool = self.pool.get('account.period')
               periods = period_pool.search(cr, uid, [('fiscalyear_id','in',fy_id),('date_stop','<=',date2)])

            # FIXME - tuple must not return ',' if only one period is available - period_id in ( p,) should be period_id in ( p )
            filters = ' AND period_id in (%s) ' % (','.join(map(str,periods)) )
            self._logger.error('periods FGF: %s %s', periods, tuple(periods))
            # IN might not work ideally in case there are too many
            # children_and_consolidated, in that case join on a
            # values() e.g.:
            # SELECT l.account_id as id FROM account_move_line l
            # INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
            # ON l.account_id = tmp.id
            # or make _get_children_and_consol return a query and join on that
            if not query_params:
                query_params = '%'
            request = ("SELECT l.budget_item_id as id, " +\
                       ', '.join(map(mapping.__getitem__, field_names)) +
                       " FROM c2c_budget_line l" \
                       " WHERE l.budget_item_id >0 " 
                            + filters +
                       " GROUP BY l.budget_item_id") 
            params = (tuple(children_and_consolidated),) 
            cr.execute(request, params)
            #                          'Status: %s'%cr.statusmessage)

            for res in cr.dictfetchall():
                accounts[res['id']] = res

            # consolidate accounts with direct children
            children_and_consolidated.reverse()
            brs = list(self.browse(cr, uid, children_and_consolidated, context=context))

            sums = {}
            currency_obj = self.pool.get('res.currency')
            while brs:
                current = brs[0]
#                can_compute = True
#                for child in current.children_ids:
#                    if child.id not in sums:
#                        can_compute = False
#                        try:
#                            brs.insert(0, brs.pop(brs.index(child)))
#                        except ValueError:
#                            brs.insert(0, child)
#                if can_compute:
                brs.pop(0)
                for fn in field_names:
                    sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
                    for child in current.children_ids:
                        if child.company_id.currency_id.id == current.company_id.currency_id.id:
                            #FIXME Data error ?
                            try:
                               sums[current.id][fn] += sums[child.id][fn]
                            except:
                               self._logger.debug('sums[current.id][fn] += sums[child.id][fn] `%s` `%s`', current.id, child.id)
                        else:
                            sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
            res = {}
            null_result = dict((fn, 0.0) for fn in field_names)
            for id in ids:
                res[id] = sums.get(id, null_result)
            return res

Example 79

Project: c2c-rd-addons
Source File: c2c_budget_item.py
View license
    def __compute_real_sum(self, cr, uid, ids, field_names, arg=None, context=None,
                  query='', query_params=''):
        """ compute the balance for the provided
        budget_item_ids
        Arguments:
        `ids`: account ids
        `field_names`: the fields to compute (a list of any of
                       'balance', 'debit' and 'credit')
        `arg`: unused fields.function stuff
        `query`: additional query filter (as a string)
        `query_params`: parameters for the provided query string
                        (__compute will handle their escaping) as a
                        tuple
        """
        mapping = {
            'balance_real': "sum(credit) - sum(debit) as balance_real" ,
        }

        #get all the necessary accounts
        children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)

        #compute for each account the balance/debit/credit from the move lines
        accounts = {}
        if children_and_consolidated:
            # FIXME allow only fy and period filters
            # remove others filters from context or raise error
            #aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)

            #wheres = [""]
            #if query.strip():
            #    wheres.append(query.strip())
            #if aml_query.strip():
            #    wheres.append(aml_query.strip())
            #filters = " AND ".join(wheres)
            #filters = ' AND period_id in ( select id from account_period where fiscalyear_id = %s ) ' % context.get('fiscalyear', False) 
            if context.get('periods', False):
                periods = context.get('periods', False)
            else:
               # default if started without form
               date = time.strftime('%Y-%m-%d')
               date2a = datetime.datetime.today() + relativedelta(months=+1)
               date2 = date2a.strftime('%Y-%m-%d')
               fiscalyear_pool = self.pool.get('account.fiscalyear')
               fy_id = fiscalyear_pool.search(cr, uid, [('date_start','<=',date), ('date_stop','>=',date)],context=context)
               period_pool = self.pool.get('account.period')
               periods = period_pool.search(cr, uid, [('fiscalyear_id','in',fy_id), ('date_stop','<=',date2)])

            # FIXME - tuple must not return ',' if only one period is available - period_id in ( p,) should be period_id in ( p )
            filters = ' AND period_id in (%s) ' % (','.join(map(str,periods)) )
            company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
            filters += ' AND l.company_id = %s ' % ( company_id)
            # IN might not work ideally in case there are too many
            # children_and_consolidated, in that case join on a
            # values() e.g.:
            # SELECT l.account_id as id FROM account_move_line l
            # INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
            # ON l.account_id = tmp.id
            # or make _get_children_and_consol return a query and join on that
            if not query_params:
                 query_params = 'null'
            request = ("SELECT i.id as id, " +\
                       ', '.join(map(mapping.__getitem__, field_names)) +
                       " FROM account_account_period_sum l," \
                       "      c2c_budget_item i," \
                       "      c2c_budget_item_account_rel r " \
                       " WHERE l.account_id = r.account_id " \
                       "   AND i.id = r.budget_item_id " \
             #          "   AND i.id IN (%s) " \
                            + filters +
                       " GROUP BY i.id") #% (query_params)
            #params = (tuple(children_and_consolidated),) + query_params
            self._logger.error('children and consolidated FGF:  %s/ %s', children_and_consolidated, query_params)
            self._logger.error('children and consolidated FGF:  %s/ %s ', ', '.join(map(str,children_and_consolidated)), query_params)
            params = (', '.join(map(str,children_and_consolidated))) 
            cr.execute(request, params)
            #                          'Status: %s'%cr.statusmessage)

            for res in cr.dictfetchall():
                accounts[res['id']] = res

            # consolidate accounts with direct children
            children_and_consolidated.reverse()
            brs = list(self.browse(cr, uid, children_and_consolidated, context=context))

            sums = {}
            currency_obj = self.pool.get('res.currency')
            while brs:
                current = brs[0]
#                can_compute = True
#                for child in current.children_ids:
#                    if child.id not in sums:
#                        can_compute = False
#                        try:
#                            brs.insert(0, brs.pop(brs.index(child)))
#                        except ValueError:
#                            brs.insert(0, child)
#                if can_compute:
                brs.pop(0)
                for fn in field_names:
                    sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
                    for child in current.children_ids:
                        #if child.company_id.currency_id.id == current.company_id.currency_id.id:
                            #FIXME Data error ?
                            # sums include only lines with postings, whereas current includes all accounts
                           if sums.get(current.id) and sums.get(child.id):
                           #try:
                               sums[current.id][fn] += sums[child.id][fn]
                               #print 'OK sums[current.id][fn] += sums[child.id][fn] %s %s' % ( current.id , child.id)
                           #except:
                           #    print 'NOK sums[current.id][fn] += sums[child.id][fn] %s %s' % ( current.id , child.id)
                        #else:
                        #    sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
        res = {}
        null_result = dict((fn, 0.0) for fn in field_names)
        for id in ids:
            res[id] = sums.get(id, null_result)
        return res
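
A note on the date handling above: when no period filter is supplied, the function falls back to datetime.datetime.today() and dateutil's relativedelta to build a window from today to one month ahead before searching for the fiscal year. A minimal standalone sketch of just that date arithmetic (assuming python-dateutil is installed; the variable names are illustrative, not from the module):

import datetime
from dateutil.relativedelta import relativedelta

today_str = datetime.datetime.today().strftime('%Y-%m-%d')
one_month_ahead = datetime.datetime.today() + relativedelta(months=+1)
window_end = one_month_ahead.strftime('%Y-%m-%d')

# In the example these strings bound the fiscal-year and period searches,
# e.g. ('date_start', '<=', today_str) and ('date_stop', '<=', window_end).
print(today_str, window_end)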

Example 80

Project: c2c-rd-addons
Source File: c2c_budget_item.py
View license
    def __compute_budget_sum(self, cr, uid, ids, field_names, arg=None, context=None,
                  query='', query_params=()):
        """ compute the balance for the provided
        budget_item_ids
        Arguments:
        `ids`: account ids
        `field_names`: the fields to compute (a list of any of
                       'balance', 'debit' and 'credit')
        `arg`: unused fields.function stuff
        `query`: additional query filter (as a string)
        `query_params`: parameters for the provided query string
                        (__compute will handle their escaping) as a
                        tuple
        """
        mapping = {
            'balance_budget': "sum(amount) as balance_budget" ,
        }

        #get all the necessary accounts
        children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)

        #compute for each account the balance/debit/credit from the move lines
        accounts = {}
        if children_and_consolidated:
            # FIXME allow only fy and period filters
            # remove others filters from context or raise error
            #aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)

            #wheres = [""]
            #if query.strip():
            #    wheres.append(query.strip())
            #if aml_query.strip():
            #    wheres.append(aml_query.strip())
            #filters = " AND ".join(wheres)
            #filters = ' AND period_id in ( select id from account_period where fiscalyear_id = %s ) ' % context.get('fiscalyear', False)
            if context.get('periods_budget', False):
                periods = context['periods_budget']
            else:
               # default if started without form
               date = time.strftime('%Y-%m-%d')
               date2a = datetime.datetime.today() + relativedelta(months=+1)
               date2 = date2a.strftime('%Y-%m-%d')
               #date2 = (datetime.today() + relativedelta(months=+1)).strftime('%Y-%m-%d')
               #date2 = (datetime.today() + relativedelta(years=-1)).strftime('%Y-%m-%d')
               fiscalyear_pool = self.pool.get('account.fiscalyear')
               fy_id = fiscalyear_pool.search(cr, uid, [('date_start','<=',date), ('date_stop','>=',date)],context=context)
               period_pool = self.pool.get('account.period')
               periods = period_pool.search(cr, uid, [('fiscalyear_id','in',fy_id),('date_stop','<=',date2)])

            # FIXME - tuple must not return ',' if only one period is available - period_id in ( p,) should be period_id in ( p )
            filters = ' AND period_id in (%s) ' % (','.join(map(str,periods)))
            # if no budget versions are defined we take all budget data of the matching periods
            if context.get('budget_version_ids'):
                budget_version_ids = context['budget_version_ids']
                filters += ' AND budget_version_id in ( %s ) ' % ( budget_version_ids )
            self._logger.error('periods FGF: %s %s', periods, tuple(periods))
            # IN might not work ideally in case there are too many
            # children_and_consolidated, in that case join on a
            # values() e.g.:
            # SELECT l.account_id as id FROM account_move_line l
            # INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
            # ON l.account_id = tmp.id
            # or make _get_children_and_consol return a query and join on that
            if not query_params:
                query_params = '%'
            request = ("SELECT l.budget_item_id as id, " +\
                       ', '.join(map(mapping.__getitem__, field_names)) +
                       " FROM c2c_budget_line l" \
                       " WHERE l.budget_item_id >0 " 
                            + filters +
                       " GROUP BY l.budget_item_id") 
            params = (tuple(children_and_consolidated),) 
            cr.execute(request, params)
            #                          'Status: %s'%cr.statusmessage)

            for res in cr.dictfetchall():
                accounts[res['id']] = res

            # consolidate accounts with direct children
            children_and_consolidated.reverse()
            brs = list(self.browse(cr, uid, children_and_consolidated, context=context))

            sums = {}
            currency_obj = self.pool.get('res.currency')
            while brs:
                current = brs[0]
#                can_compute = True
#                for child in current.children_ids:
#                    if child.id not in sums:
#                        can_compute = False
#                        try:
#                            brs.insert(0, brs.pop(brs.index(child)))
#                        except ValueError:
#                            brs.insert(0, child)
#                if can_compute:
                brs.pop(0)
                for fn in field_names:
                    sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
                    for child in current.children_ids:
                        #if child.company_id.currency_id.id == current.company_id.currency_id.id:
                            #FIXME Data error ?
                            try:
                               sums[current.id][fn] += sums[child.id][fn]
                            except:
                               self._logger.debug('sums[current.id][fn] += sums[child.id][fn] `%s` `%s`', current.id, child.id)
                        #else:
                        #    sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
        res = {}
        null_result = dict((fn, 0.0) for fn in field_names)
        for id in ids:
            res[id] = sums.get(id, null_result)
        return res

Example 81

Project: c2c-rd-addons
Source File: chricar_account.py
View license
    def __compute_prev_sum(self, cr, uid, ids, field_names, arg=None, context=None,
                  query=None, query_params=None):
        """ compute the balance, debit and/or credit for the provided
        account ids
        Arguments:
        `ids`: account ids
        `field_names`: the fields to compute (a list of any of
                       'balance', 'debit' and 'credit')
        `arg`: unused fields.function stuff
        `query`: additional query filter (as a string)
        `query_params`: parameters for the provided query string
                        (__compute will handle their escaping) as a
                        tuple
        """
        mapping = {
             'balance_prev_sum': "sum(debit) - sum(credit) as balance_prev_sum" ,
        }
        #get all the necessary accounts
        children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)
        self._logger.debug('Children: `%s`', children_and_consolidated)

        #compute for each account the balance/debit/credit from the move lines
        accounts = {}
        if children_and_consolidated :
            # FIXME allow only fy and period filters
            # remove others filters from context or raise error
            #aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)

            #wheres = [""]
            #if query.strip():
            #    wheres.append(query.strip())
            #if aml_query.strip():
            #    wheres.append(aml_query.strip())
            #filters = " AND ".join(wheres)
            if context.get('periods_prev', False):
                periods_prev = context.get('periods_prev', False)
            else:
                # default if started without form
                date = (datetime.today() + relativedelta(years=-1)).strftime('%Y-%m-%d')
                fiscalyear_pool = self.pool.get('account.fiscalyear')
                fy_id = fiscalyear_pool.search(cr, uid, [('date_start','<=',date), ('date_stop','>=',date)])
                period_pool = self.pool.get('account.period')
                periods_prev = period_pool.search(cr, uid, [('fiscalyear_id','in',fy_id),('date_start','<=',date)])
            if periods_prev and len(periods_prev) > 0:
                filters = ' AND period_id in (%s) ' % (','.join(map(str,periods_prev)) )
            else:
                filters = ' AND 1=2'
            # IN might not work ideally in case there are too many
            # children_and_consolidated, in that case join on a
            # values() e.g.:
            # SELECT l.account_id as id FROM account_move_line l
            # INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
            # ON l.account_id = tmp.id
            # or make _get_children_and_consol return a query and join on that
            if not query_params:
                query_params = ''
            params = (', '.join(map(str,children_and_consolidated)))
            request = ("SELECT l.account_id as id, " +\
                       ', '.join(map(mapping.__getitem__, field_names)) +
                       " FROM account_account_period_sum l" \
                       " WHERE l.account_id IN (%s) " \
                            + filters +
                       " GROUP BY l.account_id") % (params)
            #params = (tuple(children_and_consolidated),)
            #params = (tuple(children_and_consolidated),) + query_params
            #params = (', '.join(map(str,children_and_consolidated)))
            self._logger.debug('Request: `%s`', request)
            self._logger.debug('Params: `%s`', params)
            cr.execute(request)
            self._logger.debug('Status: `%s`', cr.statusmessage)

            for res in cr.dictfetchall():
                accounts[res['id']] = res

            # consolidate accounts with direct children
            children_and_consolidated.reverse()
            brs = list(self.browse(cr, uid, children_and_consolidated, context=context))

            sums = {}
            currency_obj = self.pool.get('res.currency')
            while brs:
                current = brs[0]
#                can_compute = True
#                for child in current.child_id:
#                    if child.id not in sums:
#                        can_compute = False
#                        try:
#                            brs.insert(0, brs.pop(brs.index(child)))
#                        except ValueError:
#                            brs.insert(0, child)
#                if can_compute:
                brs.pop(0)
                for fn in field_names:
                    sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
                    for child in current.child_id:
                        if child.company_id.currency_id.id == current.company_id.currency_id.id:
                            sums[current.id][fn] += sums[child.id][fn]
                        else:
                            sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
            res = {}
            null_result = dict((fn, 0.0) for fn in field_names)
            for id in ids:
                res[id] = sums.get(id, null_result)
            return res
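
Here the fallback shifts today's date back one year, so the fiscal-year lookup resolves to the previous year rather than the current one. A self-contained sketch of that shift (assuming python-dateutil; names are illustrative):

import datetime
from dateutil.relativedelta import relativedelta

# a date twelve months before today, formatted as the example's search domain expects
prev_year_date = (datetime.datetime.today() + relativedelta(years=-1)).strftime('%Y-%m-%d')
print(prev_year_date)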

Example 82

Project: cdr-stats
Source File: views.py
View license
@permission_required('user_profile.search', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def cdr_view(request):
    """List of CDRs

    **Attributes**:

        * ``template`` - cdr/list.html
        * ``form`` - CdrSearchForm

    **Logic Description**:

        * get the call records as well as daily call analytics
          from postgresql according to search parameters
    """
    logging.debug('CDR View Start')
    result = 1  # default min
    switch_id = 0  # default all
    hangup_cause_id = 0  # default all
    destination, destination_type, accountcode = '', '', ''
    direction, duration, duration_type = '', '', ''
    caller_id_number, caller_id_number_type, country_id = '', '', ''
    action = 'tabs-1'
    menu = 'on'
    records_per_page = settings.PAGE_SIZE

    form = CdrSearchForm(request.POST or None)
    if form.is_valid():
        logging.debug('CDR Search View')
        # set session var value
        field_list = ['destination', 'result', 'destination_type', 'accountcode',
                      'caller_id_number', 'caller_id_number_type', 'duration',
                      'duration_type', 'hangup_cause_id', 'switch_id', 'direction',
                      'country_id', 'export_query_var']
        unset_session_var(request, field_list)
        from_date = getvar(request, 'from_date', setsession=False)
        to_date = getvar(request, 'to_date', setsession=False)
        result = getvar(request, 'result', setsession=True)
        destination = getvar(request, 'destination', setsession=True)
        destination_type = getvar(request, 'destination_type', setsession=True)
        accountcode = getvar(request, 'accountcode', setsession=True)
        caller_id_number = getvar(request, 'caller_id_number', setsession=True)
        caller_id_number_type = getvar(request, 'caller_id_number_type', setsession=True)
        duration = getvar(request, 'duration', setsession=True)
        duration_type = getvar(request, 'duration_type', setsession=True)
        direction = getvar(request, 'direction', setsession=True)
        if direction and direction != 'all' and direction != '0':
            request.session['session_direction'] = str(direction)

        switch_id = getvar(request, 'switch_id', setsession=True)
        hangup_cause_id = getvar(request, 'hangup_cause_id', setsession=True)
        records_per_page = getvar(request, 'records_per_page', setsession=True)

        country_id = form.cleaned_data.get('country_id')
        # convert list value in int
        country_id = [int(row) for row in country_id]
        if len(country_id) >= 1:
            request.session['session_country_id'] = country_id

        start_date = ceil_strdate(str(from_date), 'start', True)
        end_date = ceil_strdate(str(to_date), 'end', True)
        converted_start_date = start_date.strftime('%Y-%m-%d %H:%M')
        converted_end_date = end_date.strftime('%Y-%m-%d %H:%M')
        request.session['session_start_date'] = converted_start_date
        request.session['session_end_date'] = converted_end_date

    menu = show_menu(request)

    using_session = False
    # Display a specific page or sort
    if request.GET.get('page') or request.GET.get('sort_by'):
        using_session = True
        from_date = start_date = request.session.get('session_start_date')
        to_date = end_date = request.session.get('session_end_date')
        start_date = ceil_strdate(start_date, 'start', True)
        end_date = ceil_strdate(end_date, 'end', True)

        destination = request.session.get('session_destination')
        destination_type = request.session.get('session_destination_type')
        accountcode = request.session.get('session_accountcode')
        caller_id_number = request.session.get('session_caller_id_number')
        caller_id_number_type = request.session.get('session_caller_id_number_type')
        duration = request.session.get('session_duration')
        duration_type = request.session.get('session_duration_type')
        direction = request.session.get('session_direction')
        switch_id = request.session.get('session_switch_id')
        hangup_cause_id = request.session.get('session_hangup_cause_id')
        result = request.session.get('session_result')
        records_per_page = request.session.get('session_records_per_page')
        country_id = request.session['session_country_id']

    # Set default because we display the page for the first time
    if request.method == 'GET' and not using_session:
        tday = datetime.today()
        from_date = datetime(tday.year, tday.month, 1, 0, 0, 0, 0)
        last_day = ((datetime(tday.year, tday.month, 1, 23, 59, 59, 999999) +
                     relativedelta(months=1)) -
                    relativedelta(days=1)).strftime('%d')
        # to_date = tday.strftime('%Y-%m-' + last_day + ' 23:59')
        to_date = datetime(tday.year, tday.month, int(last_day), 23, 59, 59, 999999)
        start_date = ceil_strdate(str(from_date), 'start', True)
        end_date = ceil_strdate(str(to_date), 'end', True)

        converted_start_date = start_date.strftime('%Y-%m-%d %H:%M')
        converted_end_date = end_date.strftime('%Y-%m-%d %H:%M')
        request.session['session_start_date'] = converted_start_date
        request.session['session_end_date'] = converted_end_date
        request.session['session_result'] = 1
        field_list = [
            'destination', 'destination_type', 'accountcode',
            'caller_id_number', 'caller_id_number_type', 'duration',
            'duration_type', 'hangup_cause_id',
            'switch_id', 'direction', 'country_id']
        unset_session_var(request, field_list)
        request.session['session_records_per_page'] = records_per_page
        request.session['session_country_id'] = ''

    # Define no of records per page
    records_per_page = int(records_per_page)

    sort_col_field_list = ['id', 'caller_id_number', 'destination_number', 'starting_date']
    page_vars = get_pagination_vars(request, sort_col_field_list, default_sort_field='id')

    # Build filter for CDR.object
    kwargs = {}
    if hangup_cause_id and hangup_cause_id != '0':
        kwargs['hangup_cause_id'] = int(hangup_cause_id)

    if switch_id and switch_id != '0':
        kwargs['switch_id'] = int(switch_id)

    if direction and direction != 'all' and direction != "0":
        kwargs['direction'] = direction

    if len(country_id) >= 1 and country_id[0] != 0:
        kwargs['country_id__in'] = country_id

    if start_date:
        kwargs['starting_date__gte'] = start_date

    if end_date:
        kwargs['starting_date__lte'] = end_date

    if destination:
        operator_query = get_filter_operator_str('destination_number', destination_type)
        kwargs[operator_query] = destination

    if duration:
        operator_query = get_filter_operator_int('duration', duration_type)
        kwargs[operator_query] = duration

    if caller_id_number:
        operator_query = get_filter_operator_str('caller_id_number', caller_id_number_type)
        kwargs[operator_query] = caller_id_number

    # users are restricted to their own CDRs
    if not request.user.is_superuser:
        kwargs['user_id'] = request.user.id

    if request.user.is_superuser and accountcode:
        try:
            acc = AccountCode.objects.get(accountcode=accountcode)
            kwargs['user_id'] = acc.user.id
            # on specific accountcode filter let only display that one
            kwargs['accountcode'] = accountcode
        except AccountCode.DoesNotExist:
            # cannot find a user for this accountcode
            pass

    cdrs = CDR.objects.filter(**kwargs).order_by(page_vars['sort_order'])
    page_cdr_list = cdrs[page_vars['start_page']:page_vars['end_page']]
    cdr_count = cdrs.count()

    logging.debug('Create cdr result')

    # store query_var in session without date
    export_kwargs = kwargs.copy()
    if 'starting_date__gte' in export_kwargs:
        export_kwargs['starting_date__gte'] = export_kwargs['starting_date__gte'].strftime('%Y-%m-%dT%H:%M:%S')
    if 'starting_date__lte' in export_kwargs:
        export_kwargs['starting_date__lte'] = export_kwargs['starting_date__lte'].strftime('%Y-%m-%dT%H:%M:%S')

    request.session['session_export_kwargs'] = export_kwargs

    form = CdrSearchForm(
        initial={
            'from_date': from_date,
            'to_date': to_date,
            'destination': destination,
            'destination_type': destination_type,
            'accountcode': accountcode,
            'caller_id_number': caller_id_number,
            'caller_id_number_type': caller_id_number_type,
            'duration': duration,
            'duration_type': duration_type,
            'result': result,
            'direction': direction,
            'hangup_cause_id': hangup_cause_id,
            'switch_id': switch_id,
            'country_id': country_id,
            'records_per_page': records_per_page
        }
    )

    template_data = {
        'page_cdr_list': page_cdr_list,
        'cdrs': cdrs,
        'form': form,
        'cdr_count': cdr_count,
        'cdr_daily_data': {},
        'col_name_with_order': page_vars['col_name_with_order'],
        'menu': menu,
        'start_date': start_date,
        'end_date': end_date,
        'action': action,
        'result': result,
        'CDR_COLUMN_NAME': CDR_COLUMN_NAME,
        'records_per_page': records_per_page,
        'up_icon': '<i class="glyphicon glyphicon-chevron-up"></i>',
        'down_icon': '<i class="glyphicon glyphicon-chevron-down"></i>'
    }
    logging.debug('CDR View End')
    return render_to_response('cdr/list.html', template_data, context_instance=RequestContext(request))
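
When the page is opened without a search or session, this view derives its default range from today's date: the first day of the current month and the last moment of its final day. A minimal sketch of that month-boundary computation using datetime.datetime.today() and relativedelta (assuming python-dateutil; the view's other helpers are omitted):

import datetime
from dateutil.relativedelta import relativedelta

tday = datetime.datetime.today()
month_start = datetime.datetime(tday.year, tday.month, 1, 0, 0, 0, 0)
# last day of the month: move to the same point one month later, then step back one day
month_end = (datetime.datetime(tday.year, tday.month, 1, 23, 59, 59, 999999)
             + relativedelta(months=1)) - relativedelta(days=1)
print(month_start, month_end)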

Example 83

Project: cdr-stats
Source File: views.py
View license
@permission_required('user_profile.daily_comparison', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_daily_comparison(request):
    """
    Hourly CDR graph that compare with previous dates

    **Attributes**:

        * ``template`` - cdr/daily_comparison.html
        * ``form`` - CompareCallSearchForm

    **Logic Description**:

        get the call records aggregated from the CDR table
        using the materialized view and compare with other date records


    # hourly_charttype = "lineWithFocusChart"
    # daily_charttype = "lineWithFocusChart"
    # hourly_chartdata = {'x': []}
    # daily_chartdata = {'x': []}
    # metric = 'nbcalls'  # Default metric

    """
    # Default
    metric = 'nbcalls'
    switch_id = 0
    hourly_charttype = "multiBarChart"
    hourly_chartdata = {'x': []}
    compare_days = 2
    compare_type = COMPARE_WITH.previous_days
    today_date = datetime.today()
    form = CompareCallSearchForm(request.POST or None,
                                 initial={'from_date': today_date.strftime('%Y-%m-%d'),
                                          'compare_days': compare_days,
                                          'compare_type': compare_type,
                                          'switch_id': 0})

    today_date = datetime(today_date.year, today_date.month, today_date.day)
    current_date = today_date

    if form.is_valid():
        from_date = getvar(request, 'from_date')
        current_date = ceil_strdate(str(from_date), 'start')
        # current_date = trunc_date_start(from_date)
        switch_id = getvar(request, 'switch_id')
        compare_days = int(getvar(request, 'compare_days'))
        metric = getvar(request, 'metric')

    kwargs = {}

    if switch_id and switch_id != '0':
        kwargs['switch_id'] = int(switch_id)

    xdata = [i for i in range(0, 24)]
    hourly_chartdata = {'x': xdata}

    y_count = 1
    for nday in range(1, compare_days + 1):
        start_date = current_date + relativedelta(days=-int(nday-1))
        start_date = datetime(start_date.year, start_date.month, start_date.day, 0, 0, 0, 0)
        end_date = current_date + relativedelta(days=-int(nday-1))
        end_date = datetime(end_date.year, end_date.month, end_date.day, 23, 59, 59, 999999)
        # Get hourly Data
        hourly_data = get_report_compare_cdr(request.user, 'hour', start_date, end_date, switch_id)

        extra_serie = {
            "tooltip": {"y_start": "", "y_end": " " + metric}
        }
        # We only need to set x axis once, so let's do it for nbcalls
        # hourly_chartdata['x'] = hourly_data["nbcalls"]["x_timestamp"]
        for switch in hourly_data[metric]["columns"]:
            serie = get_switch_ip_addr(switch) + "_day_" + str(nday)
            hourly_chartdata['name' + str(y_count)] = serie
            hourly_chartdata['y' + str(y_count)] = hourly_data[metric]["values"][str(switch)]
            hourly_chartdata['extra' + str(y_count)] = extra_serie
            y_count += 1

    variables = {
        'form': form,
        'from_date': current_date,
        'metric': metric,
        'compare_days': compare_days,
        'hourly_charttype': hourly_charttype,
        'hourly_chartdata': hourly_chartdata,
        'hourly_chartcontainer': 'hourly_chartcontainer',
        'hourly_extra': {
            'x_is_date': False,
            'x_axis_format': '',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
    }
    return render_to_response('cdr/daily_comparison.html', variables, context_instance=RequestContext(request))
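
The comparison loop walks backwards from the selected date (today by default) one day at a time and expands each day into a midnight-to-midnight window before querying. A sketch of just that loop, with the reporting helpers left out and compare_days hardcoded for illustration (assuming python-dateutil):

import datetime
from dateutil.relativedelta import relativedelta

current_date = datetime.datetime.today()
compare_days = 2
for nday in range(1, compare_days + 1):
    day = current_date + relativedelta(days=-(nday - 1))
    start_date = datetime.datetime(day.year, day.month, day.day, 0, 0, 0, 0)
    end_date = datetime.datetime(day.year, day.month, day.day, 23, 59, 59, 999999)
    print(start_date, end_date)   # one window per compared day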

Example 84

Project: cdr-stats
Source File: views.py
View license
@permission_required('user_profile.overview', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_overview(request):
    """CDR graph by hourly/daily/monthly basis

    **Attributes**:

        * ``template`` - cdr/overview.html
        * ``form`` - CdrOverviewForm

    **Logic Description**:

        Get Call records from Postgresql table and build
        all monthly, daily, hourly analytics
    """
    # initialize variables
    hourly_charttype = "lineWithFocusChart"
    daily_charttype = "lineWithFocusChart"
    hourly_chartdata = {'x': []}
    daily_chartdata = {'x': []}
    metric = 'nbcalls'  # Default metric

    action = 'tabs-1'
    tday = datetime.today()
    switch_id = 0
    # assign initial value in form fields
    form = CdrOverviewForm(request.POST or None,
                           initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
                                    'to_date': tday.strftime('%Y-%m-%d 23:55'),
                                    'switch_id': switch_id})
    start_date = trunc_date_start(tday)
    end_date = trunc_date_end(tday)
    if form.is_valid():
        from_date = getvar(request, 'from_date')
        to_date = getvar(request, 'to_date')
        start_date = trunc_date_start(from_date)
        end_date = trunc_date_end(to_date)
        switch_id = getvar(request, 'switch_id')
        metric = getvar(request, 'metric')

    # get the number of hour that diff the date
    delta = end_date - start_date
    hour_diff = abs(divmod(delta.days * 86400 + delta.seconds, 60)[0]) / 60
    if hour_diff <= 72:
        display_chart = 'hourly'
    else:
        display_chart = 'daily'

    # check metric is valid
    if metric not in ['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost']:
        metric = 'nbcalls'

    extra_serie = {
        "tooltip": {"y_start": "", "y_end": " " + metric},
        "date_format": "%d %b %y %H:%M%p"
    }

    if display_chart == 'hourly':
        hourly_data = get_report_cdr_per_switch(request.user, 'hour', start_date, end_date, switch_id)

        for switch in hourly_data[metric]["columns"]:
            hourly_chartdata['x'] = hourly_data[metric]["x_timestamp"]
            hourly_chartdata['name' + str(switch)] = get_switch_ip_addr(switch)
            hourly_chartdata['y' + str(switch)] = hourly_data[metric]["values"][str(switch)]
            hourly_chartdata['extra' + str(switch)] = extra_serie

        total_calls = hourly_data["nbcalls"]["total"]
        total_duration = hourly_data["duration"]["total"]
        total_billsec = hourly_data["billsec"]["total"]
        total_buy_cost = hourly_data["buy_cost"]["total"]
        total_sell_cost = hourly_data["sell_cost"]["total"]

    elif display_chart == 'daily':
        daily_data = get_report_cdr_per_switch(request.user, 'day', start_date, end_date, switch_id)

        for switch in daily_data[metric]["columns"]:
            daily_chartdata['x'] = daily_data[metric]["x_timestamp"]
            daily_chartdata['name' + str(switch)] = get_switch_ip_addr(switch)
            daily_chartdata['y' + str(switch)] = daily_data[metric]["values"][str(switch)]
            daily_chartdata['extra' + str(switch)] = extra_serie

        total_calls = daily_data["nbcalls"]["total"]
        total_duration = daily_data["duration"]["total"]
        total_billsec = daily_data["billsec"]["total"]
        total_buy_cost = daily_data["buy_cost"]["total"]
        total_sell_cost = daily_data["sell_cost"]["total"]

    # Calculate the Average Time of Call
    metric_aggr = calculate_act_acd(total_calls, total_duration)

    # Get top 10 of country calls
    country_data = custom_sql_aggr_top_country(request.user, switch_id, 10, start_date, end_date)

    variables = {
        'action': action,
        'form': form,
        'display_chart': display_chart,
        'start_date': start_date,
        'end_date': end_date,
        'metric': metric,
        'hourly_chartdata': hourly_chartdata,
        'hourly_charttype': hourly_charttype,
        'hourly_chartcontainer': 'hourly_container',
        'hourly_extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %y %H%p',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
        'daily_chartdata': daily_chartdata,
        'daily_charttype': daily_charttype,
        'daily_chartcontainer': 'daily_container',
        'daily_extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
        'total_calls': total_calls,
        'total_duration': total_duration,
        'total_billsec': total_billsec,
        'total_buy_cost': total_buy_cost,
        'total_sell_cost': total_sell_cost,
        'metric_aggr': metric_aggr,
        'country_data': country_data,
    }
    return render_to_response('cdr/overview.html', variables, context_instance=RequestContext(request))
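
The hourly/daily switch depends on how many hours separate the chosen start and end dates, which default to today's boundaries. A sketch of the same decision; trunc_date_start and trunc_date_end are not shown in this listing, so plain replace() calls stand in for them here (an assumption, not the project's code):

import datetime

start_date = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
end_date = datetime.datetime.today().replace(hour=23, minute=55, second=0, microsecond=0)
delta = end_date - start_date
hour_diff = (delta.days * 86400 + delta.seconds) // 3600   # whole hours between the bounds
display_chart = 'hourly' if hour_diff <= 72 else 'daily'
print(hour_diff, display_chart)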

Example 85

Project: cdr-stats
Source File: views.py
View license
@permission_required('user_profile.by_country', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_country_report(request):
    """CDR country report

    **Attributes**:

        * ``template`` - cdr/country_report.html
        * ``form`` - CountryReportForm

    **Logic Description**:

        Retrieve call records from Postgresql for all countries
        and create reporting information for those countries
    """
    metric = 'nbcalls'
    tday = datetime.today()

    switch_id = 0
    hourly_charttype = "lineWithFocusChart"
    hourly_chartdata = {'x': []}
    country_id_list = []
    total_metric = 0

    # assign initial value in form fields
    form = CountryReportForm(request.POST or None,
                             initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
                                    'to_date': tday.strftime('%Y-%m-%d 23:55'),
                                    'switch_id': switch_id})

    start_date = trunc_date_start(tday)
    end_date = trunc_date_end(tday)

    if form.is_valid():
        from_date = getvar(request, 'from_date')
        to_date = getvar(request, 'to_date')
        start_date = trunc_date_start(from_date)
        end_date = trunc_date_end(to_date)
        switch_id = getvar(request, 'switch_id')
        metric = getvar(request, 'metric')
        country_id = form.cleaned_data['country_id']
        # convert list value in int
        country_id_list = [int(row) for row in country_id]
        # handle 0 (All) selection
        if 0 in country_id_list:
            country_id_list = []

    # check metric is valid
    if metric not in ['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost']:
        metric = 'nbcalls'

    hourly_data = get_report_cdr_per_country(request.user, 'hour', start_date, end_date, switch_id, country_id_list)

    extra_serie = {
        "tooltip": {"y_start": "", "y_end": " " + metric},
        "date_format": "%d %b %y %H:%M%p"
    }
    for country in hourly_data[metric]["columns"]:
        hourly_chartdata['x'] = hourly_data[metric]["x_timestamp"]
        country_name = get_country_name(int(country)).encode('utf-8')
        hourly_chartdata['name' + str(country)] = country_name.decode('ascii', 'ignore').replace("'", " ")
        hourly_chartdata['y' + str(country)] = hourly_data[metric]["values"][str(country)]
        hourly_chartdata['extra' + str(country)] = extra_serie

    total_calls = hourly_data["nbcalls"]["total"]
    total_duration = hourly_data["duration"]["total"]
    total_billsec = hourly_data["billsec"]["total"]
    total_buy_cost = hourly_data["buy_cost"]["total"]
    total_sell_cost = hourly_data["sell_cost"]["total"]

    # Calculate the Average Time of Call
    metric_aggr = calculate_act_acd(total_calls, total_duration)

    # Get top 10 of country calls
    top_country = 10
    country_data = custom_sql_aggr_top_country(request.user, switch_id, top_country, start_date, end_date)

    # Build pie chart data for last 24h calls per country
    (xdata, ydata) = ([], [])
    for country in country_data:
        xdata.append(get_country_name(country["country_id"]))
        ydata.append(percentage(country["nbcalls"], total_calls))

    color_list = ['#FFC36C', '#FFFF9D', '#BEEB9F', '#79BD8F', '#FFB391',
        '#58A6A6', '#86BF30', '#F2D022', '#D9AA1E', '#D98236']

    extra_serie = {"tooltip": {"y_start": "", "y_end": " %"}, "color_list": color_list}
    country_analytic_chartdata = {'x': xdata, 'y1': ydata, 'extra1': extra_serie}
    country_analytic_charttype = "pieChart"

    country_extra = {
        'x_is_date': False,
        'x_axis_format': '',
        'tag_script_js': True,
        'jquery_on_ready': True,
    }

    data = {
        'action': 'tabs-1',
        'total_metric': total_metric,
        'start_date': start_date,
        'end_date': end_date,
        'metric': metric,
        'form': form,
        'NUM_COUNTRY': settings.NUM_COUNTRY,
        'hourly_charttype': hourly_charttype,
        'hourly_chartdata': hourly_chartdata,
        'hourly_chartcontainer': 'hourly_container',
        'hourly_extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y',
            'tag_script_js': True,
            'jquery_on_ready': False,
        },
        'total_calls': total_calls,
        'total_duration': total_duration,
        'total_billsec': total_billsec,
        'total_buy_cost': total_buy_cost,
        'total_sell_cost': total_sell_cost,
        'metric_aggr': metric_aggr,
        'country_data': country_data,

        'country_analytic_charttype': country_analytic_charttype,
        'country_analytic_chartdata': country_analytic_chartdata,
        'country_chartcontainer': 'country_piechart_container',
        'country_extra': country_extra,
        'top_country': top_country,
    }
    return render_to_response('cdr/country_report.html', data, context_instance=RequestContext(request))

Example 86

Project: cdr-stats
Source File: tasks.py
View license
    @only_one(ikey="get_channels_info", timeout=LOCK_EXPIRE)
    def run(self, **kwargs):

        logger = self.get_logger()
        logger.info('TASK :: get_channels_info')
        totalcall = 0

        # Get calldate
        now = datetime.today()
        date_now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0)
        # key_date / minute precision
        key_date = "%d-%d-%d-%d-%d" % (now.year, now.month, now.day, now.hour, now.minute)

        # Retrieve SwitchID
        try:
            switch = Switch.objects.get(ipaddress=settings.LOCAL_SWITCH_IP)
            switch_id = switch.id
        except:
            logger.error("Cannot retrieve Switch %s" % settings.LOCAL_SWITCH_IP)
            return False

        if settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['cdr_type'] == 'freeswitch':
            con = False
            if settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['internal_db_engine'] == 'pgsql':
                user = settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['internal_db_user']
                password = settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['internal_db_password']
                db_name = settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['internal_db_name']
                host = settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['internal_db_host']
                port = settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['internal_db_port']
                try:
                    connection = PgDatabase.connect(user=user, password=password,
                                                    database=db_name, host=host, port=port)
                    connection.autocommit = True
                    cur = connection.cursor()
                    cur.execute('SELECT accountcode,COUNT(*) FROM channels GROUP BY accountcode;')
                    rows = cur.fetchall()
                    for row in rows:
                        if not row[0]:
                            accountcode = ''
                        else:
                            accountcode = row[0]
                        numbercall = row[1]
                        totalcall = totalcall + numbercall
                        logger.debug('%s (accountcode:%s, switch_id:%d) ==> %s'
                                     % (date_now, accountcode, switch_id,
                                        str(numbercall)))

                        call_json = {
                            'switch_id': switch_id,
                            'call_date': date_now,
                            'numbercall': numbercall,
                            'accountcode': accountcode,
                        }

                        mongodb.conc_call.insert(call_json)

                        # Save to cache
                        key = "%s-%d-%s" % (key_date, switch_id, str(accountcode))
                        cache.set(key, numbercall, 1800)  # 30 minutes
                        # Create collection for Analytics
                        set_concurrentcall_analytic(date_now, switch_id, accountcode, numbercall)
                except PgDatabase.Error, e:
                    logger.error('Error %s:' % e.args[0])
                finally:
                    if con:
                        con.close()
            else:
                try:
                    con = sqlite3.connect('/usr/local/freeswitch/db/core.db')
                    cur = con.cursor()
                    cur.execute('SELECT accountcode, count(*) FROM channels')
                    rows = cur.fetchall()
                    for row in rows:
                        if not row[0]:
                            accountcode = ''
                        else:
                            accountcode = row[0]
                        numbercall = row[1]
                        totalcall = totalcall + numbercall
                        logger.debug('%s (accountcode:%s, switch_id:%d) ==> %s' %
                                     (date_now, accountcode, switch_id, str(numbercall)))

                        call_json = {
                            'switch_id': switch_id,
                            'call_date': date_now,
                            'numbercall': numbercall,
                            'accountcode': accountcode,
                        }

                        mongodb.conc_call.insert(call_json)

                        # Save to cache
                        key = "%s-%d-%s" % (key_date, switch_id, str(accountcode))
                        cache.set(key, numbercall, 1800)  # 30 minutes
                        # Create collection for Analytics
                        set_concurrentcall_analytic(date_now, switch_id, accountcode, numbercall)

                except sqlite3.Error, e:
                    logger.error('Error %s:' % e.args[0])
                finally:
                    if con:
                        con.close()
        elif settings.CDR_BACKEND[settings.LOCAL_SWITCH_IP]['cdr_type'] == 'asterisk':
            manager = asterisk.manager.Manager()
            listaccount = {}
            try:
                # connect to the manager
                try:
                    manager.connect(settings.ASTERISK_MANAGER_HOST)
                    manager.login(settings.ASTERISK_MANAGER_USER, settings.ASTERISK_MANAGER_SECRET)

                    # get list of channels
                    response = manager.command('core show channels concise')
                    # response.data = "SIP/areski-00000006!a2billing-echotest!34902800102*!2!Ring!Echo!!34650784355!4267877355!!3!35!(None)!1352663344.6\n"
                    # response.data += "SIP/areski-00000006!a2billing-echotest!34902800102*!2!Ring!Echo!!34650784355!!!3!35!(None)!1352663344.6\n"
                    # response.data += "SIP/areski-00000006!a2billing-echotest!34902800102*!2!Ring!Echo!!34650784355!!!3!35!(None)!1352663344.6\n"
                    # response.data += "SIP/areski-00000006!a2billing-echotest!34902800102*!2!Ring!Echo!!34650784355!12346!!3!35!(None)!1352663344.6\n"
                    # response.data += "SIP/areski-00000006!a2billing-echotest!34902800102*!2!Ring!Echo!!34650784355!!!3!35!(None)!1352663344.6\n"

                    if response.data:
                        lines = response.data.split('\n')
                        for line in lines:
                            col = line.split('!')
                            if col and len(col) >= 8:
                                if col[8] in listaccount:
                                    listaccount[col[8]] = listaccount[col[8]] + 1
                                else:
                                    listaccount[col[8]] = 1
                    # manager.logoff()
                except asterisk.manager.ManagerSocketException, (errno, reason):
                    logger.error("Error connecting to the manager: %s" % reason)
                    return False
                except asterisk.manager.ManagerAuthException, reason:
                    logger.error("Error logging in to the manager: %s" % reason)
                    return False
                except asterisk.manager.ManagerException, reason:
                    logger.error("Error: %s" % reason)
                    return False
            finally:
                try:
                    manager.close()
                except:
                    logger.error("Manager didn't close")

            for accountcode in listaccount:
                numbercall = listaccount[accountcode]
                totalcall = totalcall + numbercall
                logger.debug('%s (accountcode:%s, switch_id:%d) ==> %s'
                             % (date_now, accountcode, switch_id,
                                str(numbercall)))
                call_json = {
                    'switch_id': switch_id,
                    'call_date': date_now,
                    'numbercall': numbercall,
                    'accountcode': accountcode,
                }
                mongodb.conc_call.insert(call_json)
                # Save to cache
                key = "%s-%d-%s" % (key_date, switch_id, str(accountcode))
                cache.set(key, numbercall, 1800)  # 30 minutes
                # Create collection for Analytics
                set_concurrentcall_analytic(date_now, switch_id, accountcode, numbercall)

        # For any switches

        # There is no calls
        if totalcall == 0:
            accountcode = ''
            numbercall = 0
            call_json = {
                'switch_id': switch_id,
                'call_date': date_now,
                'numbercall': numbercall,
                'accountcode': accountcode,
            }
            mongodb.conc_call.insert(call_json)
            key = "%s-%d-%s" % (key_date, switch_id, str(accountcode))
            cache.set(key, numbercall, 1800)  # 30 minutes
            set_concurrentcall_analytic(date_now, switch_id, accountcode, numbercall)

        key = "%s-%d-root" % (key_date, switch_id)
        logger.info("key:%s, totalcall:%d" % (key, totalcall))
        cache.set(key, totalcall, 1800)  # 30 minutes

        return True
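
This task normalises datetime.datetime.today() twice: to second precision for the stored call_date, and to a minute-precision string for the 30-minute cache key. A standalone sketch of that normalisation, with the Celery, cache and MongoDB pieces stripped out:

import datetime

now = datetime.datetime.today()
# drop microseconds so every record written in this run shares one call_date
date_now = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0)
# minute-precision key, matching the cache-key format used in the task
key_date = "%d-%d-%d-%d-%d" % (now.year, now.month, now.day, now.hour, now.minute)
print(date_now, key_date)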

Example 87

Project: cdr-stats
Source File: views.py
View license
@permission_required('user_profile.billing_report', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def billing_report(request):
    """CDR billing graph by daily basis

    **Attributes**:

        * ``template`` - voip_billing/billing_report.html
        * ``form`` - BillingReportForm

    **Logic Description**:

        Retrieve call records from PostgreSQL and build the
        daily billing analytics for given date range
    """
    switch_id = 0
    tday = datetime.today()
    total_data = []
    charttype = "lineWithFocusChart"
    hourly_chartdata = {"x": []}

    form = BillingReportForm(request.POST or None,
                             initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
                                    'to_date': tday.strftime('%Y-%m-%d 23:55'),
                                    'switch_id': switch_id})
    start_date = trunc_date_start(tday)
    end_date = trunc_date_end(tday)

    if form.is_valid():
        from_date = getvar(request, 'from_date')
        to_date = getvar(request, 'to_date')
        start_date = trunc_date_start(from_date)
        end_date = trunc_date_end(to_date)
        switch_id = getvar(request, 'switch_id')

    metrics = ['buy_cost', 'sell_cost']

    hourly_data = get_report_cdr_per_switch(request.user, 'hour', start_date, end_date, switch_id)

    hourly_chartdata['x'] = hourly_data["nbcalls"]["x_timestamp"]

    i = 0
    for metric in metrics:
        extra_serie = {
            "tooltip": {"y_start": "", "y_end": " " + metric},
            "date_format": "%d %b %y %H:%M%p"
        }
        for switch in hourly_data[metric]["columns"]:
            i = i + 1
            hourly_chartdata['name' + str(i)] = get_switch_ip_addr(switch) + "_" + metric
            hourly_chartdata['y' + str(i)] = hourly_data[metric]["values"][str(switch)]
            hourly_chartdata['extra' + str(i)] = extra_serie

    total_calls = hourly_data["nbcalls"]["total"]
    total_duration = hourly_data["duration"]["total"]
    total_billsec = hourly_data["billsec"]["total"]
    total_buy_cost = hourly_data["buy_cost"]["total"]
    total_sell_cost = hourly_data["sell_cost"]["total"]

    # Calculate the Average Time of Call
    metric_aggr = calculate_act_acd(total_calls, total_duration)

    # Get top 10 of country calls
    country_data = custom_sql_aggr_top_country(request.user, switch_id, 10, start_date, end_date)

    data = {
        'form': form,
        'total_data': total_data,
        'start_date': start_date,
        'end_date': end_date,
        'charttype': charttype,
        'chartdata': hourly_chartdata,
        'chartcontainer': 'chart_container',
        'extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
        'total_calls': total_calls,
        'total_duration': total_duration,
        'total_billsec': total_billsec,
        'total_buy_cost': total_buy_cost,
        'total_sell_cost': total_sell_cost,
        'metric_aggr': metric_aggr,
        'country_data': country_data,
    }
    return render_to_response('voip_billing/billing_report.html',
                              data,
                              context_instance=RequestContext(request))

Example 88

Project: rhea
Source File: edid.py
View license
    def __init__(self, resolution=(640, 480,), refresh_rate=60,
                 aspect=(16,9) ):
        """
        """
        # vendor and product identification
        self._mfg_name = 'ABC'
        self._pid = intbv(0x72)[16:]
        self._serial_number = intbv(1)[32:]
        self._mfg_date = datetime.today()
        self._mfg_week = self._mfg_date.isocalendar()[1]
        self._mfg_year = self._mfg_date.year - 1990
        # EDID structure (version.revision)
        self._version = '1.4'   
        # basic display info (section 3.6)        
        self._display_type = 'hdmi-a'
        self._color_depth = 8
        self._refresh_rate = refresh_rate
        self._resolution = resolution
        self._horizontal_size = resolution[0]
        self._vertical_size = resolution[1]
        self._aspect = aspect
        self._landscape = True if aspect[0] > aspect[1] else False
        self._aspect_ratio = aspect[0] / aspect[1]
        self._gamma = 2.2

        # example of an x,y chroma table
        # 655 (28F) --> 0.6396484375
        # 338 (152) --> 0.330078125
        # 307 (133) --> 0.2998046875
        # 614 (266) --> 0.599609375
        # 154 (09A) --> 0.150390625
        # 61  (03D) --> 0.0595703125
        # 320 (140) --> 0.3125
        # 337 (151) --> 0.3291015625
        # @todo: add binary fraction conversion and use it
        # @todo: use decimal fractions in xy_chroma table
        self._xy_chroma = (655, 338, 307, 614, 154, 61, 320, 337)

        # @todo: color characteristics (section 3.7)

        # @todo: timing ... (section 3.8 and 3.9)
        
        self._rom = [intbv(0)[8:] for _ in range(128)]
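
The constructor stamps the EDID block with a manufacture week and year derived from today's date. A small sketch of just that derivation; the 1990 offset mirrors the convention used in the example:

import datetime

mfg_date = datetime.datetime.today()
mfg_week = mfg_date.isocalendar()[1]   # ISO week number, 1..53
mfg_year = mfg_date.year - 1990        # the year is stored as an offset from 1990
print(mfg_week, mfg_year)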

Example 89

View license
    def create_xml(self, cr, uid, ids, data, context):
        registry = openerp.registry(cr.dbname)
        obj_dept = registry['hr.department']
        obj_emp = registry['hr.employee']
        depts=[]
        emp_id={}
        rpt_obj = registry['hr.holidays']
        rml_obj=report_sxw.rml_parse(cr, uid, rpt_obj._name,context)
        cr.execute("SELECT name FROM res_company")
        res=cr.fetchone()[0]
        date_xml=[]
        date_today=time.strftime('%Y-%m-%d %H:%M:%S')
        date_xml +=['<res name="%s" today="%s" />' % (to_xml(res),date_today)]

        cr.execute("SELECT id, name, color_name FROM hr_holidays_status ORDER BY id")
        legend=cr.fetchall()
        today=datetime.datetime.today()

        first_date=data['form']['date_from']
        som = strToDate(first_date)
        eom = som+datetime.timedelta(59)
        day_diff=eom-som

        name = ''
        if len(data['form'].get('emp', ())) == 1:
            name = obj_emp.read(cr, uid, data['form']['emp'][0], ['name'])['name']

        if data['form']['holiday_type']!='both':
            type=data['form']['holiday_type']
            if data['form']['holiday_type']=='Confirmed':
                holiday_type=('confirm')
            else:
                holiday_type=('validate')
        else:
            type="Confirmed and Approved"
            holiday_type=('confirm','validate')
        date_xml.append('<from>%s</from>\n'% (str(rml_obj.formatLang(som.strftime("%Y-%m-%d"),date=True))))
        date_xml.append('<to>%s</to>\n' %(str(rml_obj.formatLang(eom.strftime("%Y-%m-%d"),date=True))))
        date_xml.append('<type>%s</type>'%(type))
        date_xml.append('<name>%s</name>'%(name))

#        date_xml=[]
        for l in range(0,len(legend)):
            date_xml += ['<legend row="%d" id="%d" name="%s" color="%s" />' % (l+1,legend[l][0],_(legend[l][1]),legend[l][2])]
        date_xml += ['<date month="%s" year="%d" />' % (ustr(som.strftime('%B')), som.year),'<days>']

        cell=1
        if day_diff.days>=30:
            date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
        else:
            if day_diff.days>=(lengthmonth(som.year, som.month)-som.day):
                date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
            else:
                date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, eom.day+1)]

        cell=x-som.day+1
        day_diff1=day_diff.days-cell+1

        width_dict={}
        month_dict={}

        i=1
        j=1
        year=som.year
        month=som.month
        month_dict[j]=som.strftime('%B')
        width_dict[j]=cell

        while day_diff1>0:
            if month+i<=12:
                if day_diff1 > lengthmonth(year,i+month): # Not on 30 else you have problems when entering 01-01-2009 for example
                    som1=datetime.date(year,month+i,1)
                    date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(year,i+month)+1)]
                    i=i+1
                    j=j+1
                    month_dict[j]=som1.strftime('%B')
                    cell=cell+x
                    width_dict[j]=x

                else:
                    som1=datetime.date(year,month+i,1)
                    date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
                    i=i+1
                    j=j+1
                    month_dict[j]=som1.strftime('%B')
                    cell=cell+x
                    width_dict[j]=x

                day_diff1=day_diff1-x
            else:
                years=year+1
                year=years
                month=0
                i=1
                if day_diff1>=30:
                    som1=datetime.date(years,i,1)
                    date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(years,i)+1)]
                    i=i+1
                    j=j+1
                    month_dict[j]=som1.strftime('%B')
                    cell=cell+x
                    width_dict[j]=x

                else:
                    som1=datetime.date(years,i,1)
                    i=i+1
                    j=j+1
                    month_dict[j]=som1.strftime('%B')
                    date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
                    cell=cell+x
                    width_dict[j]=x

                day_diff1=day_diff1-x

        date_xml.append('</days>')
        date_xml.append('<cols>3.5cm%s,0.4cm</cols>\n' % (',0.4cm' * (60)))
        date_xml = ''.join(date_xml)

        st='<cols_months>3.5cm'
        for m in range(1,len(width_dict)+1):
            st+=',' + str(0.4 *width_dict[m])+'cm'
        st+=',0.4cm</cols_months>\n'

        months_xml =['<months  number="%d" name="%s"/>' % (x, _(month_dict[x])) for x in range(1,len(month_dict)+1) ]
        months_xml.append(st)
        
        emp_xml=''
        row_id=1
        
        if data['model'] == 'hr.employee':
            for items in obj_emp.read(cr, uid, data['form']['emp'], ['id', 'name']):
                emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, items['id'], items['name'], som, eom)
                row_id = row_id +1

        elif data['model']=='ir.ui.menu':
            for dept in obj_dept.browse(cr, uid, data['form']['depts'], context=context):
                emp_ids = obj_emp.search(cr, uid, [('department_id', '=', dept.id)], context=context)
                if emp_ids==[]:
                    continue
                dept_done=0
                for item in obj_emp.read(cr, uid, emp_ids, ['id', 'name']):
                    if dept_done==0:
                        emp_xml += emp_create_xml(self, cr, uid, 1, holiday_type, row_id, dept.id, dept.name, som, eom)
                        row_id = row_id +1
                    dept_done=1
                    emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, item['id'], item['name'], som, eom)
                    row_id = row_id +1
                    
        header_xml = '''
        <header>
        <date>%s</date>
        <company>%s</company>
        </header>
        ''' % (str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")),to_xml(registry['res.users'].browse(cr,uid,uid).company_id.name))

        # Computing the xml
        xml='''<?xml version="1.0" encoding="UTF-8" ?>
        <report>
        %s
        %s
        %s
        %s
        </report>
        ''' % (header_xml,months_xml,date_xml, ustr(emp_xml))

        return xml
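
The month-spanning loop above walks day by day from the form's start date (som) through som + 59 days, emitting one day cell at a time while tracking per-month widths. For illustration only, the same 60-day span can be enumerated much more directly (the start date below is hypothetical):

import datetime

som = datetime.date(2009, 1, 15)                              # hypothetical start date from the form
days = [som + datetime.timedelta(days=i) for i in range(60)]
months_seen = sorted(set((d.year, d.month) for d in days))    # the months the report has to render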

Example 90

Project: cgstudiomap
Source File: test_holidays_flow.py
View license
    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_00_leave_request_flow(self):
        """ Testing leave request flow """
        cr, uid = self.cr, self.uid

        def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
            self.assertEqual(holiday_status.max_leaves, ml,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.leaves_taken, lt,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.remaining_leaves, rl,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
                             'hr_holidays: wrong type days computation')

        # HrUser creates some holiday statuses -> crash because only HrManagers should do this
        with self.assertRaises(AccessError):
            self.holidays_status_dummy = self.hr_holidays_status.create(cr, self.user_hruser_id, {
                'name': 'UserCheats',
                'limit': True,
            })

        # HrManager creates some holiday statuses
        self.holidays_status_0 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
            'name': 'WithMeetingType',
            'limit': True,
            'categ_id': self.registry('calendar.event.type').create(cr, self.user_hrmanager_id, {'name': 'NotLimitedMeetingType'}),
        })
        self.holidays_status_1 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
            'name': 'NotLimited',
            'limit': True,
        })
        self.holidays_status_2 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
            'name': 'Limited',
            'limit': False,
            'double_validation': True,
        })

        # --------------------------------------------------
        # Case1: unlimited type of leave request
        # --------------------------------------------------

        # Employee creates a leave request for another employee -> should crash
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol10',
                'employee_id': self.employee_hruser_id,
                'holiday_status_id': self.holidays_status_1,
                'date_from': (datetime.today() - relativedelta(days=1)),
                'date_to': datetime.today(),
                'number_of_days_temp': 1,
            })
        ids = self.hr_holidays.search(cr, uid, [('name', '=', 'Hol10')])
        self.hr_holidays.unlink(cr, uid, ids)

        # Employee creates a leave request in a no-limit category
        hol1_id = self.hr_holidays.create(cr, self.user_employee_id, {
            'name': 'Hol11',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_1,
            'date_from': (datetime.today() - relativedelta(days=1)),
            'date_to': datetime.today(),
            'number_of_days_temp': 1,
        })
        hol1 = self.hr_holidays.browse(cr, self.user_hruser_id, hol1_id)
        self.assertEqual(hol1.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')

        # Employee validates its leave request -> should not work
        self.hr_holidays.signal_workflow(cr, self.user_employee_id, [hol1_id], 'validate')
        hol1.refresh()
        self.assertEqual(hol1.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')

        # HrUser validates the employee leave request
        self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol1_id], 'validate')
        hol1.refresh()
        self.assertEqual(hol1.state, 'validate', 'hr_holidays: validates leave request should be in validate state')

        # --------------------------------------------------
        # Case2: limited type of leave request
        # --------------------------------------------------

        # Employee creates a new leave request at the same time -> crash, avoid overlapping
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol21',
                'employee_id': self.employee_emp_id,
                'holiday_status_id': self.holidays_status_1,
                'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
                'date_to': datetime.today(),
                'number_of_days_temp': 1,
            })

        # Employee creates a leave request in a limited category -> crash, not enough days left
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol22',
                'employee_id': self.employee_emp_id,
                'holiday_status_id': self.holidays_status_2,
                'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
                'date_to': (datetime.today() + relativedelta(days=1)),
                'number_of_days_temp': 1,
            })

        # Clean transaction
        self.hr_holidays.unlink(cr, uid, self.hr_holidays.search(cr, uid, [('name', 'in', ['Hol21', 'Hol22'])]))

        # HrUser allocates some leaves to the employee
        aloc1_id = self.hr_holidays.create(cr, self.user_hruser_id, {
            'name': 'Days for limited category',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_2,
            'type': 'add',
            'number_of_days_temp': 2,
        })
        # HrUser validates the allocation request
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'validate')
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'second_validate')
        # Checks Employee has effectively some days left
        hol_status_2 = self.hr_holidays_status.browse(cr, self.user_employee_id, self.holidays_status_2)
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)

        # Employee creates a leave request in the limited category, now that he has some days left
        hol2_id = self.hr_holidays.create(cr, self.user_employee_id, {
            'name': 'Hol22',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_2,
            'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
            'date_to': (datetime.today() + relativedelta(days=3)),
            'number_of_days_temp': 1,
        })
        hol2 = self.hr_holidays.browse(cr, self.user_hruser_id, hol2_id)
        # Check left days: - 1 virtual remaining day
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 1.0)

        # HrUser validates the first step
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'validate')
        hol2.refresh()
        self.assertEqual(hol2.state, 'validate1',
                         'hr_holidays: first validation should lead to validate1 state')

        # HrUser validates the second step
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'second_validate')
        hol2.refresh()
        self.assertEqual(hol2.state, 'validate',
                         'hr_holidays: second validation should lead to validate state')
        # Check left days: - 1 day taken
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 1.0, 1.0, 1.0)

        # HrManager finds an error: he refuses the leave request
        self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'refuse')
        hol2.refresh()
        self.assertEqual(hol2.state, 'refuse',
                         'hr_holidays: refuse should lead to refuse state')
        # Check left days: 2 days left again
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)

        # Annoyed, HrUser tries to fix the error by resetting the leave request -> does not work, only HrManager can
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'reset')
        self.assertEqual(hol2.state, 'refuse',
                         'hr_holidays: hr_user should not be able to reset a refused leave request')

        # HrManager resets the request
        self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'reset')
        hol2.refresh()
        self.assertEqual(hol2.state, 'draft',
                         'hr_holidays: resetting should lead to draft state')

        # HrManager changes the date and put too much days -> crash when confirming
        self.hr_holidays.write(cr, self.user_hrmanager_id, [hol2_id], {
            'date_from': (datetime.today() + relativedelta(days=4)).strftime('%Y-%m-%d %H:%M'),
            'date_to': (datetime.today() + relativedelta(days=7)),
            'number_of_days_temp': 4,
        })
        with self.assertRaises(except_orm):
            self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'confirm')
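
Throughout this test, leave dates are built by offsetting datetime.today() with dateutil's relativedelta and, where the ORM expects strings, formatting them with strftime. A standalone sketch of that pattern (the offsets are hypothetical):

from datetime import datetime
from dateutil.relativedelta import relativedelta

date_from = (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M')
date_to = (datetime.today() + relativedelta(days=3)).strftime('%Y-%m-%d %H:%M')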

Example 91

Project: cgstudiomap
Source File: main.py
View license
    @http.route(['/event', '/event/page/<int:page>'], type='http', auth="public", website=True)
    def events(self, page=1, **searches):
        cr, uid, context = request.cr, request.uid, request.context
        event_obj = request.registry['event.event']
        type_obj = request.registry['event.type']
        country_obj = request.registry['res.country']

        searches.setdefault('date', 'all')
        searches.setdefault('type', 'all')
        searches.setdefault('country', 'all')

        domain_search = {}

        def sdn(date):
            return date.strftime('%Y-%m-%d 23:59:59')
        def sd(date):
            return date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
        today = datetime.today()
        dates = [
            ['all', _('Next Events'), [("date_end", ">", sd(today))], 0],
            ['today', _('Today'), [
                ("date_end", ">", sd(today)),
                ("date_begin", "<", sdn(today))],
                0],
            ['week', _('This Week'), [
                ("date_end", ">=", sd(today + relativedelta(days=-today.weekday()))),
                ("date_begin", "<", sdn(today  + relativedelta(days=6-today.weekday())))],
                0],
            ['nextweek', _('Next Week'), [
                ("date_end", ">=", sd(today + relativedelta(days=7-today.weekday()))),
                ("date_begin", "<", sdn(today  + relativedelta(days=13-today.weekday())))],
                0],
            ['month', _('This month'), [
                ("date_end", ">=", sd(today.replace(day=1))),
                ("date_begin", "<", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))],
                0],
            ['nextmonth', _('Next month'), [
                ("date_end", ">=", sd(today.replace(day=1) + relativedelta(months=1))),
                ("date_begin", "<", (today.replace(day=1)  + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))],
                0],
            ['old', _('Old Events'), [
                ("date_end", "<", today.strftime('%Y-%m-%d 00:00:00'))],
                0],
        ]

        # search domains
        current_date = None
        current_type = None
        current_country = None
        for date in dates:
            if searches["date"] == date[0]:
                domain_search["date"] = date[2]
                if date[0] != 'all':
                    current_date = date[1]
        if searches["type"] != 'all':
            current_type = type_obj.browse(cr, uid, int(searches['type']), context=context)
            domain_search["type"] = [("type", "=", int(searches["type"]))]

        if searches["country"] != 'all' and searches["country"] != 'online':
            current_country = country_obj.browse(cr, uid, int(searches['country']), context=context)
            domain_search["country"] = ['|', ("country_id", "=", int(searches["country"])), ("country_id", "=", False)]
        elif searches["country"] == 'online':
            domain_search["country"] = [("country_id", "=", False)]

        def dom_without(without):
            domain = [('state', "in", ['draft','confirm','done'])]
            for key, search in domain_search.items():
                if key != without:
                    domain += search
            return domain

        # count by domains without self search
        for date in dates:
            if date[0] <> 'old':
                date[3] = event_obj.search(
                    request.cr, request.uid, dom_without('date') + date[2],
                    count=True, context=request.context)

        domain = dom_without('type')
        types = event_obj.read_group(
            request.cr, request.uid, domain, ["id", "type"], groupby="type",
            orderby="type", context=request.context)
        type_count = event_obj.search(request.cr, request.uid, domain,
                                      count=True, context=request.context)
        types.insert(0, {
            'type_count': type_count,
            'type': ("all", _("All Categories"))
        })

        domain = dom_without('country')
        countries = event_obj.read_group(
            request.cr, request.uid, domain, ["id", "country_id"],
            groupby="country_id", orderby="country_id", context=request.context)
        country_id_count = event_obj.search(request.cr, request.uid, domain,
                                            count=True, context=request.context)
        countries.insert(0, {
            'country_id_count': country_id_count,
            'country_id': ("all", _("All Countries"))
        })

        step = 10  # Number of events per page
        event_count = event_obj.search(
            request.cr, request.uid, dom_without("none"), count=True,
            context=request.context)
        pager = request.website.pager(
            url="/event",
            url_args={'date': searches.get('date'), 'type': searches.get('type'), 'country': searches.get('country')},
            total=event_count,
            page=page,
            step=step,
            scope=5)

        order = 'website_published desc, date_begin'
        if searches.get('date','all') == 'old':
            order = 'website_published desc, date_begin desc'
        obj_ids = event_obj.search(
            request.cr, request.uid, dom_without("none"), limit=step,
            offset=pager['offset'], order=order, context=request.context)
        events_ids = event_obj.browse(request.cr, request.uid, obj_ids,
                                      context=request.context)

        values = {
            'current_date': current_date,
            'current_country': current_country,
            'current_type': current_type,
            'event_ids': events_ids,
            'dates': dates,
            'types': types,
            'countries': countries,
            'pager': pager,
            'searches': searches,
            'search_path': "?%s" % werkzeug.url_encode(searches),
        }

        return request.website.render("website_event.index", values)
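
The dates table above derives all of its ranges from a single datetime.today() call: the current week runs from Monday (today minus today.weekday() days) through Sunday, and the month ranges are anchored on today.replace(day=1). A minimal sketch of those boundaries, assuming dateutil's relativedelta as in the snippet:

from datetime import datetime
from dateutil.relativedelta import relativedelta

today = datetime.today()
week_start = today + relativedelta(days=-today.weekday())         # Monday of the current week
week_end = today + relativedelta(days=6 - today.weekday())        # Sunday of the current week
month_start = today.replace(day=1)
next_month_start = today.replace(day=1) + relativedelta(months=1)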

Example 92

Project: hue
Source File: close_queries.py
View license
  def handle(self, *args, **options):
    days = int(args[0]) if len(args) >= 1 else 7
    close_all = args[1] == 'all' if len(args) >= 2 else False

    self.stdout.write('Closing (all=%s) HiveServer2 queries older than %s days...\n' % (close_all, days))

    queries = QueryHistory.objects.filter(last_state__in=[QueryHistory.STATE.expired.index, QueryHistory.STATE.failed.index, QueryHistory.STATE.available.index])

    if close_all:
      queries = QueryHistory.objects.all()

    queries = queries.filter(submission_date__lte=datetime.today() - timedelta(days=days))

    import os
    import beeswax
    from beeswax import conf
    from beeswax import hive_site
    try:
      beeswax.conf.HIVE_CONF_DIR.set_for_testing(os.environ['HIVE_CONF_DIR'])
    except:
      LOG.exception('failed to lookup HIVE_CONF_DIR in environment')
      self.stdout.write('Did you export HIVE_CONF_DIR=/etc/hive/conf?\n')
      raise

    hive_site.reset()
    hive_site.get_conf()

    closed_queries = 0
    already_closed_queries = 0

    for query in queries:
      try:
        query_history = QueryHistory.get(id=query.id)
        if query_history.server_id is not None:
          handle = query_history.get_handle()
          dbms.get(user=query_history.owner).close_operation(handle)
          closed_queries += 1
        else:
          already_closed_queries += 1

        query.last_state = QueryHistory.STATE.expired.index
        query.save()
      except Exception, e:
        if 'None' in str(e) or 'Invalid OperationHandle' in str(e):
          already_closed_queries += 1
          query.last_state = QueryHistory.STATE.expired.index
          query.save()
        else:
          self.stdout.write('Info: %s\n' % e)

    self.stdout.write('%s queries closed. %s queries already closed.\n' % (closed_queries, already_closed_queries))
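
The core date filter here is a simple cutoff: any query whose submission_date is older than today minus N days is a candidate for closing. Stripped of the Django ORM, the cutoff is just the following (the field and variable names are hypothetical):

from datetime import datetime, timedelta

days = 7
cutoff = datetime.today() - timedelta(days=days)
# e.g. stale = [q for q in queries if q.submission_date <= cutoff]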

Example 93

Project: turbolift
Source File: __init__.py
View license
    def _compressor(self, file_list):
        # Set the name of the archive.
        tar_name = self.job_args.get('tar_name')
        tar_name = os.path.realpath(os.path.expanduser(tar_name))
        if not os.path.isdir(os.path.dirname(tar_name)):
            raise exceptions.DirectoryFailure(
                'The path to save the archive file does not exist.'
                ' PATH: [ %s ]',
                tar_name
            )

        if not tar_name.endswith('.tgz'):
            tar_name = '%s.tgz' % tar_name

        if self.job_args.get('add_timestamp'):
            # Set date and time
            date_format = '%a%b%d.%H.%M.%S.%Y'
            today = datetime.datetime.today()
            timestamp = today.strftime(date_format)
            _tar_name = os.path.basename(tar_name)
            tar_name = os.path.join(
                os.path.dirname(tar_name), '%s-%s' % (timestamp, _tar_name)
            )

        # Begin creating the Archive.
        verify = self.job_args.get('verify')
        verify_list = self._return_deque()
        with tarfile.open(tar_name, 'w:gz') as tar:
            while file_list:
                try:
                    local_object = file_list.pop()['local_object']
                    if verify:
                        verify_list.append(local_object)
                    tar.add(local_object)
                except IndexError:
                    break

        if verify:
            with tarfile.open(tar_name, 'r') as tar:
                verified_items = self._return_deque()
                for member_info in tar.getmembers():
                    verified_items.append(member_info.name)

                if len(verified_items) != len(verify_list):
                    raise exceptions.SystemProblem(
                        'ARCHIVE NOT VERIFIED: Archive and File List do not'
                        ' Match.'
                    )

        return {
            'meta': dict(),
            'local_object': tar_name,
            'container_object': os.path.basename(tar_name)
        }
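
When add_timestamp is set, the archive name is prefixed with the current time formatted as '%a%b%d.%H.%M.%S.%Y' (abbreviated weekday, abbreviated month, day, time, year). A minimal sketch of that naming step (the path is hypothetical):

import datetime
import os

tar_name = '/tmp/backup.tgz'                                             # hypothetical target
timestamp = datetime.datetime.today().strftime('%a%b%d.%H.%M.%S.%Y')     # e.g. 'MonJan05.13.45.30.2009'
tar_name = os.path.join(os.path.dirname(tar_name),
                        '%s-%s' % (timestamp, os.path.basename(tar_name)))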

Example 94

Project: kardboard
Source File: views.py
View license
def report_cycle(group="all", months=3, year=None, month=None, day=None):
    today = datetime.datetime.today()
    if day:
        end_day = datetime.datetime(year=year, month=month, day=day)
        if end_day > today:
            end_day = today
    else:
        end_day = today

    start_day = end_day - relativedelta.relativedelta(months=months)
    start_day = make_start_date(date=start_day)
    end_day = make_end_date(date=end_day)

    records = DailyRecord.objects.filter(
        date__gte=start_day,
        date__lte=end_day,
        group=group)

    daily_moving_averages = [(r.date, r.moving_cycle_time) for r in records]
    daily_moving_lead = [(r.date, r.moving_lead_time) for r in records]
    daily_mad = [(r.date, r.moving_median_abs_dev) for r in records]

    start_date = daily_moving_averages[0][0]
    chart = {}
    chart['series'] = [
        {
            'name': 'Cycle time',
            'data': [r[1] for r in daily_moving_averages],
        },
        {
            'name': 'Unpredictability',
            'data': [r[1] for r in daily_mad],
        }
    ]
    chart['goal'] = app.config.get('CYCLE_TIME_GOAL', ())

    daily_moving_averages.reverse()  # reverse order for display
    daily_moving_lead.reverse()
    daily_mad.reverse()
    context = {
        'title': "How quick can we do it?",
        'updated_at': datetime.datetime.now(),
        'chart': chart,
        'months': months,
        'start_date': start_date,
        'daily_averages': daily_moving_averages,
        'daily_mad': daily_mad,
        'version': VERSION,
    }

    return render_template('report-cycle.html', **context)
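
This report and the next one share the same windowing idiom: end the window at datetime.datetime.today() (or an explicit end day capped at today) and start it a given number of months earlier via dateutil's relativedelta. In isolation:

import datetime
from dateutil import relativedelta

months = 3
end_day = datetime.datetime.today()
start_day = end_day - relativedelta.relativedelta(months=months)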

Example 95

Project: kardboard
Source File: views.py
View license
def report_cycle_distribution(group="all", months=3, limit=None):
    from kardboard.services.reports import CycleTimeDistribution

    defects_only, cards_only = False, False
    if limit == 'cards':
        cards_only = True
    if limit == 'defects':
        defects_only = True

    today = datetime.datetime.today()
    start_day = today - relativedelta.relativedelta(months=months)
    start_day = make_start_date(date=start_day)
    end_day = make_end_date(date=today)

    context = {
        'title': "How quick can we do it?",
        'updated_at': datetime.datetime.now(),
        'version': VERSION,
    }

    query = Q(done_date__gte=start_day) & Q(done_date__lte=end_day)
    if defects_only:
        query = query & Q(_type__in=app.config.get('DEFECT_TYPES', []))
    elif cards_only:
        query = query & Q(_type__nin=app.config.get('DEFECT_TYPES', []))
    rg = ReportGroup(group, Kard.objects.filter(query))

    cards = list(rg.queryset)

    total = len(cards)
    if total == 0:
        context = {
            'error': "Zero cards were completed in the past %s months" % months,
        }
        return render_template('report-cycle-distro.html', **context)

    cdr = CycleTimeDistribution(cards=cards)

    chart = {}
    chart['categories'] = cdr.days()
    chart['series'] = []

    service_class_series = cdr.service_class_series()
    sclasses = service_class_series.keys()
    sclasses.sort()

    for sclass in sclasses:
        seri = service_class_series[sclass]
        chart['series'].append(
            dict(name=sclass, data=seri)
        )

    context = {
        'histogram_data': cdr.histogram(),
        'chart': chart,
        'title': "How quick can we do it?",
        'months': months,
        'total': total,
        'updated_at': datetime.datetime.now(),
        'version': VERSION,
    }
    if defects_only:
        context['title'] = "Defects: %s" % (context['title'])
        context['card_type'] = 'defects'
    elif cards_only:
        context['title'] = "Cards: %s" % (context['title'])
        context['card_type'] = 'cards'
    else:
        context['title'] = "All: %s" % (context['title'])
        context['card_type'] = 'cards and defects'

    return render_template('report-cycle-distro.html', **context)

Example 96

Project: Sick-Beard
Source File: properFinder.py
View license
    def _getProperList(self):

        propers = {}

        # for each provider get a list of the propers
        for curProvider in providers.sortedProviderList():

            if not curProvider.isActive():
                continue

            search_date = datetime.datetime.today() - datetime.timedelta(days=2)

            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
            try:
                curPropers = curProvider.findPropers(search_date)
            except exceptions.AuthException, e:
                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
                continue

            # if they haven't been added by a different provider then add the proper to the list
            for x in curPropers:
                name = self._genericName(x.name)

                if not name in propers:
                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                    x.provider = curProvider
                    propers[name] = x

        # take the list of unique propers and sort it by date, newest first
        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
        finalPropers = []

        for curProper in sortedPropers:

            # parse the file name
            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(curProper.name)
            except InvalidNameException:
                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
                continue

            if not parse_result.episode_numbers:
                logger.log(u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode", logger.DEBUG)
                continue

            # populate our Proper instance
            if parse_result.air_by_date:
                curProper.season = -1
                curProper.episode = parse_result.air_date
            else:
                curProper.season = parse_result.season_number if parse_result.season_number != None else 1
                curProper.episode = parse_result.episode_numbers[0]
            curProper.quality = Quality.nameQuality(curProper.name)

            # for each show in our list
            for curShow in sickbeard.showList:

                if not parse_result.series_name:
                    continue

                genericName = self._genericName(parse_result.series_name)

                # get the scene name masks
                sceneNames = set(show_name_helpers.makeSceneShowSearchStrings(curShow))

                # for each scene name mask
                for curSceneName in sceneNames:

                    # if it matches
                    if genericName == self._genericName(curSceneName):
                        logger.log(u"Successful match! Result " + parse_result.series_name + " matched to show " + curShow.name, logger.DEBUG)

                        # set the tvdbid in the db to the show's tvdbid
                        curProper.tvdbid = curShow.tvdbid

                        # since we found it, break out
                        break

                # if we found something in the inner for loop break out of this one
                if curProper.tvdbid != -1:
                    break

            if curProper.tvdbid == -1:
                continue

            if not show_name_helpers.filterBadReleases(curProper.name):
                logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, ignoring it", logger.DEBUG)
                continue

            show = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
            if not show:
                logger.log(u"Unable to find the show with tvdbid " + str(curProper.tvdbid), logger.ERROR)
                continue

            if show.rls_ignore_words and search.filter_release_name(curProper.name, show.rls_ignore_words):
                logger.log(u"Ignoring " + curProper.name + " based on ignored words filter: " + show.rls_ignore_words, logger.MESSAGE)
                continue

            if show.rls_require_words and not search.filter_release_name(curProper.name, show.rls_require_words):
                logger.log(u"Ignoring " + curProper.name + " based on required words filter: " + show.rls_require_words, logger.MESSAGE)
                continue

            # if we have an air-by-date show then get the real season/episode numbers
            if curProper.season == -1 and curProper.tvdbid:

                tvdb_lang = show.lang
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                if tvdb_lang and not tvdb_lang == 'en':
                    ltvdb_api_parms['language'] = tvdb_lang

                try:
                    t = tvdb_api.Tvdb(**ltvdb_api_parms)
                    epObj = t[curProper.tvdbid].airedOn(curProper.episode)[0]
                    curProper.season = int(epObj["seasonnumber"])
                    curProper.episodes = [int(epObj["episodenumber"])]
                except tvdb_exceptions.tvdb_episodenotfound:
                    logger.log(u"Unable to find episode with date " + str(curProper.episode) + " for show " + parse_result.series_name + ", skipping", logger.WARNING)
                    continue

            # check if we actually want this proper (if it's the right quality)
            sqlResults = db.DBConnection().select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [curProper.tvdbid, curProper.season, curProper.episode])
            if not sqlResults:
                continue
            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != curProper.quality:
                continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if curProper.tvdbid != -1 and (curProper.tvdbid, curProper.season, curProper.episode) not in map(operator.attrgetter('tvdbid', 'season', 'episode'), finalPropers):
                logger.log(u"Found a proper that we need: " + str(curProper.name))
                finalPropers.append(curProper)

        return finalPropers

Example 97

Project: flashbake
Source File: commit.py
View license
def commit(control_config, hot_files, quiet_mins):
    # change to the project directory, necessary to find the .flashbake file and
    # to correctly refer to the project files by relative paths
    os.chdir(hot_files.project_dir)

    git_obj = git.Git(hot_files.project_dir, control_config.git_path)

    # the wrapper object ensures git is on the path
    # get the git status for the project
    git_status = git_obj.status()

    _handle_fatal(hot_files, git_status)

    # in particular find the existing entries that need a commit
    pending_re = re.compile('\s*(renamed|copied|modified|new file):.*')

    now = datetime.datetime.today()
    quiet_period = datetime.timedelta(minutes=quiet_mins)

    to_commit = list()
    # first look in the files git already knows about
    logging.debug("Examining git status.")
    for line in git_status.splitlines():
        if pending_re.match(line):
            pending_file = _trimgit(line)

            # not in the dot-control file, skip it
            if not (hot_files.contains(pending_file)):
                continue

            logging.debug('Parsing status line %s to determine commit action' % line)

            # remove files that will be considered for commit
            hot_files.remove(pending_file)

            # check the quiet period against mtime
            last_mod = os.path.getmtime(pending_file)
            pending_mod = datetime.datetime.fromtimestamp(last_mod)
            pending_mod += quiet_period

            # add the file to the list to include in the commit
            if pending_mod < now:
                to_commit.append(pending_file)
                logging.debug('Flagging file, %s, for commit.' % pending_file)
            else:
                logging.debug('Change for file, %s, is too recent.' % pending_file)
        _capture_deleted(hot_files, line)

    logging.debug('Examining unknown or unchanged files.')

    hot_files.warnproblems()

    # figure out what the status of the remaining files is
    for control_file in hot_files.control_files:
        # this shouldn't happen since HotFiles.addfile uses glob.iglob to expand
        # the original file lines which does so based on what is in project_dir
        if not os.path.exists(control_file):
            logging.debug('%s does not exist yet.' % control_file)
            hot_files.putabsent(control_file)
            continue

        status_output = git_obj.status(control_file)

        # needed for git >= 1.7.0.4
        if status_output.find('Untracked files') > 0:
            hot_files.putneedsadd(control_file)
            continue
        if status_output.startswith('error'):
            # needed for git < 1.7.0.4
            if status_output.find('did not match') > 0:
                hot_files.putneedsadd(control_file)
                logging.debug('%s exists but is unknown by git.' % control_file)
            else:
                logging.error('Unknown error occurred!')
                logging.error(status_output)
            continue
        # use a regex to match so we can enforce whole word rather than
        # substring matches, otherwise 'foo.txt~' causes a false report of an
        # error
        control_re = re.compile('\<' + re.escape(control_file) + '\>')
        if control_re.search(status_output) == None:
            logging.debug('%s has no uncommitted changes.' % control_file)
        # if anything hits this block, we need to figure out why
        else:
            logging.error('%s is in the status message but failed other tests.' % control_file)
            logging.error('Try \'git status "%s"\' for more info.' % control_file)

    hot_files.addorphans(git_obj, control_config)

    for plugin in control_config.file_plugins:
        plugin.post_process(to_commit, hot_files, control_config)

    if len(to_commit) > 0:
        logging.info('Committing changes to known files, %s.' % to_commit)
        message_file = context.buildmessagefile(control_config)
        if not control_config.dry_run:
            # consolidate the commit to be friendly to how git normally works
            commit_output = git_obj.commit(message_file, to_commit)
            logging.debug(commit_output)
        os.remove(message_file)
        _send_commit_notice(control_config, hot_files, to_commit)
        logging.info('Commit for known files complete.')
    else:
        logging.info('No changes to known files found to commit.')

    if hot_files.needs_warning():
        _send_warning(control_config, hot_files)
    else:
        logging.info('No missing or untracked files found, not sending warning notice.')
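
The quiet-period check compares each file's mtime, shifted forward by the quiet window, against a single "now" taken from datetime.datetime.today(): a file is only committed once it has gone unmodified for at least quiet_mins minutes. Reduced to its essentials (the path and window are hypothetical):

import datetime
import os

quiet_period = datetime.timedelta(minutes=10)          # hypothetical quiet window
now = datetime.datetime.today()

last_mod = os.path.getmtime('notes.txt')               # hypothetical file
ready = datetime.datetime.fromtimestamp(last_mod) + quiet_period < now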

Example 98

Project: crowdata
Source File: models.py
View license
    def verify(self):
        # almost direct port from ProPublica's Transcribable.
        # Thanks @ashaw! :)

        form_entries = self.form_entries.all().distinct('user')
        form_fields = self.document_set.form.all()[0].fields.filter(verify=True)

        aggregate = defaultdict(dict)
        for field in form_fields:
            aggregate[field] = defaultdict(lambda: 0)

        for fe in form_entries:
            for field in form_fields:
                aggregate[field][fe.get_answer_for_field(field)] += 1

        # aggregate
        #      defaultdict(<type 'dict'>, {<DocumentSetFormField: Tipo de gasto>:
        #                                    defaultdict(<function <lambda> at 0x10f97dd70>,
        #                            {u'Gastos': 1, u'Pasajes a\xe9reos, terrestres y otros': 2}),
        #                                  <DocumentSetFormField: Adjudicatario>: defaultdict(<function <lambda> at 0x10f97dcf8>, {u'V\xeda Bariloche S.A.': 3}),
        #                                  <DocumentSetFormField: Importe total>: defaultdict(<function <lambda> at 0x10f97dc80>, {u'14528.8': 3})})

        choosen = {}

        for field, answers in aggregate.items():
            for answer, answer_ct in answers.items():
                if answer_ct >= self.entries_threshold():
                    choosen[field] = answer #max(answers.items(), lambda i: i[1])[0]
        # choosen
        #      { <DocumentSetFormField: Tipo de gasto>: (u'viaticos por viaje', 3),
        #        <DocumentSetFormField: Adjudicatario>: (u'Honorable Senado de la Naci\xf3n', 4),
        #        <DocumentSetFormField: Importe total>: (u'10854.48', 4)
        #      }

        if len(choosen.keys()) == len(form_fields):
            # choosen is
            #   { DocumentSetFormField -> (value, number) }

            the_choosen_one = {}
            for entry in self.form_entries.all():
              the_choosen_one[entry] = 0
              for field, verified_answer in choosen.items():
                if CanonicalFieldEntryLabel.objects.filter(value=verified_answer):
                  canon=CanonicalFieldEntryLabel.objects.filter(value=verified_answer)[0]
                  if entry.fields.filter(canonical_label_id=canon.id):
                    the_choosen_one[entry] += 1
                else:
                  if entry.fields.filter(value=verified_answer):
                    the_choosen_one[entry] += 1
              if the_choosen_one[entry] == len(form_fields):
                entry.force_verify()
                break

            self.updated_at = datetime.today()
        else:
            self.verified = False

        self.save()

Example 99

Project: cif-sdk-py
Source File: es_archiver.py
View license
def main():

    p = ArgumentParser(
        description=textwrap.dedent('''\
        example usage:
            $ cif-es-reindex
        '''),
        formatter_class=RawDescriptionHelpFormatter,
        prog='cif-es-reindex'
    )

    # options
    p.add_argument("-v", "--verbose", action="store_true", help="logging level: INFO")
    p.add_argument('-d', '--debug', action="store_true", help="logging level: DEBUG")
    p.add_argument('-V', '--version', action='version', version=VERSION)
    p.add_argument('-m', '--months', help='how many months ago to cull [default %(default)s]', default=MONTHS)
    p.add_argument('-c', '--confidence', help='min confidence [default %(default)s]', default=CONFIDENCE)
    p.add_argument('--index-prefix', help='index prefix', default='cif.observables')
    p.add_argument('--dry-run', action="store_true", help='dry run, do not delete')
    p.add_argument('--nodes', default=['localhost:9200'])

    args = p.parse_args()
    setup_logging(args)
    logger = logging.getLogger(__name__)

    end_month = (datetime.today() - relativedelta(months=int(args.months)))
    end_month = end_month.strftime('%Y.%m')

    logger.info('month: {}'.format(end_month))

    es = Elasticsearch(args.nodes, timeout=120, max_retries=10, retry_on_timeout=True)

    body = {
        'query': {
            'filtered': {
                'filter': {
                    'and': [
                        {
                            'range': {
                                'confidence': {'lt': args.confidence}
                            }
                        }
                    ]
                }
            }
        }
    }

    monthlies = es.indices.get_aliases(index='{}-*.*'.format(args.index_prefix)).keys()
    to_cull = {}
    for m in monthlies:
        match = re.search(r"^cif\.observables-(\d{4}\.\d{2})$", m)
        if match.group(1) < end_month:
            to_cull['{}-{}'.format(args.index_prefix, match.group(1))] = '{}-{}'.format(args.index_prefix,
                                                                                        match.group(1))

    # https://www.elastic.co/guide/en/elasticsearch/reference/1.4/docs-delete-by-query.html
    # http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.delete_by_query
    # http://stackoverflow.com/questions/26808239/elasticsearch-python-api-delete-documents-by-query

    for c in to_cull:
        logger.info('culling: {}'.format(c))
        if not args.dry_run:
            rv = helpers.scan(
                es,
                index=c,
                query=body,
                scroll='5m',
                size=LIMIT,
            )

            for r in rv:
                pprint(r)
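
Both archiver scripts compute a 'YYYY.MM' cutoff by subtracting N months from datetime.today() and then compare index-name suffixes against it as plain strings, which works because zero-padded 'YYYY.MM' strings sort chronologically. The cutoff on its own (the retention window is hypothetical):

from datetime import datetime
from dateutil.relativedelta import relativedelta

months = 6                                             # hypothetical retention window
end_month = (datetime.today() - relativedelta(months=int(months))).strftime('%Y.%m')
is_old = lambda suffix: suffix < end_month             # e.g. is_old('2015.03')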

Example 100

Project: cif-sdk-py
Source File: es_reindex.py
View license
def main():

    p = ArgumentParser(
        description=textwrap.dedent('''\
        example usage:
            $ cif-es-reindex
        '''),
        formatter_class=RawDescriptionHelpFormatter,
        prog='cif-es-reindex'
    )

    # options
    p.add_argument("-v", "--verbose", action="store_true", help="logging level: INFO")
    p.add_argument('-d', '--debug', action="store_true", help="logging level: DEBUG")
    p.add_argument('-V', '--version', action='version', version=VERSION)
    p.add_argument('-m', '--months', help='how many months ago to cull [default %(default)s]', default=MONTHS)
    p.add_argument('-c', '--confidence', help='min confidence [default %(default)s]', default=CONFIDENCE)
    p.add_argument('--index-prefix', help='index prefix', default='cif.observables')
    p.add_argument('--dry-run', action="store_true", help='dry run, do not delete')
    p.add_argument('--nodes', default=['localhost:9200'])

    args = p.parse_args()
    setup_logging(args)
    logger = logging.getLogger(__name__)

    end_month = (datetime.today() - relativedelta(months=int(args.months)))
    end_month = end_month.strftime('%Y.%m')

    logger.info('month: {}'.format(end_month))

    es = Elasticsearch(args.nodes, timeout=120, max_retries=10, retry_on_timeout=True)

    # get list of dailies
    dailies = es.indices.get_aliases(index='{}-*.*.*'.format(args.index_prefix)).keys()  # daily indices only

    to_cull = {}
    for d in dailies:
        match = re.search(r"^cif\.observables\-((\d{4}\.\d{2})\.\d{2})$", d)
        if match.group(1):
            if match.group(1) < end_month:
                to_cull['{}-{}'.format(args.index_prefix, match.group(1))] = '{}-{}'.format(args.index_prefix, match.group(2))
    body = {
        'query': {
            'filtered': {
                'filter': {
                    'and': [
                        {
                            'range': {
                                'confidence': {'gte': args.confidence}
                            }
                        }
                    ]
                }
            }
        }
    }

    for c in to_cull:
        logger.info('culling: {}'.format(c))
        if not args.dry_run:
            s, f = helpers.reindex(es, c, target_index=to_cull[c], query=body, chunk_size=50000)
            logger.info('success: {}'.format(s))
            logger.info('failure: {}'.format(f))
            if f:
                logger.error('re-index failed: {}'.format(c))
                raise SystemError
            else:
                logger.info('closing index: {}'.format(c))
                es.indices.close(index=c)

        logger.info('optimizing: {}'.format(to_cull[c]))
        if not args.dry_run:
            es.indices.optimize(index=to_cull[c])