sqlalchemy.orm.undefer

Here are examples of the Python API `sqlalchemy.orm.undefer`, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

47 Examples

Example 1

Project: pele Source File: database.py
    def findMinimum(self, E, coords):
        """Return a stored minimum matching energy E and coords, or None."""
        trial = Minimum(E, coords)
        # Eager-load coords: compareMinima is likely to need them, and
        # loading them row-by-row would be slow.
        query = self.session.query(Minimum).\
            options(undefer("coords")).\
            filter(Minimum.energy > E - self.accuracy).\
            filter(Minimum.energy < E + self.accuracy)
        for existing in query:
            if self.compareMinima and not self.compareMinima(trial, existing):
                continue
            return existing
        return None

Example 2

Project: quicktill Source File: stock.py
    def popup_menu(self, department):
        """Pop up a menu of stock items (at most 100) in the department."""
        items = self.filter.query_items(department).\
            options(joinedload('stocktype')).\
            options(undefer('remaining'))[:100]
        fmt = ui.tableformatter(' r l c ')
        menu = []
        for item in items:
            amount = "%s %ss" % (item.remaining, item.stockunit.unit.name)
            menu.append((fmt(item.id, item.stocktype.format(), amount),
                         self.item_chosen, (item.id,)))
        ui.menu(menu, title=self.title)

Example 3

Project: quicktill Source File: views.py
@tillweb_view
def session_transactions(request, info, session, sessionid):
    """Render the transactions of one session as an AJAX fragment."""
    query = session.query(Session).filter_by(id=int(sessionid))
    # Eager-load transaction totals and payments to avoid N+1 queries.
    query = query.options(undefer('transactions.total'))
    query = query.options(joinedload('transactions.payments'))
    try:
        s = query.one()
    except NoResultFound:
        raise Http404
    return ('session-transactions.ajax', {'session': s})

Example 4

Project: quicktill Source File: views.py
@tillweb_view
def transaction(request, info, session, transid):
    """Display a single transaction with its lines and payments."""
    query = session.query(Transaction).filter_by(id=int(transid))
    # Eager-load everything the template touches in as few queries as possible.
    query = query.options(subqueryload_all('payments'))
    query = query.options(joinedload_all('lines.stockref.stockitem.stocktype'))
    query = query.options(joinedload('lines.user'))
    query = query.options(undefer('total'))
    try:
        t = query.one()
    except NoResultFound:
        raise Http404
    return ('transaction.html', {'transaction': t,})

Example 5

Project: sqlalchemy Source File: test_deferred.py
    def test_load_only_w_deferred(self):
        """undefer() should restore a column alongside a load_only()."""
        orders, Order = self.tables.orders, self.classes.Order
        mapper(Order, orders,
               properties={"description": deferred(orders.c.description)})
        sess = create_session()
        query = sess.query(Order).options(
            load_only("isopen", "description"),
            undefer("user_id"))
        expected = (
            "SELECT orders.description AS orders_description, "
            "orders.id AS orders_id, "
            "orders.user_id AS orders_user_id, "
            "orders.isopen AS orders_isopen FROM orders")
        self.assert_compile(query, expected)

Example 6

Project: sqlalchemy Source File: test_deferred.py
    def test_defer_on_wildcard_subclass(self):
        # Like load_only, except the wildcard defer does not implicitly
        # keep the primary key columns.
        sess = Session()
        query = sess.query(Manager).order_by(Person.person_id).options(
            defer(".*"), undefer("status"))
        expected = (
            "SELECT managers.status AS managers_status "
            "FROM people JOIN managers ON "
            "people.person_id = managers.person_id ORDER BY people.person_id")
        self.assert_compile(query, expected)

Example 7

Project: KaraKara Source File: actions.py
def get_track_dict_full(id):
    """Return the 'full' dict form of the track with this id, or None."""
    query = DBSession.query(Track).options(
        joinedload(Track.tags),
        joinedload(Track.attachments),
        joinedload('tags.parent'),
        undefer(Track.lyrics),
    )
    try:
        # .get() yields None for an unknown id, making .to_dict raise
        # AttributeError, which we translate into a None result.
        return query.get(id).to_dict('full')
    except AttributeError:
        return None

Example 8

Project: flask-lastuser Source File: sqlalchemy.py
Function: get
    @classmethod
    def get(cls, username=None, userid=None, defercols=True):
        """
        Return a User with the given username or userid.

        :param str username: Username to lookup
        :param str userid: Userid to lookup
        """
        # Exactly one of the two lookup keys must be supplied.
        if bool(username) + bool(userid) != 1:
            raise TypeError("Only one of username or userid should be specified")

        if userid:
            query = cls.query.filter_by(userid=userid)
        else:
            query = cls.query.filter_by(username=username)
        if not defercols:
            # Load the normally-deferred userinfo column up front.
            query = query.options(undefer('userinfo'))
        return query.one_or_none()

Example 9

Project: indico Source File: display.py
    @classproperty
    @classmethod
    def _category_query_options(cls):
        """Loader options for a category together with its direct children."""
        children = subqueryload('children')
        children.load_only('id', 'parent_id', 'title', 'protection_mode')
        children.subqueryload('acl_entries')
        for counter in ('deep_children_count', 'deep_events_count',
                        'has_events'):
            children.undefer(counter)
        return (children,
                load_only('id', 'parent_id', 'title', 'protection_mode'),
                subqueryload('acl_entries'),
                undefer('deep_children_count'),
                undefer('deep_events_count'),
                undefer('has_events'),
                undefer('chain'))

Example 10

Project: indico Source File: serialize.py
def serialize_category_chain(category, include_children=False, include_parents=False):
    """Serialize a category, optionally with its children and parent chain."""
    result = {'category': serialize_category(category, with_path=True)}
    path = result['category']['path']
    if include_children:
        result['subcategories'] = [
            serialize_category(child, with_path=True, parent_path=path)
            for child in category.children]
    if include_parents:
        parents = (category.parent_chain_query
                   .options(undefer('deep_events_count'),
                            undefer('deep_children_count')))
        result['supercategories'] = [
            serialize_category(parent, with_path=True, child_path=path)
            for parent in parents]
    return result

Example 11

Project: indico Source File: api.py
    def _get_query_options(self, detail_level):
        """Build the eager-loading options for an event query at detail_level."""
        acl_user_strategy = joinedload('acl_entries').joinedload('user')
        # remote group membership checks will trigger a load on _all_emails
        # but not all events use this so there's no need to eager-load them
        # acl_user_strategy.noload('_primary_email')
        # acl_user_strategy.noload('_affiliation')
        contributions_strategy = subqueryload('contributions')
        contributions_strategy.subqueryload('references')
        if detail_level in {'subcontributions', 'sessions'}:
            contributions_strategy.subqueryload('subcontributions').subqueryload('references')
        options = [acl_user_strategy, joinedload('creator')]
        if detail_level in {'contributions', 'subcontributions', 'sessions'}:
            options.append(contributions_strategy)
        if detail_level == 'sessions':
            options.append(subqueryload('sessions'))
        options.append(undefer('effective_protection_mode'))
        return options

Example 12

Project: indico Source File: management.py
def _render_subcontribution_list(contrib):
    """Render the subcontribution list fragment for one contribution."""
    subcontribs = (SubContribution.query.with_parent(contrib)
                   .options(undefer('attachment_count'))
                   .order_by(SubContribution.position)
                   .all())
    tpl = get_template_module('events/contributions/management/_subcontribution_list.html')
    return tpl.render_subcontribution_list(contrib.event_new, contrib, subcontribs)

Example 13

Project: indico Source File: sessions.py
def _get_session_list_args(event):
    """Return the template arguments for an event's session list."""
    query = (Session.query.with_parent(event)
             .options(undefer('attachment_count'),
                      subqueryload('blocks').undefer('contribution_count'))
             .order_by(db.func.lower(Session.title)))
    return {'sessions': query.all(), 'default_colors': get_colors()}

Example 14

Project: indico Source File: controllers.py
    def _process(self):
        """Render the user's favorite categories with truncated chain paths."""
        favorite_ids = {c.id for c in self.user.favorite_categories}
        query = (Category.query
                 .filter(Category.id.in_(favorite_ids))
                 .options(undefer('chain_titles')))
        categories = sorted(
            ((cat, truncate_path(cat.chain_titles[:-1], chars=50))
             for cat in query),
            key=lambda pair: (pair[0].title, pair[1]))
        return WPUser.render_template('favorites.html', 'favorites',
                                      user=self.user,
                                      favorite_categories=categories)

Example 15

Project: mediadrop Source File: podcasts.py
Function: index
    @expose_xhr('admin/podcasts/index.html',
                'admin/podcasts/index-table.html')
    @paginate('podcasts', items_per_page=10)
    @observable(events.Admin.PodcastsController.index)
    def index(self, page=1, **kw):
        """List podcasts with pagination.

        :param page: Page number, defaults to 1.
        :type page: int
        :rtype: Dict
        :returns:
            podcasts
                The list of :class:`~mediadrop.model.podcasts.Podcast`
                instances for this page.
        """
        # media_count is a deferred column; undefer it so the listing does
        # not trigger an extra query per podcast.
        query = DBSession.query(Podcast)\
            .options(orm.undefer('media_count'))
        return dict(podcasts=query.order_by(Podcast.title))

Example 16

Project: mediadrop Source File: media.py
Function: tags
    @expose('media/tags.html')
    def tags(self, **kwargs):
        """Display a listing of all tags."""
        # Undefer the published-media count so the template can show it
        # without an extra query per tag.
        published = Tag.query.options(orm.undefer('media_count_published'))
        return dict(tags=published.filter(Tag.media_count_published > 0))

Example 17

Project: pele Source File: database.py
Function: addminimum
    def addMinimum(self, E, coords, commit=True, max_n_minima=-1, pgorder=None, fvib=None):
        """add a new minimum to database

        Parameters
        ----------
        E : float
        coords : numpy.array
            coordinates of the minimum
        commit : bool, optional
            commit changes to database
        max_n_minima : int, optional
            keep only the max_n_minima with the lowest energies. If E is greater
            than the minimum with the highest energy in the database, then don't add
            this minimum and return None.  Else add this minimum and delete the minimum
            with the highest energy.  if max_n_minima < 0 then it is ignored.
        pgorder : int, optional
            point group order stored on the new minimum when given
        fvib : float, optional
            vibrational free-energy contribution stored on the new minimum
            when given

        Returns
        -------
        minimum : Minimum
            minimum which was added (not necessarily a new minimum)
        """
        self.lock.acquire()
        # Release the lock in a finally block: the original code released
        # it manually before each return, so any exception (e.g. a failed
        # commit) left the lock held forever and deadlocked later calls.
        try:
            # undefer coords because it is likely to be used by compareMinima and
            # it is slow to load them individually by accessing the database repetitively.
            candidates = self.session.query(Minimum).\
                options(undefer("coords")).\
                filter(Minimum.energy.between(E - self.accuracy, E + self.accuracy))

            new = Minimum(E, coords)

            # If an equivalent minimum already exists, return it instead of
            # adding a duplicate.
            for m in candidates:
                if self.compareMinima:
                    if not self.compareMinima(new, m):
                        continue
                return m

            if max_n_minima is not None and max_n_minima > 0:
                if self.number_of_minima() >= max_n_minima:
                    mmax = self._highest_energy_minimum()
                    if E >= mmax.energy:
                        # don't add the minimum
                        return None
                    # remove the minimum with the highest energy and continue
                    self.removeMinimum(mmax, commit=commit)

            if fvib is not None:
                new.fvib = fvib
            if pgorder is not None:
                new.pgorder = pgorder
            self.session.add(new)
            if commit:
                self.session.commit()
        finally:
            self.lock.release()

        # Notify listeners outside the lock, matching the original ordering.
        self.on_minimum_added(new)
        return new

Example 18

Project: pele Source File: database.py
    def addTransitionState(self, energy, coords, min1, min2, commit=True, 
                           eigenval=None, eigenvec=None, pgorder=None, fvib=None):
        """Add transition state object
        
        Parameters
        ----------
        energy : float
            energy of transition state
        coords : numpy array
            coordinates of transition state
        min1, min2 : Minimum
            minima on either side of the transition states
        eigenval : float
            the eigenvalue (curvature) across the transition state
        eigenvec : numpy array
            the eigenvector associated with eigenval
        commit : bool
            commit changes to sql database
        
        Returns
        -------
        ts : TransitionState
            the transition state object (not necessarily new)
        """
        m1, m2 = min1, min2
        if m1.id() > m2.id():
            m1, m2 = m2, m1
        candidates = self.session.query(TransitionState).\
            options(undefer("coords")).\
            filter(or_(
                       and_(TransitionState.minimum1==m1, 
                            TransitionState.minimum2==m2),
                       and_(TransitionState.minimum1==m2, 
                            TransitionState.minimum2==m1),
                       )).\
            filter(TransitionState.energy.between(energy-self.accuracy,  energy+self.accuracy))
        
        for m in candidates:
            return m

        new = TransitionState(energy, coords, m1, m2, eigenval=eigenval, eigenvec=eigenvec)
        
        if fvib is not None:
            new.fvib = fvib
        if pgorder is not None:
            new.pgorder = pgorder 
        self.session.add(new)
        if commit:
            self.session.commit()
        self.on_ts_added(new)
        return new

Example 19

Project: pele Source File: optim_compatibility.py
    def write_min_data_ts_data(self):
        """Write minima and transition states to OPTIM-style data files.

        Minima go (ordered by energy) to ``self.mindata`` with their points
        in ``self.pointsmin``; transition states go to ``self.tsdata`` with
        points in ``self.pointsts``.  Missing fvib values are written as
        1. and missing pgorder values as 1.
        """

        # write minima ordered by energy
        minima_labels = dict()
        import sqlalchemy.orm
        with open(self.pointsmin, "wb") as point_out: 
            with open(self.mindata, "w") as data_out:
                
                # undefer "coords" so the deferred coordinate column is
                # fetched in the same query instead of one query per row.
                minima_iter = self.db.session.query(Minimum).\
                            options(sqlalchemy.orm.undefer("coords")).order_by(Minimum.energy)
                for label, m in enumerate(minima_iter):
                    minima_labels[m.id()] = label + 1 # +1 so it starts with 1
                    fvib = m.fvib
                    if fvib is None:
                        fvib = 1.
                    pgorder = m.pgorder
                    if pgorder is None:
                        pgorder = 1
                    
                    data_out.write("{} {} {} 1 1 1\n".format(m.energy,
                                                              fvib,
                                                              pgorder))
                    write_points_min_ts(point_out, m.coords, endianness=self.endianness)
        
        # NOTE(review): this raises NameError if the database contains no
        # minima (m is never bound) - confirm whether that case can occur.
        del m
        
        # write transition states (the query applies no explicit ordering)
        with open(self.pointsts, "wb") as point_out: 
            with open(self.tsdata, "w") as data_out:
                
                ts_iter = self.db.session.query(TransitionState).\
                            options(sqlalchemy.orm.undefer("coords"))
                for ts in ts_iter:
                    # Map database ids to the 1-based labels written above.
                    m1_label = minima_labels[ts._minimum1_id]
                    m2_label = minima_labels[ts._minimum2_id]

                    fvib = ts.fvib
                    if fvib is None:
                        fvib = 1.
                    pgorder = ts.pgorder
                    if pgorder is None:
                        pgorder = 1

                    data_out.write("{energy} {fvib} {pgorder} {min1} {min2} 1 1 1\n".format(
                        energy=ts.energy, fvib=fvib, pgorder=pgorder, 
                        min1=m1_label, min2=m2_label))
                    write_points_min_ts(point_out, ts.coords, endianness=self.endianness)

Example 20

Project: quicktill Source File: managestock.py
Function: stockcheck
@user.permission_required('stock-check', 'List unfinished stock items')
def stockcheck(dept=None):
    """List unfinished stock items grouped by stock type.

    Builds a menu of all stock types that have unfinished, checked-in
    stock, sorted so the types closest to running out come first.
    Selecting a line pops up the individual items of that type.

    :param dept: optional department id used to restrict the listing
    """
    # Build a list of all not-finished stock items.
    log.info("Stock check")
    sq = td.s.query(StockItem)\
             .join(StockItem.stocktype)\
             .join(Delivery)\
             .filter(StockItem.finished == None)\
             .filter(Delivery.checked == True)\
             .options(contains_eager(StockItem.stocktype))\
             .options(contains_eager(StockItem.delivery))\
             .options(undefer('remaining'))\
             .order_by(StockItem.id)
    if dept:
        sq = sq.filter(StockType.dept_id == dept)
    sinfo = sq.all()
    # Group the items by stocktype; each group is a list of items that
    # share the same stock type.
    groups = {}
    for item in sinfo:
        groups.setdefault(item.stocktype_id, []).append(item)
    st = list(groups.values())
    # Sort by ascending total amount remaining so the things that are
    # closest to running out are near the start - handy!
    # (sum() replaces the original hand-rolled reduce-with-lambda.)
    st.sort(key=lambda group: sum(item.remaining for item in group))
    # We want to show name, remaining, items in each line, and when a
    # line is selected we pop up the list of individual items.
    sl = []
    f = ui.tableformatter(' l l l ')
    for group in st:
        name = group[0].stocktype.format(maxw=40)
        remaining = sum(item.remaining for item in group)
        items = len(group)
        unit = group[0].stocktype.unit.name
        sl.append(
            (f(name,
               "{:.0f} {}s".format(remaining, unit),
               "({} item{})".format(items, "" if items == 1 else "s")),
             stockdetail, (group,)))
    title = "Stock Check" if dept is None \
            else "Stock Check department {}".format(dept)
    ui.menu(sl, title=title, blurb="Select a stock type and press "
            "Cash/Enter for details on individual items.",
            dismiss_on_select=False, keymap={
            keyboard.K_PRINT: (print_stocklist_menu, (sinfo, title), False)})

Example 21

Project: quicktill Source File: managestock.py
@user.permission_required('stock-history', 'List finished stock')
def stockhistory(dept=None):
    """Show finished stock items, most recent first."""
    log.info("Stock history")
    query = td.s.query(StockItem)\
                .join(StockItem.stocktype)\
                .filter(StockItem.finished != None)\
                .options(undefer(StockItem.remaining))\
                .options(joinedload_all('stocktype.unit'))\
                .order_by(StockItem.id.desc())
    if dept:
        query = query.filter(StockType.dept_id == dept)
    fmt = ui.tableformatter(' r l l ')
    sl = []
    for item in query.all():
        sl.append((fmt(item.id, item.stocktype.format(), item.remaining_units),
                   stock.stockinfo_popup, (item.id,)))
    if dept is None:
        title = "Stock History"
    else:
        title = "Stock History department {}".format(dept)
    ui.menu(sl, title=title, blurb="Select a stock item and press "
            "Cash/Enter for more information.  The number of units remaining "
            "when the stock was finished is shown.", dismiss_on_select=False)

Example 22

Project: quicktill Source File: managestock.py
Function: enter
    def enter(self):
        """Validate the form fields and display the stock-to-buy report."""
        if self.wfield.f == '' or self.mfield.f == '' or self.minfield.f == '':
            ui.infopopup(["You must fill in all three fields."], title="Error")
            return
        weeks_ahead = int(self.wfield.f)
        months_behind = int(self.mfield.f)
        min_sale = float(self.minfield.f)
        ahead = datetime.timedelta(days=weeks_ahead * 7)
        # A "month" is approximated as 30.4 days.
        behind = datetime.timedelta(days=months_behind * 30.4)
        dept = self.deptfield.read() if self.deptfield.f is not None else None
        if dept:
            td.s.add(dept)
        self.dismiss()
        query = td.s.query(StockType, func.sum(StockOut.qty) / behind.days)\
                    .join(StockItem)\
                    .join(StockOut)\
                    .options(lazyload(StockType.department))\
                    .options(lazyload(StockType.unit))\
                    .options(undefer(StockType.instock))\
                    .filter(StockOut.removecode_id == 'sold')\
                    .filter((func.now() - StockOut.time) < behind)\
                    .having(func.sum(StockOut.qty) / behind.days > min_sale)\
                    .group_by(StockType)
        if dept:
            query = query.filter(StockType.dept_id == dept.id)
        fmt = ui.tableformatter(' l r  r  r ')
        lines = [fmt(st.format(), '{:0.1f}'.format(sold), st.instock,
                     '{:0.1f}'.format(sold * ahead.days - st.instock))
                 for st, sold in query.all()]
        # Largest shortfall ("Buy" column) first.
        lines.sort(key=lambda line: float(line.fields[3]), reverse=True)
        ui.listpopup(lines,
                     header=[fmt('Name', 'Sold per day', 'In stock', 'Buy')],
                     title="Stock to buy for next {} weeks".format(weeks_ahead),
                     colour=ui.colour_info, show_cursor=False,
                     dismiss=keyboard.K_CASH)

Example 23

Project: quicktill Source File: spreadsheets.py
def sessionrange(ds,start=None,end=None,tillname="Till"):
    """
    A spreadsheet summarising sessions between the start and end date.

    :param ds: database session used for the queries
    :param start: earliest session date to include (inclusive), or None
    :param end: latest session date to include (inclusive), or None
    :param tillname: name used for the sheet and the output filename
    """
    depts=ds.query(Department).order_by(Department.id).all()
    # Per-session, per-department totals.  Sessions with no recorded
    # takings (no SessionTotal rows) are excluded by the correlated
    # subquery filter below.
    depttotals=ds.query(Session,Department,func.sum(
            Transline.items*Transline.amount)).\
        select_from(Session).\
        options(undefer('total')).\
        options(undefer('actual_total')).\
        filter(Session.endtime!=None).\
        filter(select([func.count(SessionTotal.sessionid)],
                      whereclause=SessionTotal.sessionid==Session.id).\
                   correlate(Session.__table__).as_scalar()!=0).\
        join(Transaction,Transline,Department).\
        order_by(Session.id,Department.id).\
        group_by(Session,Department)
    if start: depttotals=depttotals.filter(Session.date>=start)
    if end: depttotals=depttotals.filter(Session.date<=end)

    # NOTE(review): "OpenDocuementSpreadsheet" looks like a misspelling of
    # "OpenDocumentSpreadsheet" - confirm against the defining module
    # before renaming.
    doc=OpenDocuementSpreadsheet()

    datestyle=dateStyle(doc)
    currencystyle=currencyStyle(doc)

    # Bold, centred cell style for the header row.
    header=Style(name="ColumnHeader",family="table-cell")
    header.addElement(
        ParagraphProperties(textalign="center"))
    header.addElement(
        TextProperties(fontweight="bold"))
    doc.automaticstyles.addElement(header)

    def colwidth(w):
        # Create a fresh numbered column-width style; the counter lives
        # as an attribute on the function itself.
        if not hasattr(colwidth,'num'): colwidth.num=0
        colwidth.num+=1
        width=Style(name="W{}".format(colwidth.num),family="table-column")
        width.addElement(TableColumnProperties(columnwidth=w))
        doc.automaticstyles.addElement(width)
        return width

    widthshort=colwidth("2.0cm")
    widthtotal=colwidth("2.2cm")
    widthgap=colwidth("0.5cm")

    table=Table(name=tillname)

    # Session ID and date
    table.addElement(TableColumn(numbercolumnsrepeated=2,stylename=widthshort))
    # Totals
    table.addElement(TableColumn(numbercolumnsrepeated=2,stylename=widthtotal))
    # Gap
    table.addElement(TableColumn(stylename=widthgap))
    # Departments
    table.addElement(TableColumn(numbercolumnsrepeated=len(depts),
                                 stylename=widthshort))

    tr=TableRow()
    table.addElement(tr)
    def tcheader(text):
        # Header cell using the bold centred style defined above.
        tc=TableCell(valuetype="string",stylename=header)
        tc.addElement(P(stylename=header,text=text))
        return tc
    tr.addElement(tcheader("ID"))
    tr.addElement(tcheader("Date"))
    tr.addElement(tcheader("Till Total"))
    tr.addElement(tcheader("Actual Total"))
    tr.addElement(TableCell())
    for d in depts:
        tr.addElement(tcheader(d.description))

    def tcint(i):
        """
        Integer table cell

        """
        return TableCell(valuetype="float",value=i)

    def tcdate(d):
        """
        Date table cell

        """
        return TableCell(valuetype="date",datevalue=d,stylename=datestyle)

    def tcmoney(m):
        """
        Money table cell

        """
        return TableCell(valuetype="currency",currency="GBP",value=str(m),
                         stylename=currencystyle)

    # One spreadsheet row per session.  The query is ordered by
    # (session, department), so a change of session starts a new row;
    # departments with no sales in a session get empty cells so the
    # department columns stay aligned.
    tr=None
    prev_s=None
    for s,d,t in depttotals:
        if s!=prev_s:
            prev_s=s
            tr=TableRow()
            table.addElement(tr)
            tr.addElement(tcint(s.id))
            tr.addElement(tcdate(s.date))
            tr.addElement(tcmoney(s.total))
            tr.addElement(tcmoney(s.actual_total))
            tr.addElement(TableCell())
            di=iter(depts)
        while True:
            dept=next(di)
            if dept==d:
                tr.addElement(tcmoney(t))
                break
            else:
                tr.addElement(TableCell())

    doc.spreadsheet.addElement(table)

    filename="{}-summary".format(tillname)
    if start: filename=filename+"-from-{}".format(start)
    if end: filename=filename+"-to-{}".format(end)
    filename=filename+".ods"

    # NOTE(review): the MIME type spells "opendocuement"; the registered
    # type is "application/vnd.oasis.opendocument.spreadsheet".  Changing
    # it would alter runtime behaviour, so it is only flagged here.
    r=HttpResponse(content_type='application/vnd.oasis.opendocuement.spreadsheet')
    r['Content-Disposition']='attachment; filename={}'.format(filename)
    doc.write(r)
    return r

Example 24

Project: quicktill Source File: views.py
@tillweb_view
def sessionfinder(request, info, session):
    """Session search page: find by id, or export a date range as a sheet."""
    if request.method == 'POST' and "submit_find" in request.POST:
        form = SessionFinderForm(request.POST)
        if form.is_valid():
            s = session.query(Session).get(form.cleaned_data['session'])
            if s:
                return HttpResponseRedirect(info['base'] + s.tillweb_url)
            form.add_error(None, "This session does not exist.")
    else:
        form = SessionFinderForm()
    if request.method == 'POST' and "submit_sheet" in request.POST:
        rangeform = SessionRangeForm(request.POST)
        if rangeform.is_valid():
            cd = rangeform.cleaned_data
            return spreadsheets.sessionrange(
                session, start=cd['startdate'], end=cd['enddate'],
                tillname=info['tillname'])
    else:
        rangeform = SessionRangeForm()
    # Most recent 30 sessions, with totals eagerly loaded for display.
    recent = session.query(Session)\
        .options(undefer('total'))\
        .options(undefer('actual_total'))\
        .order_by(desc(Session.id))[:30]
    return ('sessions.html',
            {'recent': recent, 'form': form, 'rangeform': rangeform})

Example 25

Project: quicktill Source File: views.py
Function: session
@tillweb_view
def session(request, info, session, sessionid):
    """Show one session's summary with links to adjacent sessions."""
    try:
        # Undeferring the totals significantly improves the speed of
        # loading this page.
        s = session.query(Session)\
            .filter_by(id=int(sessionid))\
            .options(undefer('total'))\
            .options(undefer('closed_total'))\
            .options(undefer('actual_total'))\
            .one()
    except NoResultFound:
        raise Http404

    def neighbour_link(query):
        # Build the tillweb link for the first session in the query, if any.
        other = query.first()
        return info['base'] + other.tillweb_url if other else None

    nextlink = neighbour_link(session.query(Session)
                              .filter(Session.id > s.id)
                              .order_by(Session.id))
    prevlink = neighbour_link(session.query(Session)
                              .filter(Session.id < s.id)
                              .order_by(desc(Session.id)))
    return ('session.html', {'session': s, 'nextlink': nextlink,
                             'prevlink': prevlink})

Example 26

Project: quicktill Source File: views.py
@tillweb_view
def stockcheck(request, info, session):
    """Web version of the stock-to-buy report."""
    buylist = []
    depts = session.query(Department).order_by(Department.id).all()
    if request.method != 'POST':
        return ('stockcheck.html',
                {'form': StockCheckForm(depts), 'buylist': buylist})
    form = StockCheckForm(depts, request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        ahead = datetime.timedelta(days=cd['weeks_ahead'] * 7)
        # A "month" is approximated as 30.4 days.
        behind = datetime.timedelta(days=cd['months_behind'] * 30.4)
        min_sale = cd['minimum_sold']
        dept = int(cd['department'])
        q = session.query(StockType, func.sum(StockOut.qty) / behind.days)\
            .join(StockItem)\
            .join(StockOut)\
            .options(lazyload(StockType.department))\
            .options(lazyload(StockType.unit))\
            .options(undefer(StockType.instock))\
            .filter(StockOut.removecode_id == 'sold')\
            .filter((func.now() - StockOut.time) < behind)\
            .filter(StockType.dept_id == dept)\
            .having(func.sum(StockOut.qty) / behind.days > min_sale)\
            .group_by(StockType)
        buylist = [(st, '{:0.1f}'.format(sold),
                    '{:0.1f}'.format(sold * ahead.days - st.instock))
                   for st, sold in q.all()]
        # Largest shortfall first.
        buylist.sort(key=lambda row: float(row[2]), reverse=True)
    return ('stockcheck.html', {'form': form, 'buylist': buylist})

Example 27

Project: python-sync-db Source File: conflicts.py
def find_unique_conflicts(pull_ops, unversioned_ops, pull_message, session):
    """
    Unique constraints violated in a model. Returns two lists of
    dictionaries, the first one with the solvable conflicts, and the
    second one with the proper errors. Each conflict is a dictionary
    with the following fields::

        object: the local conflicting object, bound to the session
        columns: tuple of column names in the unique constraint
        new_values: tuple of values that can be used to update the
                    conflicting object

    Each error is a dictionary with the following fields::

        model: the model (class) of the conflicting object
        pk: the value of the primary key of the conflicting object
        columns: tuple of column names in the unique constraint
    """

    def verify_constraint(model, columns, values):
        """
        Checks to see whether some local object exists with
        conflicting values.

        Returns a (match, pk) pair; both are None when no local object
        holds the given values.
        """
        # undefer the constraint columns so the comparison below doesn't
        # trigger one extra SELECT per deferred column
        match = query_model(session, model, only_pk=True).\
            options(*(undefer(column) for column in columns)).\
            filter_by(**dict((column, value)
                             for column, value in izip(columns, values))).first()
        pk = get_pk(model)
        return match, getattr(match, pk, None)

    def get_remote_values(model, row_id, columns):
        """
        Gets the conflicting values out of the remote object set
        (*container*).
        """
        obj = pull_message.query(model).filter(attr('__pk__') == row_id).first()
        if obj is not None:
            return tuple(getattr(obj, column) for column in columns)
        # (None,) makes the all-None check below succeed and skip the row
        return (None,)

    # keyed to content type
    # pks of local objects that have unversioned (not yet pushed)
    # insert/update operations; deletes are excluded on purpose
    unversioned_pks = dict((ct_id, set(op.row_id for op in unversioned_ops
                                       if op.content_type_id == ct_id
                                       if op.command != 'd'))
                           for ct_id in set(operation.content_type_id
                                            for operation in unversioned_ops))
    # the lists to fill with conflicts and errors
    conflicts, errors = [], []

    for op in pull_ops:
        model = op.tracked_model

        # only UNIQUE constraints matter here; other constraint types
        # (PK, FK, CHECK) are filtered out
        for constraint in ifilter(lambda c: isinstance(c, UniqueConstraint),
                                  class_mapper(model).mapped_table.constraints):

            unique_columns = tuple(col.name for col in constraint.columns)
            # Unique values on the server, to check conflicts with local database
            remote_values = get_remote_values(model, op.row_id, unique_columns)

            obj_conflict, pk_conflict = verify_constraint(
                model, unique_columns, remote_values)

            # True when the conflicting local object has pending
            # unversioned changes of its own
            is_unversioned = pk_conflict in unversioned_pks.get(
                op.content_type_id, set())

            if all(value is None for value in remote_values): continue # Null value
            if pk_conflict is None: continue # No problem
            if pk_conflict == op.row_id:
                if op.command == 'i':
                    # Two nodes created objects with the same unique
                    # value and same pk
                    errors.append(
                        {'model': type(obj_conflict),
                         'pk': pk_conflict,
                         'columns': unique_columns})
                continue

            # if pk_conflict != op.row_id:
            remote_obj = pull_message.query(model).\
                filter(attr('__pk__') == pk_conflict).first()

            if remote_obj is not None and not is_unversioned:
                old_values = tuple(getattr(obj_conflict, column)
                                   for column in unique_columns)
                # The new unique value of the conflicting object
                # in server
                new_values = tuple(getattr(remote_obj, column)
                                   for column in unique_columns)

                if old_values != new_values:
                    # Library error
                    # It's necessary to first update the unique value
                    session.refresh(obj_conflict, column_properties(obj_conflict))
                    conflicts.append(
                        {'object': obj_conflict,
                         'columns': unique_columns,
                         'new_values': new_values})
                else:
                    # The server allows two identical unique values
                    # This should be impossible
                    pass
            elif remote_obj is not None and is_unversioned:
                # Two nodes created objects with the same unique
                # values. Human error.
                errors.append(
                    {'model': type(obj_conflict),
                     'pk': pk_conflict,
                     'columns': unique_columns})
            else:
                # The conflicting object hasn't been modified on the
                # server, which must mean the local user is attempting
                # an update that collides with one from another user.
                errors.append(
                    {'model': type(obj_conflict),
                     'pk': pk_conflict,
                     'columns': unique_columns})
    return conflicts, errors

Example 28

Project: sqlalchemy Source File: test_deferred.py
    def test_options(self):
        """Options on a mapper to create deferred and undeferred columns"""

        orders, Order = self.tables.orders, self.classes.Order


        mapper(Order, orders)

        sess = create_session()
        # defer('user_id') keeps user_id out of the initial SELECT
        q = sess.query(Order).order_by(Order.id).options(defer('user_id'))

        def go():
            q.all()[0].user_id

        # expect two statements: the main load without user_id, then a
        # second SELECT triggered by the deferred attribute access
        self.sql_eq_(go, [
            ("SELECT orders.id AS orders_id, "
             "orders.address_id AS orders_address_id, "
             "orders.description AS orders_description, "
             "orders.isopen AS orders_isopen "
             "FROM orders ORDER BY orders.id", {}),
            ("SELECT orders.user_id AS orders_user_id "
             "FROM orders WHERE orders.id = :param_1",
             {'param_1':1})])
        sess.expunge_all()

        # undefer('user_id') cancels the defer: a single SELECT that
        # includes the user_id column up front
        q2 = q.options(undefer('user_id'))
        self.sql_eq_(q2.all, [
            ("SELECT orders.id AS orders_id, "
             "orders.user_id AS orders_user_id, "
             "orders.address_id AS orders_address_id, "
             "orders.description AS orders_description, "
             "orders.isopen AS orders_isopen "
             "FROM orders ORDER BY orders.id",
             {})])

Example 29

Project: sqlalchemy Source File: test_deferred.py
    def test_deep_options(self):
        # Verifies that undefer() applied along a multi-hop relationship
        # path ('orders.items.description') undefers the column on the
        # far entity, eliminating the lazy load seen without the option.
        users, items, order_items, Order, Item, User, orders = (self.tables.users,
                                self.tables.items,
                                self.tables.order_items,
                                self.classes.Order,
                                self.classes.Item,
                                self.classes.User,
                                self.tables.orders)

        # Item.description is mapped as deferred at the mapper level
        mapper(Item, items, properties=dict(
            description=deferred(items.c.description)))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items)))
        mapper(User, users, properties=dict(
            orders=relationship(Order, order_by=orders.c.id)))

        sess = create_session()
        q = sess.query(User).order_by(User.id)
        l = q.all()
        item = l[0].orders[1].items[1]
        def go():
            eq_(item.description, 'item 4')
        # without undefer, touching .description costs one extra SELECT
        self.sql_count_(1, go)
        eq_(item.description, 'item 4')

        sess.expunge_all()
        l = q.options(undefer('orders.items.description')).all()
        item = l[0].orders[1].items[1]
        def go():
            eq_(item.description, 'item 4')
        # with undefer along the path, no extra SQL is emitted
        self.sql_count_(0, go)
        eq_(item.description, 'item 4')

Example 30

Project: KaraKara Source File: comunity.py
@view_config(route_name='comunity_list')
@etag_decorator(_generate_cache_key_comunity_list)
@web
@comunity_only
def comunity_list(request):
    """Community track listing view.

    Builds (or fetches from cache) a dict of all tracks with their
    processing status and flattened tags, invalidating the cache when
    the database has been updated since the last build.
    """

    def _comnunity_list():
        # Expensive builder: runs only on cache miss.

        def track_dict_to_status(track_dict):
            track_dict['status'] = ComunityTrack.factory(track_dict, request).status
            # Flatten tags into a single list
            track_dict['tags_flattened'] = [
                '{}:{}'.format(parent, tag)
                for parent, tags in track_dict['tags'].items()
                for tag in tags
            ]
            # drop bulky fields not needed by the listing page
            del track_dict['tags']
            del track_dict['attachments']
            del track_dict['lyrics']
            return track_dict

        # Get tracks from db
        tracks = [
            # , exclude_fields=('lyrics','attachments','image')
            track_dict_to_status(track.to_dict('full')) \
            for track in DBSession.query(Track) \
                .order_by(Track.source_filename) \
                .options( \
                    joinedload(Track.tags), \
                    joinedload(Track.attachments), \
                    joinedload('tags.parent'), \
                    undefer('lyrics'), \
                )
        ]

        # TODO: look at meta
        # TODO: look at path_upload

        return {
            'tracks': tracks,
            # TODO: add further details of currently processing files?
        }

    # Invalidate cache if db has updated
    last_update_timestamp = last_update()
    global list_cache_timestamp
    if list_cache_timestamp is None or last_update_timestamp != list_cache_timestamp:
        list_cache_timestamp = last_update_timestamp
        invalidate_list_cache(request)

    data_tracks = cache.get_or_create(LIST_CACHE_KEY, _comnunity_list)
    return action_ok(data=data_tracks)

Example 31

Project: flask-lastuser Source File: sqlalchemy.py
Function: load_user
    def load_user(self, userid, create=False):
        """Load the user record for *userid*, optionally creating it.

        Prefers the model's own ``get`` classmethod when available;
        otherwise queries by userid with ``userinfo`` undeferred.  When
        *create* is true and no row exists, a new user is inserted
        inside a nested transaction; a concurrent insert (IntegrityError)
        is resolved by re-querying the existing row.
        """
        # TODO: How do we cache this? Connect to a cache manager
        if hasattr(self.usermodel, 'get'):
            user = self.usermodel.get(userid=userid, defercols=False)
        else:
            user = self.usermodel.query.filter_by(userid=userid
                ).options(undefer('userinfo')).one_or_none()
        if user is None:
            if create:
                user = self.usermodel(userid=userid)
                # We don't have the benefit of assuming add_and_commit is available here,
                # so replicate its behaviour.
                self.db.session.begin_nested()
                try:
                    self.db.session.add(user)
                    self.db.session.commit()
                except IntegrityError:
                    # another request inserted the same userid first;
                    # roll back our savepoint and load the winner
                    self.db.session.rollback()
                    user = self.usermodel.query.filter_by(userid=userid).one()
        return user

Example 32

Project: impactstory-tng Source File: jobs.py
def update_fn(cls, method_name, obj_id_list, shortcut_data=None, index=1):
    """Worker body: load objects of *cls* by id and call *method_name* on each.

    Python 2 code (print statements).  Intended to run in a forked
    worker process: the engine is disposed first so the fork gets a
    fresh connection pool.  *shortcut_data*, when given, is passed to
    the method; *index* offsets the printed progress counter.
    Always returns None (important when used as an RQ job).
    """

    # we are in a fork!  dispose of our engine.
    # will get a new one automatically
    db.engine.dispose()

    start = time()

    # undefer('*') loads every deferred column up front, one query
    q = db.session.query(cls).options(orm.undefer('*')).filter(cls.id.in_(obj_id_list))

    obj_rows = q.all()
    num_obj_rows = len(obj_rows)
    print "{repr}.{method_name}() got {num_obj_rows} objects in {elapsed}sec".format(
        repr=cls.__name__,
        method_name=method_name,
        num_obj_rows=num_obj_rows,
        elapsed=elapsed(start)
    )

    for count, obj in enumerate(obj_rows):
        start_time = time()

        if obj is None:
            return None

        method_to_run = getattr(obj, method_name)

        print u"\n***\n{count}: starting {repr}.{method_name}() method".format(
            count=count + (num_obj_rows*index),
            repr=obj,
            method_name=method_name
        )

        if shortcut_data:
            method_to_run(shortcut_data)
        else:
            method_to_run()

        print u"finished {repr}.{method_name}(). took {elapsed}sec".format(
            repr=obj,
            method_name=method_name,
            elapsed=elapsed(start_time, 4)
        )

    commit_success = safe_commit(db)
    if not commit_success:
        print u"COMMIT fail"
    db.session.remove()  # close connection nicely
    return None  # important for if we use this on RQ

Example 33

Project: indico Source File: display.py
Function: process
    def _process(self):
        """Handle a category live-search request.

        Matches categories by title against the ``q`` request argument,
        ranks exact/prefix matches (and the current user's favorites)
        first, and returns up to 10 serialized categories along with the
        total match count.
        """
        term = request.args['q'].lower()
        categories = Category.query.filter(Category.title_matches(term))
        # undefer counters used by the serializer; eager-load ACLs
        categories = categories.options(
            undefer('deep_children_count'),
            undefer('deep_events_count'),
            undefer('has_events'),
            joinedload('acl_entries'))
        if session.user:
            # Prefer favorite categories
            favorite_first = (Category.favorite_of
                              .any(favorite_category_table.c.user_id == session.user.id)
                              .desc())
            categories = categories.order_by(favorite_first)
        # Prefer exact matches and matches at the beginning, then order by
        # category title and if those are identical by the chain titles
        lowered_title = db.func.lower(Category.title)
        categories = categories.order_by(
            (lowered_title == term).desc(),
            lowered_title.startswith(term).desc(),
            lowered_title,
            Category.chain_titles)
        total_count = categories.count()
        serialized = [serialize_category(c, with_favorite=True, with_path=True)
                      for c in categories.limit(10)]
        return jsonify_data(categories=serialized, total_count=total_count, flash=False)

Example 34

Project: indico Source File: categories_test.py
Function: test_effective_protection_mode
def test_effective_protection_mode(db):
    # Builds a category tree mixing inheriting/public/protected modes and
    # checks that the undeferred 'effective_protection_mode' column
    # resolves 'inheriting' to the nearest explicit ancestor mode.
    def _cat(id_, protection_mode=ProtectionMode.inheriting, children=None):
        # helper: category with a generated title and optional children
        return Category(id=id_, title='cat-{}'.format(id_), protection_mode=protection_mode, children=children or [])
    root = Category.get_root()
    root.protection_mode = ProtectionMode.protected
    root.children = [
        _cat(1),
        _cat(2, ProtectionMode.public, children=[
            _cat(3, children=[
                _cat(4, ProtectionMode.inheriting),
                _cat(5, ProtectionMode.public),
                _cat(6, ProtectionMode.protected),
            ]),
            _cat(7, ProtectionMode.protected, children=[
                _cat(8, ProtectionMode.inheriting),
                _cat(9, ProtectionMode.public),
                _cat(10, ProtectionMode.protected),
            ]),
            _cat(11)
        ])
    ]
    db.session.add(root)
    db.session.flush()
    data = {c.id: c.effective_protection_mode for c in Category.query.options(undefer('effective_protection_mode'))}
    # every 'inheriting' category takes the mode of its closest
    # non-inheriting ancestor (root is protected)
    assert data == {
        0: ProtectionMode.protected,
        1: ProtectionMode.protected,
        2: ProtectionMode.public,
        3: ProtectionMode.public,
        4: ProtectionMode.public,
        5: ProtectionMode.public,
        6: ProtectionMode.protected,
        7: ProtectionMode.protected,
        8: ProtectionMode.protected,
        9: ProtectionMode.public,
        10: ProtectionMode.protected,
        11: ProtectionMode.public
    }

Example 35

Project: indico Source File: api.py
    def _build_category_path_data(self, ids):
        """Return a serialized category path entry for each given category id."""
        paths = []
        # undefer('chain') so serializing the path needs no extra queries
        categories = Category.query.filter(Category.id.in_(ids)).options(undefer('chain'))
        for category in categories:
            paths.append({'_type': 'CategoryPath',
                          'categoryId': category.id,
                          'path': self._serialize_category_path(category)})
        return paths

Example 36

Project: indico Source File: api.py
Function: event
    def event(self, query):
        """Yield summary dicts for non-deleted events whose title matches *query*.

        Applies access control per event, honours self._offset /
        self._limit, and supports ordering by 'start' or 'id' in the
        database or by a custom sorting key in Python.
        """
        def _iterate_objs(query_string):
            # generator: stream matching events, skipping those the
            # current user cannot access
            query = (Event.query
                     .filter(Event.title_matches(to_unicode(query_string)),
                             ~Event.is_deleted)
                     .options(undefer('effective_protection_mode')))
            if self._orderBy == 'start':
                query = query.order_by(Event.start_dt)
            elif self._orderBy == 'id':
                query = query.order_by(Event.id)

            counter = 0
            # Query the DB in chunks of 1000 records per query until the limit is satisfied
            for event in query.yield_per(1000):
                if event.can_access(self._aw.getUser().user if self._aw.getUser() else None):
                    counter += 1
                    # Start yielding only when the counter reaches the given offset
                    if (self._offset is None) or (counter > self._offset):
                        yield event
                        # Stop querying the DB when the limit is satisfied
                        if (self._limit is not None) and (counter == self._offset + self._limit):
                            break

        if self._orderBy in ['start', 'id', None]:
            obj_list = _iterate_objs(query)
        else:
            # custom ordering requires materializing all results first
            obj_list = sorted(_iterate_objs(query), key=self._sortingKeys.get(self._orderBy), reverse=self._descending)
        for event in obj_list:
            yield {
                'id': event.id,
                'title': event.title,
                'startDate': event.start_dt,
                'hasAnyProtection': event.effective_protection_mode == ProtectionMode.protected
            }

Example 37

Project: indico Source File: clone.py
    def _clone_contribs(self, new_event):
        """Clone all contributions of the old event onto *new_event*.

        Copies simple column attributes plus room/venue, clones nested
        collections (subcontributions, ACLs, references, person links,
        field values), and remaps type/session/session_block through the
        previously-built clone maps.  Populates self._contrib_map for
        later cloning steps.
        """
        attrs = (get_simple_column_attrs(Contribution) | {'own_room', 'own_venue'}) - {'abstract_id'}
        # eager-load everything the clone loop touches to avoid N+1 queries
        query = (Contribution.query.with_parent(self.old_event)
                 .options(undefer('_last_friendly_subcontribution_id'),
                          joinedload('own_venue'),
                          joinedload('own_room').lazyload('*'),
                          joinedload('session'),
                          joinedload('session_block').lazyload('session'),
                          joinedload('type'),
                          subqueryload('acl_entries'),
                          subqueryload('subcontributions').joinedload('references'),
                          subqueryload('references'),
                          subqueryload('person_links'),
                          subqueryload('field_values')))
        for old_contrib in query:
            contrib = Contribution()
            contrib.populate_from_attrs(old_contrib, attrs)
            contrib.subcontributions = list(self._clone_subcontribs(old_contrib.subcontributions))
            contrib.acl_entries = clone_principals(ContributionPrincipal, old_contrib.acl_entries)
            contrib.references = list(self._clone_references(ContributionReference, old_contrib.references))
            contrib.person_links = list(self._clone_person_links(ContributionPersonLink, old_contrib.person_links))
            contrib.field_values = list(self._clone_fields(old_contrib.field_values))
            # remap related objects to their already-cloned counterparts
            if old_contrib.type is not None:
                contrib.type = self._contrib_type_map[old_contrib.type]
            if old_contrib.session is not None:
                contrib.session = self._session_map[old_contrib.session]
            if old_contrib.session_block is not None:
                contrib.session_block = self._session_block_map[old_contrib.session_block]
            new_event.contributions.append(contrib)
            self._contrib_map[old_contrib] = contrib

Example 38

Project: indico Source File: util.py
def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True):
    """Retrieve time blocks that fall within a specific time interval
       for a given set of categories.

       :param categ_ids: iterable containing list of category IDs
       :param start_dt: start of search interval (``datetime``, expected
                        to be in display timezone)
       :param end_dt: end of search interval (``datetime`` in expected
                      to be in display timezone)
       :param detail_level: the level of detail of information
                            (``event|session|contribution``)
       :param tz: the ``timezone`` information should be displayed in
       :param from_categ: ``Category`` that will be taken into account to calculate
                          visibility
       :param grouped: Whether to group results by start date
       :returns: a dictionary containing timetable information in a
                 structured way. See source code for examples.
    """
    day_start = start_dt.astimezone(utc)
    day_end = end_dt.astimezone(utc)
    # predicate factory: entity's start_dt lies within the interval
    dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)

    items = defaultdict(lambda: defaultdict(list))

    # first of all, query TimetableEntries/events that fall within
    # specified range of dates (and category set)
    events = _query_events(categ_ids, day_start, day_end)
    if from_categ:
        events = events.filter(Event.is_visible_in(from_categ))
    for eid, tt_start_dt in events:
        if tt_start_dt:
            items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
        else:
            # None marks an event with no timetable entry in the interval
            items[eid] = None

    # then, retrieve detailed information about the events
    event_ids = set(items)
    query = (Event.find(Event.id.in_(event_ids))
             .options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),
                      joinedload(Event.own_room).noload('owner'),
                      joinedload(Event.own_venue),
                      joinedload(Event.category).undefer('effective_icon_data'),
                      undefer('effective_protection_mode')))
    scheduled_events = defaultdict(list)
    ongoing_events = []
    events = []
    for e in query:
        if grouped:
            local_start_dt = e.start_dt.astimezone(tz).date()
            local_end_dt = e.end_dt.astimezone(tz).date()
            if items[e.id] is None:
                # if there is no TimetableEntry, this means the event has not timetable on that interval
                for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):
                    # if the event starts on this date, we've got a time slot
                    if day.date() == local_start_dt:
                        scheduled_events[day.date()].append((e.start_dt, e))
                    else:
                        ongoing_events.append(e)
            else:
                for start_d, start_dts in items[e.id].viewitems():
                    scheduled_events[start_d].append((start_dts[0], e))
        else:
            events.append(e)

    # result['events'][date(...)] -> [(datetime(....), Event(...))]
    # result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]
    # result['ongoing_events'] = [Event(...)]
    if grouped:
        result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    else:
        result = defaultdict(lambda: defaultdict(list))

    result.update({
        'events': scheduled_events if grouped else events,
        'ongoing_events': ongoing_events
    })

    # according to detail level, ask for extra information from the DB
    if detail_level != 'event':
        query = _query_blocks(event_ids, dates_overlap, detail_level)
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))
        else:
            for b in query:
                result[b.session.event_id]['blocks'].append(b)

    if detail_level == 'contribution':
        query = (Contribution.find(Contribution.event_id.in_(event_ids),
                                   dates_overlap(TimetableEntry),
                                   ~Contribution.is_deleted)
                 .options(contains_eager(Contribution.timetable_entry),
                          joinedload(Contribution.person_links))
                 .join(TimetableEntry))
        if grouped:
            for c in query:
                start_date = c.timetable_entry.start_dt.astimezone(tz).date()
                result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))
        else:
            for c in query:
                result[c.event_id]['contributions'].append(c)

        # breaks are timetable entries too and are collected separately
        query = (Break.find(TimetableEntry.event_id.in_(event_ids), dates_overlap(TimetableEntry))
                 .options(contains_eager(Break.timetable_entry))
                 .join(TimetableEntry))
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.timetable_entry.event_id]['breaks'][start_date].append((b.timetable_entry, b))
        else:
            for b in query:
                result[b.timetable_entry.event_id]['breaks'].append(b)
    return result

Example 39

Project: indico Source File: util.py
def get_related_categories(user, detailed=True):
    """Return the categories related to *user* for the dashboard.

    Related means favorited by the user or managed by the user.  With
    ``detailed=False`` a plain set of categories is returned; otherwise
    an OrderedDict keyed by ``(title, id)`` mapping to per-category
    detail dicts, sorted by that key.
    """
    if user.favorite_categories:
        favorites = set(Category.query
                        .filter(Category.id.in_(c.id for c in user.favorite_categories))
                        .options(undefer('chain_titles'))
                        .all())
    else:
        favorites = set()
    managed = set(Category.query
                  .filter(Category.acl_entries.any(db.and_(CategoryPrincipal.type == PrincipalType.user,
                                                           CategoryPrincipal.user == user,
                                                           CategoryPrincipal.has_management_role())),
                          ~Category.is_deleted)
                  .options(undefer('chain_titles')))
    related = favorites | managed
    if not detailed:
        return related
    details = {
        (categ.title, categ.id): {
            'categ': categ,
            'favorite': categ in favorites,
            'managed': categ in managed,
            # drop the category itself from its displayed path
            'path': truncate_path(categ.chain_titles[:-1], chars=50),
        }
        for categ in related
    }
    return OrderedDict(sorted(details.items(), key=itemgetter(0)))

Example 40

Project: mediadrop Source File: categories.py
    @expose('admin/categories/index.html')
    @paginate('tags', items_per_page=25)
    @observable(events.Admin.CategoriesController.index)
    def index(self, **kwargs):
        """List categories.

        :rtype: Dict
        :returns:
            categories
                The populated tree of
                :class:`~mediadrop.model.categories.Category` instances,
                ordered by name, with ``media_count`` undeferred.
            category_form
                The :class:`~mediadrop.forms.admin.settings.categories.CategoryForm` instance.

        """
        category_tree = (Category.query
                         .order_by(Category.name)
                         .options(orm.undefer('media_count'))
                         .populated_tree())
        return dict(
            categories=category_tree,
            category_form=category_form,
            category_row_form=category_row_form,
        )

Example 41

Project: mediadrop Source File: media.py
    @expose_xhr('admin/media/index.html', 'admin/media/index-table.html')
    @paginate('media', items_per_page=15)
    @observable(events.Admin.MediaController.index)
    def index(self, page=1, search=None, filter=None, podcast=None,
              category=None, tag=None, **kwargs):
        """List media with pagination and filtering.

        :param page: Page number, defaults to 1.
        :type page: int
        :param search: Optional search term to filter by
        :type search: unicode or None
        :param filter: Optional status filter: one of 'unreviewed',
            'unencoded', 'drafts', 'published'; unknown values are ignored.
        :param podcast: Optional podcast slug to filter by
        :param category: Optional category slug to filter by
        :param tag: Optional tag slug to filter by
        :rtype: dict
        :returns:
            media
                The list of :class:`~mediadrop.model.media.Media` instances
                for this page.
            search
                The given search term, if any
            search_form
                The :class:`~mediadrop.forms.admin.SearchForm` instance
            podcast
                The podcast object for rendering if filtering by podcast.

        """
        media = Media.query.options(orm.undefer('comment_count_published'))

        if search:
            media = media.admin_search(search)
        else:
            # default ordering: status, then newest first
            media = media.order_by_status()\
                         .order_by(Media.publish_on.desc(),
                                   Media.modified_on.desc())

        # status filters; anything not listed here (including None)
        # leaves the query untouched, matching the original behaviour
        status_filters = {
            'unreviewed': lambda q: q.reviewed(False),
            'unencoded': lambda q: q.reviewed().encoded(False),
            'drafts': lambda q: q.drafts(),
            'published': lambda q: q.published(),
        }
        if filter in status_filters:
            media = status_filters[filter](media)

        if category:
            category = fetch_row(Category, slug=category)
            media = media.filter(Media.categories.contains(category))
        if tag:
            tag = fetch_row(Tag, slug=tag)
            media = media.filter(Media.tags.contains(tag))
        if podcast:
            podcast = fetch_row(Podcast, slug=podcast)
            media = media.filter(Media.podcast == podcast)

        return dict(
            media = media,
            search = search,
            search_form = search_form,
            media_filter = filter,
            category = category,
            tag = tag,
            podcast = podcast,
        )

Example 42

Project: mediadrop Source File: storage.py
Function: index
    @expose('admin/storage/index.html')
    @observable(events.Admin.StorageController.index)
    def index(self, page=1, **kwargs):
        """List storage engines with pagination.

        :rtype: Dict
        :returns:
            engines
                The list of :class:`~mediadrop.lib.storage.StorageEngine`
                instances for this page.

        """
        engines = DBSession.query(StorageEngine)\
            .options(orm.undefer('file_count'),
                     orm.undefer('file_size_sum'))\
            .all()
        engines = list(sort_engines(engines))
        existing_types = set(ecls.engine_type for ecls in engines)
        addable_engines = [
            ecls
            for ecls in StorageEngine
            if not ecls.is_singleton or ecls.engine_type not in existing_types
        ]

        return {
            'engines': engines,
            'addable_engines': addable_engines,
        }

Example 43

Project: mediadrop Source File: tags.py
Function: index
    @expose('admin/tags/index.html')
    @paginate('tags', items_per_page=25)
    @observable(events.Admin.TagsController.index)
    def index(self, page=1, **kwargs):
        """List tags with pagination.

        :param page: Page number, defaults to 1.
        :type page: int
        :rtype: Dict
        :returns:
            tags
                The list of :class:`~mediadrop.model.tags.Tag`
                instances for this page.
            tag_form
                The :class:`~mediadrop.forms.admin.settings.tags.TagForm` instance.

        """
        tags = DBSession.query(Tag)\
            .options(orm.undefer('media_count'))\
            .order_by(Tag.name)

        return dict(
            tags = tags,
            tag_form = tag_form,
            tag_row_form = tag_row_form,
        )

Example 44

Project: mediadrop Source File: media.py
    @expose('json')
    @require_api_key_if_necessary
    @observable(events.API.MediaController.index)
    def index(self, type=None, podcast=None, tag=None, category=None, search=None,
              max_age=None, min_age=None, order=None, offset=0, limit=10,
              published_after=None, published_before=None, featured=False,
              id=None, slug=None, include_embed=False, format="json", **kwargs):
        """Query for a list of media.

        :param type:
            Filter by '%s' or '%s'. Defaults to any type.

        :param podcast:
            A podcast slug (or slugs) to filter by. Use 0 to include
            only non-podcast media or 1 to include any podcast media.
            For multiple podcasts, separate the slugs with commas.

        :param tag:
            A tag slug to filter by.

        :param category:
            A category slug to filter by.

        :param search:
            A boolean search query. See
            http://dev.mysql.com/doc/refman/5.0/en/fulltext-boolean.html

        :param published_after:
            If given, only media published *on or after* this date is
            returned. The expected format is 'YYYY-MM-DD HH:MM:SS'
            (ISO 8601) and must include the year at a bare minimum.

        :param published_before:
            If given, only media published *on or before* this date is
            returned. The expected format is 'YYYY-MM-DD HH:MM:SS'
            (ISO 8601) and must include the year at a bare minimum.

        :param max_age:
            If given, only media published within this many days is
            returned. This is a convenience shortcut for publish_after
            and will override its value if both are given.
        :type max_age: int

        :param min_age:
            If given, only media published prior to this number of days
            ago will be returned. This is a convenience shortcut for
            publish_before and will override its value if both are given.
        :type min_age: int

        :param order:
            A column name and 'asc' or 'desc', separated by a space.
            The column name can be any one of the returned columns.
            Defaults to newest media first (publish_on desc).

        :param offset:
            Where in the complete resultset to start returning results.
            Defaults to 0, the very beginning. This is useful if you've
            already fetched the first 50 results and want to fetch the
            next 50 and so on.
        :type offset: int

        :param limit:
            Number of results to return in each query. Defaults to 10.
            The maximum allowed value defaults to 50 and is set via
            :attr:`request.settings['api_media_max_results']`.
        :type limit: int

        :param featured:
            If nonzero, the results will only include media from the
            configured featured category, if there is one.
        :type featured: bool

        :param include_embed:
            If nonzero, the HTML for the embeddable player is included
            for all results.
        :type include_embed: bool

        :param id:
            Filters the results to include the one item with the given ID.
            Note that we still return a list.
        :type id: int or None

        :param slug:
            Filters the results to include the one item with the given slug.
            Note that we still return a list.
        :type slug: unicode or None

        :param api_key:
            The api access key if required in settings
        :type api_key: unicode or None

        :raises APIException:
            If there is an user error in the query params.

        :rtype: JSON-ready dict
        :returns: The returned dict has the following fields:

            count (int)
                The total number of results that match this query.
            media (list of dicts)
                A list of **media_info** dicts, as generated by the
                :meth:`_info <mediadrop.controllers.api.media.MediaController._info>`
                method. The number of dicts in this list will be the lesser
                of the number of matched items and the requested limit.
                **Note**: unless the 'include_embed' option is specified,
                The returned **media_info** dicts will not include the
                'embed' entry.

        """
        # NOTE(review): the '%s' placeholders in the docstring above are
        # presumably substituted into __doc__ elsewhere (e.g. with the media
        # type constants) -- confirm before replacing them with literals.

        if format not in ("json", "mrss"):
            return dict(error= INVALIDFORMATERROR % format)

        # Base query: published media only, with the deferred published
        # comment count loaded eagerly so serialization avoids N+1 queries.
        query = Media.query\
            .published()\
            .options(orm.undefer('comment_count_published'))

        # Basic filters
        if id:
            query = query.filter_by(id=id)
        if slug:
            query = query.filter_by(slug=slug)

        if type:
            query = query.filter_by(type=type)

        if podcast:
            # Resolve comma-separated podcast slugs to IDs via a subquery.
            podcast_query = DBSession.query(Podcast.id)\
                .filter(Podcast.slug.in_(podcast.split(',')))
            query = query.filter(Media.podcast_id.in_(podcast_query))

        if tag:
            tag = fetch_row(Tag, slug=tag)
            query = query.filter(Media.tags.contains(tag))

        if category:
            category = fetch_row(Category, slug=category)
            query = query.filter(Media.categories.contains(category))

        # max_age/min_age are convenience shortcuts that override the
        # explicit published_after/published_before params (documented above).
        if max_age:
            published_after = datetime.now() - timedelta(days=int(max_age))
        if min_age:
            published_before = datetime.now() - timedelta(days=int(min_age))

        # FIXME: Parse the date and catch formatting problems before it
        #        hits the database. Right now support for partial
        #        dates like '2010-02' is thanks to leniency in MySQL.
        #        Hopefully this leniency is common to Postgres etc.
        if published_after:
            query = query.filter(Media.publish_on >= published_after)
        if published_before:
            query = query.filter(Media.publish_on <= published_before)

        query = query.order_by(get_order_by(order, order_columns))

        # Search will supersede the ordering above
        if search:
            query = query.search(search)

        if featured:
            featured_cat = get_featured_category()
            if featured_cat:
                query = query.in_category(featured_cat)

        # Preload podcast slugs so we don't do n+1 queries
        podcast_slugs = dict(DBSession.query(Podcast.id, Podcast.slug))

        # Rudimentary pagination support
        # The requested limit is capped by the site-wide API maximum.
        start = int(offset)
        end = start + min(int(limit), int(request.settings['api_media_max_results']))

        if format == "mrss":
            request.override_template = "sitemaps/mrss.xml"
            return dict(
                media = query[start:end],
                title = "Media Feed",
            )

        media = [self._info(m, podcast_slugs, include_embed) for m in query[start:end]]

        return dict(
            media = media,
            count = query.count(),
        )

Example 45

Project: mediadrop Source File: categories.py
    def __before__(self, *args, **kwargs):
        """Load the category tree and per-category counts before each request."""
        BaseController.__before__(self, *args, **kwargs)

        c.categories = (Category.query
            .order_by(Category.name)
            .options(orm.undefer('media_count_published'))
            .populated_tree())

        # Direct (non-recursive) published-media count for every category.
        direct_counts = {node.id: node.media_count_published
                         for node, _depth in c.categories.traverse()}

        # Cumulative counts: each category's own count plus everything
        # contributed by its descendants, accumulated up the ancestor chain.
        c.category_counts = dict(direct_counts)
        for node, _depth in c.categories.traverse():
            own = direct_counts[node.id]
            if not own:
                continue
            for parent in node.ancestors():
                c.category_counts[parent.id] += own

        slug = request.environ['pylons.routes_dict'].get('slug', None)
        if slug:
            # A specific category page: expose it plus its breadcrumb trail.
            c.category = fetch_row(Category, slug=slug)
            c.breadcrumb = c.category.ancestors()
            c.breadcrumb.append(c.category)

Example 46

Project: mediadrop Source File: podcasts.py
    @expose('podcasts/index.html')
    @observable(events.PodcastsController.index)
    def index(self, **kwargs):
        """List podcasts together with a preview of their latest episodes.

        :rtype: dict
        :returns:
            podcasts
                The :class:`~mediadrop.model.podcasts.Podcast` instance

        """
        # Undefer the published-media count so it loads with the podcasts.
        all_podcasts = (Podcast.query
            .options(orm.undefer('media_count_published'))
            .all())

        # With exactly one podcast there is nothing to list; go straight to it.
        if len(all_podcasts) == 1:
            redirect(action='view', slug=all_podcasts[0].slug)

        # Up to four most recent viewable episodes per podcast.
        episodes_by_podcast = {
            pod: viewable_media(
                pod.media.published().order_by(Media.publish_on.desc()))[:4]
            for pod in all_podcasts
        }

        return {
            'podcasts': all_podcasts,
            'podcast_episodes': episodes_by_podcast,
        }

Example 47

Project: fuel-web Source File: deployment_history.py
    @classmethod
    def get_history(cls, transaction, nodes_ids=None, statuses=None,
                    tasks_names=None, include_summary=False):
        """Get deployment tasks history.

        :param transaction: task SQLAlchemy object
        :type transaction: models.Task
        :param nodes_ids: filter by node IDs
        :type nodes_ids: list[int]|None
        :param statuses: filter by statuses
        :type statuses: list[basestring]|None
        :param tasks_names: filter by deployment graph task names
        :type tasks_names: list[basestring]|None
        :param include_summary: bool flag to include summary
        :type include_summary: bool
        :returns: tasks history
        :rtype: list[dict]
        """
        # Normalize filters to frozensets for fast membership tests; None or
        # empty inputs stay falsy so the "no filter" checks below still work.
        nodes_ids = nodes_ids and frozenset(nodes_ids)
        statuses = statuses and frozenset(statuses)
        tasks_names = tasks_names and frozenset(tasks_names)

        task_parameters_by_name = {}
        visited_tasks = set()
        tasks_snapshot = Transaction.get_tasks_snapshot(transaction)
        history = []

        if tasks_snapshot:
            # make a copy for each task to avoid modification
            for task in six.moves.map(dict, tasks_snapshot):
                # remove ambiguous id field
                task.pop('id', None)
                task_parameters_by_name[task['task_name']] = task
        else:
            logger.warning('No tasks snapshot is defined in given '
                           'transaction, probably it is a legacy '
                           '(Fuel<10.0) or malformed.')
        query = None
        if include_summary:
            # 'summary' is a deferred column; load it only when requested.
            query = cls.options(query, undefer('summary'))
        history_records = cls.filter_by(query, task_id=transaction.id)
        if tasks_names:
            history_records = cls.filter_by_list(
                history_records, 'deployment_graph_task_name', tasks_names
            )
        if nodes_ids:
            history_records = cls.filter_by_list(
                history_records, 'node_id', nodes_ids
            )
        # Skipped tasks are synthesized from the snapshot further below, not
        # stored as records, so the status filter can only be pushed into the
        # query when 'skipped' is not among the requested statuses.
        if statuses and HISTORY_TASK_STATUSES.skipped not in statuses:
            history_records = cls.filter_by_list(
                history_records, 'status', statuses
            )

        for history_record in history_records:
            task_name = history_record.deployment_graph_task_name
            visited_tasks.add(task_name)

            # the visited tasks should be calculated, it is
            # reason why the query filter cannot be used here
            if statuses and history_record.status not in statuses:
                continue

            fields = list(DeploymentHistorySerializer.fields)
            if include_summary:
                fields.append('summary')
            record = cls.single.to_dict(history_record, fields=fields)
            history.append(record)
            # remove ambiguous field
            record['task_name'] = record.pop('deployment_graph_task_name')

            # Merge in the task definition from the snapshot, when present.
            if task_parameters_by_name:
                try:
                    record.update(task_parameters_by_name[task_name])
                except KeyError:
                    logger.warning(
                        'Definition of "{0}" task is not found'
                        .format(task_name)
                    )

        # calculates absent tasks respecting filter
        if (not nodes_ids and (
                not statuses or HISTORY_TASK_STATUSES.skipped in statuses)):

            for task_name in task_parameters_by_name:
                if tasks_names and task_name not in tasks_names:
                    continue
                if task_name in visited_tasks:
                    continue

                # Snapshot tasks that never produced a history record are
                # reported as 'skipped' with placeholder node/time fields.
                history.append(dict(
                    task_parameters_by_name[task_name],
                    task_name=task_name,
                    node_id='-',
                    status=HISTORY_TASK_STATUSES.skipped,
                    time_start=None,
                    time_end=None,
                ))
        return history