# -*- coding: iso-8859-1 -*-

"""
    MoinMoin - ForEach macro
    Version 0.9

    (C) 2008 Francesco Chemolli <kinkie@squid-cache.org>
    Distributed under the terms of the GNU General Public License
    version three.

    [[ForEach(FullTextExpression,TextExpression)]]

    searches for FullTextExpression and builds a list of TextExpression's,
    one per page found. %% gets replaced with the page name.
    Special characters can be escaped by \ (\ is expressed as \\)

    e.g.
    [[ForEach(CategoryFoo,== %% ==
\[\[TocOf(\%%\)\]\]
)]]

    If a page contains a ##priority <number> directive, the results are
    sorted according to it (higher numbers have higher preference);
    pages with no priority specification come last.

    Bazaar development branch at 
    http://eu.squid-cache.org/~kinkie/moinmoin

    to get a checkout:
    bzr co http://eu.squid-cache.org/~kinkie/moinmoin

"""

import re
import StringIO
from MoinMoin import config, wikiutil, search
from MoinMoin.Page import Page

Dependencies = ["pages"]

def execute(macro,args):
    request = macro.request
    formatter = macro.formatter

    #only in moin 1.6.. :\
    #wikiutil.parse_quoted_separated(args,',',False);
    args = parse_arguments(args)
    if (len(args)==0):
        return "No arguments given to [[ForEach()]]"

    needle = args[0]
    repl = args[1]

    query = search.QueryParser().parse_query(needle)
    results = search.searchPages(request,query).hits

    results = [p.page_name for p in results]
    macro.formatter.text('got pages: %s' % ', '.join(results))
    results = sort_by_prios(request,results)

    #results now contains the sorted list of pages matched.
    #we need to change them to the matched text.

    #for debugging purposes:
    #tmp = [ (r+'('+get_page_prio(request,r)+')') for r in results]

    results = [repl.replace('%%',r) for r in results]
    p = re.compile( r'\\(.)' )
    results = [p.sub(r'\1',r) for r in results]

    #return macro.formatter.text('[[ForEach()]] is not yet complete. Pages found:'+", ".join(tmp))
    if 0:
        #FIXME: following block is for version 1.6 of MoinMoin
	# buf is needed since the parser will try to output directly
        buf = StringIO.StringIO() 
        try:
            request.redirect(buf)
            ret = request.formatter.parser('wiki',''.join(results))
        finally:
            request.redirect()
        buf.flush()
        writ = buf.getvalue()
        buf.close()
        return macro.formatter.rawHTML('%s%s' % (ret,writ))

    #for version 1.5 of MoinMoin
    Parser = wikiutil.importPlugin(request.cfg,'parser','wiki','Parser')
    parser = Parser(''.join(results),request)
    return parser.format(request.formatter) or ''



def parse_arguments(args):
    """Split the macro argument string into at most two parts.

    Only the first comma separates the search expression from the
    replacement template; any further commas stay inside the template.
    Returns a list of whitespace-stripped strings, empty when no
    argument string was supplied.
    """
    if not args:
        return []
    return [piece.strip() for piece in args.split(',', 1)]

def sort_by_prios(request, pages):
    """Order page names by their priority, highest first.

    Each page is decorated with the value returned by get_page_prio()
    and the (prio, name) pairs are sorted in descending order; the bare
    page names are then returned.
    """
    decorated = [(get_page_prio(request, name), name) for name in pages]
    # Equal-priority ties carry the page name in the tuple, so sorting
    # the decorated pairs in reverse matches sort()+reverse() exactly.
    decorated.sort(reverse=True)
    return [name for (prio, name) in decorated]

def get_page_prio(request, path):
    """Return the priority of the named page as an integer.

    Scans the raw page body for a "##priority <number>" directive and
    returns its number; pages without one get -1 so they sort after any
    page with an explicit priority.

    Bug fix: the matched priority used to be returned as a *string*, so
    priorities compared lexicographically (e.g. "10" < "9") instead of
    numerically, breaking the descending sort in sort_by_prios for
    multi-digit values; -1 (int) vs string comparison also only worked by
    accident under Python 2's mixed-type ordering.
    """
    p = re.compile(r'##priority (?P<prio>\d+)')
    page = Page(request, path)
    body = page.get_raw_body()
    m = p.search(body)
    if m:
        return int(m.group('prio'))
    else:
        return -1
