Wikipedia: internationalize and fix test case

Valentin Lorentz 2011-03-02 11:00:10 +01:00
parent fdbd3dba67
commit 5854a2af5c
4 changed files with 52 additions and 22 deletions

View File: __init__.py

@@ -45,7 +45,12 @@ __author__ = supybot.Author('quantumlemur', 'quantumlemur',
 # This is a dictionary mapping supybot.Author instances to lists of
 # contributions.
-__contributors__ = {}
+if not hasattr(supybot.authors, 'progval'):
+    supybot.authors.progval = supybot.Author('Valentin Lorentz', 'ProgVal',
+                                             'progval@gmail.com')
+__contributors__ = {supybot.authors.progval: ['enhance configurability',
+                                              'many bug fixes',
+                                              'internationalization']}
 # This is a url where the most recent plugin package can be downloaded.
 __url__ = '' # 'http://supybot.com/Members/yourname/Wikipedia/download'
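
The hasattr guard above keeps the Author object from being recreated every time the plugin is reloaded into a running bot. A minimal standalone sketch of the same guard pattern; the _Authors class and ensure_author helper here are illustrative stand-ins, not Supybot's API:

class _Authors(object):
    """Stand-in for the supybot.authors attribute container."""
    pass

authors = _Authors()

def ensure_author(nick, fullname, email):
    # Create the attribute only on first load; later reloads reuse it,
    # so references to the same author stay consistent.
    if not hasattr(authors, nick):
        setattr(authors, nick, (fullname, nick, email))
    return getattr(authors, nick)

ensure_author('progval', 'Valentin Lorentz', 'progval@gmail.com')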

View File: config.py

@@ -31,6 +31,15 @@
 import supybot.conf as conf
 import supybot.registry as registry
+try:
+    from supybot.i18n import PluginInternationalization
+    from supybot.i18n import internationalizeDocstring
+    _ = PluginInternationalization('Wikipedia')
+except:
+    # These are placeholder functions that allow the plugin to run on a
+    # bot without the i18n plugin.
+    _ = lambda x: x
+    internationalizeDocstring = lambda x: x

 def configure(advanced):
     # This will be called by supybot to configure this module. advanced is
@@ -46,5 +55,9 @@ Wikipedia = conf.registerPlugin('Wikipedia')
 # conf.registerGlobalValue(Wikipedia, 'someConfigVariableName',
 #     registry.Boolean(False, """Help for someConfigVariableName."""))
+conf.registerChannelValue(Wikipedia, 'url',
+    registry.String(_('en.wikipedia.org'), _("""URL of the website from
+    where you want to pull pages (usually: your language's
+    wikipedia)""")))

 # vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
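
The try/except block (repeated in plugin.py below) makes internationalization optional: on a bot without supybot.i18n, _ degrades to the identity function, so wrapped strings pass through untranslated instead of raising ImportError at load time. A minimal sketch of the fallback's behavior, runnable even without Supybot installed:

try:
    from supybot.i18n import PluginInternationalization
    _ = PluginInternationalization('Wikipedia')
except ImportError:
    # No i18n support available; return message strings unchanged.
    _ = lambda x: x

# With the fallback in place, _('...') is always safe to call:
print(_('en.wikipedia.org'))  # translated if possible, else unchanged

Because 'url' is registered with registerChannelValue, each channel can point at its own language's Wikipedia; plugin.py reads it back with self.registryValue('url', msg.args[0]).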

View File: plugin.py

@@ -41,8 +41,15 @@ from supybot.commands import *
 import supybot.plugins as plugins
 import supybot.ircutils as ircutils
 import supybot.callbacks as callbacks
-# plugins.wikipedia.snippetStyle in ['sentence','paragraph','none']
+try:
+    from supybot.i18n import PluginInternationalization
+    from supybot.i18n import internationalizeDocstring
+    _ = PluginInternationalization('Wikipedia')
+except:
+    # These are placeholder functions that allow the plugin to run on a
+    # bot without the i18n plugin.
+    _ = lambda x: x
+    internationalizeDocstring = lambda x: x

 class Wikipedia(callbacks.Plugin):
@@ -51,15 +58,17 @@ class Wikipedia(callbacks.Plugin):
     threaded = True

+    @internationalizeDocstring
     def wiki(self, irc, msg, args, search):
         """<search term>

         Returns the first paragraph of a Wikipedia article"""
         reply = ''
         # first, we get the page
-        addr = 'http://en.wikipedia.org/wiki/Special:Search?search=%s' % \
-               urllib.quote_plus(search)
-        article = utils.web.getUrl(addr)
+        addr = 'http://%s/wiki/Special:Search?search=%s' % \
+               (self.registryValue('url', msg.args[0]),
+                urllib.quote_plus(search))
+        article = utils.web.getUrl(addr)
         # parse the page
         tree = lxml.html.document_fromstring(article)
         # check if it gives a "Did you mean..." redirect
@@ -67,9 +76,10 @@ class Wikipedia(callbacks.Plugin):
                                 '[@title="Special:Search"]')
         if didyoumean:
             redirect = didyoumean[0].text_content().strip()
-            reply += ('I didn\'t find anything for "%s". Did you mean "%s"? ' %
-                      (search, redirect))
-            addr = 'http://en.wikipedia.org%s' % didyoumean[0].get('href')
+            reply += _('I didn\'t find anything for "%s". '
+                       'Did you mean "%s"? ') % (search, redirect)
+            addr = 'http://%s%s' % (self.registryValue('url', msg.args[0]),
+                                    didyoumean[0].get('href'))
             article = utils.web.getUrl(addr)
             tree = lxml.html.document_fromstring(article)
             search = redirect
@@ -78,16 +88,17 @@ class Wikipedia(callbacks.Plugin):
         searchresults = tree.xpath('//div[@class="searchresults"]/ul/li/a')
         if searchresults:
             redirect = searchresults[0].text_content().strip()
-            reply += 'I didn\'t find anything for "%s", but here\'s the ' + \
-                     'result for "%s": ' % (search, redirect)
-            addr = 'http://en.wikipedia.org%s' % searchresults[0].get('href')
+            reply += _('I didn\'t find anything for "%s", but here\'s the '
+                       'result for "%s": ') % (search, redirect)
+            addr = 'http://%s%s' % (self.registryValue('url', msg.args[0]),
+                                    searchresults[0].get('href'))
             article = utils.web.getUrl(addr)
             tree = lxml.html.document_fromstring(article)
             search = redirect
         # otherwise, simply return the title and whether it redirected
         else:
-            redirect = re.search('\(Redirected from <a href=[^>]*>([^<]*)'
-                                 '</a>\)', article)
+            redirect = re.search('\(%s <a href=[^>]*>([^<]*)</a>\)' %
+                                 _('Redirected from'), article)
             if redirect:
                 redirect = tree.xpath('//div[@id="contentSub"]/a')[0]
                 redirect = redirect.text_content().strip()
@@ -95,7 +106,7 @@ class Wikipedia(callbacks.Plugin):
                 title = title[0].text_content().strip()
                 reply += '"%s" (Redirect from "%s"): ' % (title, redirect)
         # extract the address we got it from
-        addr = re.search('Retrieved from "<a href="([^"]*)">', article)
+        addr = re.search(_('Retrieved from') + ' "<a href="([^"]*)">', article)
         addr = addr.group(1)
         # check if it's a disambiguation page
         disambig = tree.xpath('//table[@id="disambigbox"]')
@@ -104,12 +115,12 @@ class Wikipedia(callbacks.Plugin):
             disambig = disambig[:5]
             disambig = [item.text_content() for item in disambig]
             r = utils.str.commaAndify(disambig)
-            reply += '%s is a disambiguation page. Possible results are: %s' %\
-                     (addr, r)
+            reply += _('%s is a disambiguation page. '
+                       'Possible results are: %s') % (addr, r)
         # or just as bad, a page listing events in that year
-        elif re.search('This article is about the year [\d]*\. '
-                       'For the [a-zA-Z ]* [\d]*, see', article):
-            reply += ('"%s" is a page full of events that happened in that '
+        elif re.search(_('This article is about the year [\d]*\. '
+                         'For the [a-zA-Z ]* [\d]*, see'), article):
+            reply += _('"%s" is a page full of events that happened in that '
                      'year. If you were looking for information about the '
                      'number itself, try searching for "%s_(number)", but '
                      'don\'t expect anything useful...') % (search, search)
@@ -117,7 +128,7 @@ class Wikipedia(callbacks.Plugin):
         ##### etree!
         p = tree.xpath("//div[@id='bodyContent']/p[1]")
         if len(p) == 0:
-            reply += 'Not found, or page bad formed. '
+            reply += _('Not found, or page malformed.')
         else:
             p = p[0]
             p = p.text_content()
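
Taken together, these hunks replace every hard-coded en.wikipedia.org with the per-channel url setting and route user-visible strings and scraped interface strings through _(). A rough standalone sketch of the fetch-and-parse flow, with the host passed as a plain parameter instead of self.registryValue('url', msg.args[0]); Python 2, as in the plugin, and the function name is illustrative:

import urllib
import lxml.html

def first_paragraph(host, search):
    # Build the same Special:Search URL the plugin uses.
    addr = 'http://%s/wiki/Special:Search?search=%s' % (
        host, urllib.quote_plus(search))
    article = urllib.urlopen(addr).read()
    tree = lxml.html.document_fromstring(article)
    # First paragraph of the article body, as in the plugin's xpath.
    p = tree.xpath("//div[@id='bodyContent']/p[1]")
    if not p:
        return 'Not found, or page malformed.'
    return p[0].text_content()

print(first_paragraph('en.wikipedia.org', 'IRC'))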

View File: test.py

@@ -40,7 +40,8 @@ class WikipediaTestCase(PluginTestCase):
         self.assertRegexp('wiki Python', '.*is a disambiguation page.*')
         self.assertRegexp('wiki Foo', '"Foobar" \(Redirect from "Foo"\): '
                           'The terms foobar.*')
-        self.assertRegexp('wiki roegdfjpoepo', '.*is a disambiguation page.*')
+        self.assertRegexp('wiki roegdfjpoepo',
+                          'Not found, or page malformed.*')
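
assertRegexp runs the command through the test harness and asserts that the bot's reply matches the given pattern; the fixed case now expects the plugin's not-found message for a nonsense query instead of a disambiguation page. A simplification of what the assertion checks (the real harness drives a mock IRC connection):

import re

reply = 'Not found, or page malformed.'  # what the plugin sends back
assert re.search('Not found, or page malformed.*', reply)

The suite itself is normally run with Supybot's supybot-test script, pointed at the plugin directory.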