annotate myrss/myrss_app.py @ 46:aca02ce71274

myrss_app.py: add urlopen timeout, and render time
author paulo
date Tue, 12 Feb 2013 00:43:11 -0700
parents c673e9e9c4ca
children 315afeb47e52
rev   line source
paulo@39 1 import os
paulo@40 2 import sys
paulo@39 3 import re
paulo@40 4 import urllib2
paulo@40 5 import threading
paulo@40 6 import Queue
paulo@41 7 import datetime
paulo@41 8 import time
paulo@42 9 import logging
paulo@42 10 logging.basicConfig(level=logging.INFO)
paulo@39 11
paulo@39 12 import html
paulo@39 13 import xml.etree.ElementTree
paulo@39 14
paulo@39 15
# Input file: one feed URL per line; '#' in column 0 marks a comment line.
FEEDS_FILE = "FEEDS"
# Rendered page is cached here between requests.
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200 # [seconds] serve the cached page while younger than this
MAX_ITEMS = 30 # max items rendered per feed
MAX_LINK_Z = 4 # number of CSS "z<n>" link classes cycled through
MAX_THREADS = 20 # worker-pool size (also the bound of both queues)
URLOPEN_TIMEOUT = 60 # [seconds] per-request timeout passed to urlopen
paulo@39 24
paulo@39 25
paulo@39 26 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
paulo@39 27
paulo@39 28 def _parse_root_tag(root_tag):
paulo@39 29 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
paulo@39 30
paulo@39 31 if re_match is None:
paulo@39 32 return (None, None)
paulo@39 33 else:
paulo@39 34 return re_match.group(2, 3)
paulo@39 35
paulo@39 36
paulo@39 37 def _go_rss(elementTree):
paulo@39 38 title = elementTree.find("channel/title").text.strip()
paulo@39 39 link = elementTree.find("channel/link").text
paulo@39 40
paulo@39 41 items = []
paulo@39 42
paulo@39 43 for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
paulo@39 44 it_title = i.find("title").text.strip()
paulo@39 45 it_link = i.find("link").text
paulo@39 46
paulo@39 47 items.append((it_title, it_link))
paulo@39 48
paulo@39 49 return (title, link, items)
paulo@39 50
paulo@39 51
paulo@39 52 def _go_atom(elementTree):
paulo@39 53 ns = "http://www.w3.org/2005/Atom"
paulo@39 54
paulo@39 55 title = elementTree.find("{%s}title" % ns).text.strip()
paulo@39 56 link = ''
paulo@39 57
paulo@39 58 for i in elementTree.findall("{%s}link" % ns):
paulo@39 59 if i.get("type") == "text/html" and i.get("rel") == "alternate":
paulo@39 60 link = i.get("href")
paulo@39 61 break
paulo@39 62
paulo@39 63 items = []
paulo@39 64
paulo@39 65 for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
paulo@39 66 it_title = i.find("{%s}title" % ns).text.strip()
paulo@39 67 it_link = ''
paulo@39 68
paulo@39 69 for j in i.findall("{%s}link" % ns):
paulo@39 70 if j.get("type") == "text/html" and j.get("rel") == "alternate":
paulo@39 71 it_link = j.get("href")
paulo@39 72 break
paulo@39 73
paulo@39 74 items.append((it_title, it_link))
paulo@39 75
paulo@39 76 return (title, link, items)
paulo@39 77
paulo@39 78
def _to_html(dtnow, docstruct):
    """Render *docstruct* (list of (title, link, items) tuples or None) into
    a complete HTML page, returned as a UTF-8 encoded byte string.

    *dtnow* is the datetime the request started; it is shown in the page
    title and used to compute the render time appended at the bottom.
    Uses the third-party `html` builder: attribute access creates child
    tags, calls set text/attributes ("klass" becomes the class attribute).
    """
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML("html")

    # NOTE(review): this emits a <header> element, not <head> -- presumably
    # <head> was intended for the <title>/<link> tags; confirm against the
    # html builder API and the rendered output.
    header = root.header
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    # link_z cycles 0..MAX_LINK_Z-1 across every link to vary CSS styling.
    link_z = 0

    for feed in docstruct:
        if feed is None:
            # Feed failed to download/parse; leave it out of the page.
            continue

        (title, link, items) = feed

        body.h2.a(title, href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                # " - " separator between consecutive item links.
                p += " - "

            p.a(it_title, href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            link_z += 1

    # Render time in seconds, shown in a "debug" div at the page bottom.
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")
paulo@39 115
paulo@39 116
def _process_url(url):
    """Fetch *url* and parse it as an RSS 2.0 or Atom feed.

    Returns the (title, link, items) tuple from _go_rss/_go_atom, or None
    when the fetch fails (the failure is logged per-URL).  Raises
    NotImplementedError for feed formats it does not understand.
    """
    ret = None

    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}), timeout=URLOPEN_TIMEOUT)
    except urllib2.URLError as e:
        # URLError is the parent of HTTPError and also covers DNS errors,
        # refused connections and timeouts -- previously only HTTPError was
        # caught, so those escaped to the worker's generic handler.
        logging.info("(%s) %s" % (url, e))
        return ret

    try:
        elementTree = xml.etree.ElementTree.parse(feed)
    finally:
        feed.close()  # don't leak the HTTP connection/socket

    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret
paulo@40 144
paulo@40 145
class WorkerThread(threading.Thread):
    """Daemon worker: pulls (index, url) jobs from the input queue and
    pushes (index, feed-or-None) results to the output queue, forever."""

    def __init__(self, *args, **kwargs):
        # The two queues are required keyword arguments; anything else is
        # forwarded unchanged to threading.Thread.
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True  # never block interpreter shutdown

    def run(self):
        # One job in, one result out -- a failed URL still produces a
        # result (None) so the consumer's bookkeeping stays balanced.
        while True:
            (index, url) = self._input_queue.get()
            result = None
            try:
                result = _process_url(url)
            except Exception as e:
                logging.info("(%s) exception: %s" % (url, e))
            self._output_queue.put((index, result))
paulo@40 162
paulo@40 163
def main(input_queue, output_queue, lock):
    """Produce the aggregated-feeds HTML page, using a file cache.

    Serializes on *lock* so concurrent requests neither race on the cache
    file nor fetch the feeds twice.  Returns the encoded HTML string:
    either the cached page (when younger than CACHE_LIFE) or a freshly
    rendered one, which is also written back to CACHE_HTML_FILE.
    """
    ret = ''

    with lock:
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        # Fast path: serve the cached page while it is still fresh.
        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            # Fan the URLs out to the worker pool; results come back
            # (possibly out of order) tagged with their original index.
            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, line) in enumerate(feedlines):
                url = line.strip()
                # Skip blank lines (previously queued as empty URLs) and
                # comments, including comments preceded by whitespace.
                if url and not url.startswith('#'):
                    input_queue.put((i, url))
                    num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
paulo@41 197
paulo@41 198
class MyRssApp:
    """WSGI application object.

    Owns one fixed-size pool of daemon WorkerThreads plus the two bounded
    queues they share, and serializes page generation through a lock.
    """

    def __init__(self):
        # Bounded queues: producers block instead of piling work up.
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        # Start the whole pool once, up front; the threads live forever.
        for _ in range(MAX_THREADS):
            worker = WorkerThread(input_queue=self._iq, output_queue=self._oq)
            worker.start()

    def __call__(self, environ, start_response):
        # Standard WSGI entry point: render (or fetch cached) page, then
        # reply with a fixed 200 + content headers.
        page = main(self._iq, self._oq, self._main_lock)
        start_response("200 OK", [
            ("Content-Type", "text/html"),
            ("Content-Length", str(len(page))),
        ])
        return [page]