view myrss/myrss_parser.py @ 41:5f9bc02e9caf

add datetimestamp and caching
author paulo
date Fri, 01 Feb 2013 01:26:07 -0800
parents 62464a0034d1
children a1456ecd25b9
line source
import os
import sys
import re
import urllib2
import threading
import Queue
import datetime
import time

import html  # not in the Python 2 stdlib; presumably the PyPI "html" document builder
import xml.etree.ElementTree

FEEDS_FILE = "FEEDS"
CACHE_HTML_FILE = "__cache__.html"

#CACHE_LIFE = 1200 # [seconds]
CACHE_LIFE = 30 # [seconds]
MAX_ITEMS = 30
MAX_LINK_Z = 4
MAX_THREADS = 20
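
# For illustration only (the repository's FEEDS file is not shown in this
# view): main() below reads FEEDS_FILE one feed URL per line and skips lines
# starting with '#', so a FEEDS file presumably looks something like
#
#   # news
#   http://example.com/rss.xml
#   http://example.org/atom.xml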

# splits an ElementTree tag of the form "{namespace}tag" into its two parts
_PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")


def _parse_root_tag(root_tag):
    re_match = _PARSE_ROOT_TAG_RE.match(root_tag)

    if re_match is None:
        return (None, None)
    else:
        return re_match.group(2, 3)
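
# A doctest-style sketch of the helper above (for illustration):
#
#   >>> _parse_root_tag("{http://www.w3.org/2005/Atom}feed")
#   ('http://www.w3.org/2005/Atom', 'feed')
#   >>> _parse_root_tag("rss")  # RSS 2.0 roots carry no namespace
#   (None, 'rss')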

def _go_rss(elementTree):
    title = elementTree.find("channel/title").text.strip()
    link = elementTree.find("channel/link").text

    items = []

    for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = i.find("title").text.strip()
        it_link = i.find("link").text

        items.append((it_title, it_link))

    return (title, link, items)

def _go_atom(elementTree):
    ns = "http://www.w3.org/2005/Atom"

    title = elementTree.find("{%s}title" % ns).text.strip()
    link = ''

    for i in elementTree.findall("{%s}link" % ns):
        if i.get("type") == "text/html" and i.get("rel") == "alternate":
            link = i.get("href")
            break

    items = []

    for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = i.find("{%s}title" % ns).text.strip()
        it_link = ''

        for j in i.findall("{%s}link" % ns):
            if j.get("type") == "text/html" and j.get("rel") == "alternate":
                it_link = j.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)
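
# Both parsers above normalize a feed to the same shape, which is what
# _to_html below consumes:
#
#   (feed_title, feed_link, [(item_title, item_link), ...])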

def _to_html(dtnow, docstruct):
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML()

    header = root.header
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    link_z = 0

    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed

        body.h2.a(title, href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "

            p.a(it_title, href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            link_z += 1

    return unicode(root).encode("utf-8")
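
# On the "html" builder as exercised above (a reading of this code, not of
# the package's docs): attribute access creates a nested tag, calling a tag
# sets its text, keyword arguments become HTML attributes, and "klass" is
# presumably the package's spelling of the reserved word "class". Roughly:
#
#   doc = html.HTML()
#   doc.p.a("hello", href="http://example.com", klass="z0")
#   print unicode(doc)  # -> something like <p><a href="..." class="z0">hello</a></p>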

def _process_url(url):
    ret = None

    try:
        print >> sys.stderr, "--> processing %s" % url
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}))
    except urllib2.HTTPError as e:
        print >> sys.stderr, "--> (%s) %s" % (url, e)
        return ret

    elementTree = xml.etree.ElementTree.parse(feed)
    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret
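
# The dispatch above keys on the feed's root element. For reference, an
# RSS 2.0 document opens with an un-namespaced root,
#
#   <rss version="2.0">            -> _parse_root_tag gives (None, "rss")
#
# while an Atom document's root carries the 2005 Atom namespace:
#
#   <feed xmlns="http://www.w3.org/2005/Atom">
#                                  -> ("http://www.w3.org/2005/Atom", "feed")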

class WorkerThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            docfeed = None
            try:
                docfeed = _process_url(url)
            except Exception as e:
                print >> sys.stderr, "--> (%s) exception: %s" % (url, e)
            self._output_queue.put((idx, docfeed))
            self._input_queue.task_done()
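
# Design note: the workers loop on a blocking get() forever and are never
# join()ed; main() only join()s the input queue. Marking them daemon=True is
# what lets the process exit while they are still parked on get().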

def main():
    ret = ''

    epoch_now = time.time()
    dtnow = datetime.datetime.fromtimestamp(epoch_now)

    if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
        # cache is still fresh: serve it instead of re-fetching the feeds
        with open(CACHE_HTML_FILE) as cache_html_file:
            ret = cache_html_file.read()
    else:
        with open(FEEDS_FILE) as feeds_file:
            feedlines = feeds_file.readlines()

        docstruct = [None]*len(feedlines)
        iq = Queue.Queue()  # (index, url) work items
        oq = Queue.Queue()  # (index, parsed feed) results

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=iq, output_queue=oq).start()

        for (i, l) in enumerate(feedlines):
            if l[0] != '#':
                l = l.strip()
                iq.put((i, l))

        iq.join()

        while True:
            try:
                (idx, docfeed) = oq.get_nowait()
                docstruct[idx] = docfeed
            except Queue.Empty:
                break

        ret = _to_html(dtnow, docstruct)

        with open(CACHE_HTML_FILE, 'w') as cache_html_file:
            cache_html_file.write(ret)

    return ret

if __name__ == "__main__":
    print main()
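
# Usage sketch (an assumption, not part of this changeset): the script prints
# the rendered page to stdout and keeps a copy in __cache__.html, so a cron
# job or CGI wrapper could regenerate a static page with something like
#
#   python myrss_parser.py > index.html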