view myrss/myrss_app.py @ 97:9bd400576469

myrss: fix meta viewport scale for mobile optimization, and add extra debug logging line
author paulo
date Mon, 09 Jul 2018 00:50:35 -0600
parents 259a484f691b
children e2817e789895
import os
import sys
import re
import urllib2
import threading
import Queue
import datetime
import time
import traceback

import logging
#logging.basicConfig(
#    level=logging.DEBUG,
#    filename="_LOG",
#    format="%(asctime)s %(levelname)-8s %(message)s",
#)

import xml.etree.ElementTree
import HTMLParser

import html

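
# Configuration constants.  FEEDS_FILE lists one feed URL per line (lines
# starting with '#' are skipped by main() below); the rendered page is
# cached in CACHE_HTML_FILE and reused for up to CACHE_LIFE seconds.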
FEEDS_FILE = "FEEDS"
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200   # [seconds]
MAX_ITEMS = 50
MAX_LINK_Z = 4
MAX_THREADS = 20
URLOPEN_TIMEOUT = 60   # [seconds]

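
# Split an ElementTree root tag such as "{http://www.w3.org/2005/Atom}feed"
# into its (namespace, local tag) parts; used to detect the feed dialect.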
_PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")

def _parse_root_tag(root_tag):
    re_match = _PARSE_ROOT_TAG_RE.match(root_tag)

    if re_match is None:
        return (None, None)
    else:
        return re_match.group(2, 3)


def _strip_if_not_none(txt):
    return txt.strip() if txt is not None else ''

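
# Each _go_*() helper below normalizes one feed dialect into a
# (feed_title, feed_link, [(item_title, item_link), ...]) tuple,
# keeping at most MAX_ITEMS items.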
def _go_rss(elementTree):
    title = _strip_if_not_none(elementTree.find("channel/title").text)
    link = elementTree.find("channel/link").text

    items = []

    for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("title").text)
        it_link = i.find("link").text

        items.append((it_title, it_link))

    return (title, link, items)

def _go_atom(elementTree):
    ns = "http://www.w3.org/2005/Atom"

    title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    link = ''

    links = elementTree.findall("{%s}link" % ns)
    for i in links:
        if len(links) == 1 or i.get("rel") == "alternate":
            link = i.get("href")
            break

    items = []

    for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("{%s}title" % ns).text)
        it_link = ''

        it_links = i.findall("{%s}link" % ns)
        for j in it_links:
            if len(it_links) == 1 or j.get("rel") == "alternate":
                it_link = j.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)

def _go_purl_rss(elementTree):
    ns = "http://purl.org/rss/1.0/"

    title = _strip_if_not_none(elementTree.find("{%s}channel/{%s}title" % (ns, ns)).text)
    link = elementTree.find("{%s}channel/{%s}link" % (ns, ns)).text

    items = []

    for i in elementTree.findall("{%s}item" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("{%s}title" % ns).text)
        it_link = i.find("{%s}link" % ns).text

        items.append((it_title, it_link))

    return (title, link, items)

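
# Strip markup from titles: remove tags with a regex, then unescape
# HTML entities.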
_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()

def _strip_html(txt):
    return _htmlParser.unescape(_STRIP_HTML_RE.sub('', txt))

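
# Render the whole page with the third-party "html" builder package
# imported above (html.HTML); klass= is how that package emits the
# reserved "class" attribute.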
def _to_html(dtnow, docstruct):
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML("html")

    header = root.head
    header.meta(name="viewport", content="width=device-width, initial-scale=1")
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    link_z = 0

    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed

        logging.debug("title: %s", title)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "

            if not it_title:
                it_title = "(missing title)"
            if it_link is not None:
                p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            else:
                p += _strip_html(it_title)

            link_z += 1

    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")

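
# Fetch a feed URL with a browser-like User-Agent (presumably because some
# servers reject the default urllib2 agent); HTTP errors are logged and
# reported as None.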
def _fetch_url(url):
    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": "Mozilla/5.0 Browser"}), timeout=URLOPEN_TIMEOUT)
    except urllib2.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return feed

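
# Dispatch on the XML root tag: plain RSS 2.0, Atom, or RDF (RSS 1.0).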
def _process_feed(feed):
    ret = None

    elementTree = xml.etree.ElementTree.parse(feed)
    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    elif parsed_root_tag == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        ret = _go_purl_rss(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret

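
# Worker threads take (index, url) pairs from the input queue and put back
# (index, parsed feed or None) on the output queue, so results can be
# reassembled in the original FEEDS order.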
class WorkerThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            docfeed = None
            try:
                feed = _fetch_url(url)
                if feed is not None:
                    docfeed = _process_feed(feed)
            except Exception as e:
                logging.info("(%s) exception: (%s) %s" % (url, type(e), e))
            self._output_queue.put((idx, docfeed))

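
# Build (or reuse) the page.  The lock serializes page generation across
# concurrent requests; a fresh-enough cached copy is served directly.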
def main(input_queue, output_queue, lock):
    ret = ''

    with lock:
        logging.debug("main() started")
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, l) in enumerate(feedlines):
                if l[0] != '#':
                    l = l.strip()
                    input_queue.put((i, l))
                    num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret

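
# WSGI application object: creates the queues and the worker pool once,
# then serves every request through main().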
class MyRssApp:
    def __init__(self):
        logging.debug("MyRssApp.__init__() called")
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for i in range(MAX_THREADS):
            logging.debug("Starting thread: %d" % i)
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except:
            response_body = traceback.format_exc()

        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_body))),
        ]
        start_response(response_code, response_headers)

        return [response_body]
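
For reference only (not part of this changeset): a minimal sketch of serving the app locally with Python 2's built-in wsgiref server, assuming the module is importable as myrss_app and a FEEDS file exists in the working directory.

from wsgiref.simple_server import make_server

from myrss_app import MyRssApp  # assumed module name, taken from the path above

# MyRssApp() starts the worker threads in __init__(); serve on port 8000.
make_server("", 8000, MyRssApp()).serve_forever()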