annotate myrss2/myrss_app.py @ 108:cffd95813b82

add myrss2
author paulo
date Sun, 24 May 2020 00:22:05 -0700
parents
children 1a5c0fc5627a
rev   line source
paulo@108 1 import io
paulo@108 2 import os
paulo@108 3 import sys
paulo@108 4 import re
paulo@108 5 import urllib.request
paulo@108 6 import urllib.error
paulo@108 7 import threading
paulo@108 8 import queue
paulo@108 9 import datetime
paulo@108 10 import time
paulo@108 11 import traceback
paulo@108 12
paulo@108 13 import logging
# Log verbosity is configurable through the LOG_LEVEL environment variable
# (a logging level name such as DEBUG/INFO/WARNING); defaults to INFO.
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
logging.basicConfig(
    level=getattr(logging, LOG_LEVEL),
    format="%(asctime)s %(levelname)-8s %(message)s",
)
paulo@108 19
paulo@108 20 import xml.etree.ElementTree
paulo@108 21 import html
paulo@108 22
paulo@108 23 from html3.html3 import HTML
paulo@108 24
paulo@108 25
# Input file listing one feed URL per line; lines starting with '#' are skipped.
FEEDS_FILE = "FEEDS"
# Rendered page is cached here and reused while younger than CACHE_LIFE.
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200 # [seconds]
MAX_ITEMS = 50 # max items rendered per feed
MAX_LINK_Z = 4 # number of cycled "z%d" CSS classes applied to links
MAX_THREADS = 20 # worker thread count (also the queue capacity)
URLOPEN_TIMEOUT = 10 # [seconds]
paulo@108 34
paulo@108 35
paulo@108 36 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
paulo@108 37
paulo@108 38 def _parse_root_tag(root_tag):
paulo@108 39 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
paulo@108 40
paulo@108 41 if re_match is None:
paulo@108 42 return (None, None)
paulo@108 43 else:
paulo@108 44 return re_match.group(2, 3)
paulo@108 45
paulo@108 46
paulo@108 47 def _strip_if_not_none(txt):
paulo@108 48 return txt.strip() if txt is not None else ''
paulo@108 49
paulo@108 50
def _go_rss(elementTree):
    """Extract (title, link, items) from a parsed RSS 2.0 document.

    items is a list of (item_title, item_link) tuples, capped at MAX_ITEMS.
    Fix: the original called .text on the result of find(), which raises
    AttributeError (dropping the whole feed) when a <title>/<link> element
    is missing; findtext()/guarded find() degrade to ''/None instead.
    """
    title = _strip_if_not_none(elementTree.findtext("channel/title"))

    link_el = elementTree.find("channel/link")
    link = link_el.text if link_el is not None else None

    items = []

    for item in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = _strip_if_not_none(item.findtext("title"))
        it_link_el = item.find("link")
        it_link = it_link_el.text if it_link_el is not None else None

        items.append((it_title, it_link))

    return (title, link, items)
paulo@108 64
paulo@108 65
def _go_atom(elementTree):
    """Extract (title, link, items) from a parsed Atom feed.

    items is a list of (item_title, item_link) tuples, capped at MAX_ITEMS.
    When several <link> elements exist, the rel="alternate" one wins; a
    single <link> is used unconditionally.
    Fix: the original called .text on find() results, raising AttributeError
    (dropping the whole feed) on a missing <title>; findtext() degrades to
    '' instead.
    """
    ns = "http://www.w3.org/2005/Atom"

    title = _strip_if_not_none(elementTree.findtext("{%s}title" % ns))
    link = ''

    links = elementTree.findall("{%s}link" % ns)
    for link_el in links:
        if len(links) == 1 or link_el.get("rel") == "alternate":
            link = link_el.get("href")
            break

    items = []

    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(entry.findtext("{%s}title" % ns))
        it_link = ''

        it_links = entry.findall("{%s}link" % ns)
        for it_link_el in it_links:
            if len(it_links) == 1 or it_link_el.get("rel") == "alternate":
                it_link = it_link_el.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)
paulo@108 93
paulo@108 94
def _go_purl_rss(elementTree):
    """Extract (title, link, items) from a parsed RSS 1.0 (RDF) document.

    items is a list of (item_title, item_link) tuples, capped at MAX_ITEMS.
    Fix: the original called .text on find() results, raising AttributeError
    (dropping the whole feed) when a <title>/<link> element is missing;
    findtext()/guarded find() degrade to ''/None instead.
    """
    ns = "http://purl.org/rss/1.0/"

    title = _strip_if_not_none(elementTree.findtext("{%s}channel/{%s}title" % (ns, ns)))

    link_el = elementTree.find("{%s}channel/{%s}link" % (ns, ns))
    link = link_el.text if link_el is not None else None

    items = []

    for item in elementTree.findall("{%s}item" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(item.findtext("{%s}title" % ns))
        it_link_el = item.find("{%s}link" % ns)
        it_link = it_link_el.text if it_link_el is not None else None

        items.append((it_title, it_link))

    return (title, link, items)
paulo@108 110
paulo@108 111
paulo@108 112 _STRIP_HTML_RE = re.compile(r"<.*?>")
paulo@108 113
paulo@108 114 def _strip_html(txt):
paulo@108 115 return html.unescape(_STRIP_HTML_RE.sub('', txt))
paulo@108 116
paulo@108 117
def _to_html(dtnow, docstruct):
    """Render the aggregated feeds as a complete HTML page string.

    dtnow is the page-generation timestamp (shown in the title); docstruct
    is a list of (title, link, items) tuples or None entries (failed feeds),
    as produced by the _go_* parsers. Uses the html3 HTML builder; 'klass'
    is that library's spelling for the HTML 'class' attribute.
    """
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = HTML("html")

    header = root.head
    header.meta(name="viewport", content="width=device-width, initial-scale=1")
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="static/index.css")

    body = root.body
    body.h1(page_title)

    # Links cycle through the z0..z(MAX_LINK_Z-1) CSS classes.
    link_z = 0

    for feed in docstruct:
        # Feeds that failed to download or parse arrive as None — skip them.
        if feed is None:
            continue

        (title, link, items) = feed

        logging.debug("title: %s", title)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            # Items of one feed are joined into a single paragraph,
            # separated by " - ".
            if i > 0:
                p += " - "

            if not it_title:
                it_title = "(missing title)"
            if it_link is not None:
                p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            else:
                # No link available: render the title as plain text.
                p += _strip_html(it_title)

        link_z += 1

    # Footer debug div: page-generation time in seconds.
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return str(root)
paulo@108 162
paulo@108 163
def _fetch_url(url):
    """Download url and return the body decoded as UTF-8, or None on failure.

    Sends a browser-like User-Agent header. Fix: catch urllib.error.URLError
    (the superclass of HTTPError) so DNS, connection and handshake failures
    are logged and reported as None instead of propagating to the caller.
    """
    try:
        logging.info("processing %s", url)
        request = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0 Browser"})
        feed = urllib.request.urlopen(request, timeout=URLOPEN_TIMEOUT)
    except urllib.error.URLError as e:
        logging.info("(%s) %s", url, e)
        return None

    return str(feed.read(), encoding="utf-8")
paulo@108 173
paulo@108 174
paulo@108 175 def _filter_feed(feed):
paulo@108 176 ret = feed
paulo@108 177
paulo@108 178 filter_out = ["\x16"]
paulo@108 179 for i in filter_out:
paulo@108 180 ret = ret.replace(i, "")
paulo@108 181
paulo@108 182 return ret
paulo@108 183
paulo@108 184
def _process_feed(feed):
    """Parse feed text and dispatch to the handler for its XML dialect.

    Returns the (title, link, items) tuple produced by the matching _go_*
    helper. Raises NotImplementedError for unknown dialects or for rss
    versions below 2.0.
    """
    elementTree = xml.etree.ElementTree.parse(io.StringIO(feed))
    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        if float(root.get("version", 0.0)) < 2.0:
            raise NotImplementedError("Unsupported rss version")
        return _go_rss(elementTree)

    if parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(elementTree)

    if parsed_root_tag == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        return _go_purl_rss(elementTree)

    raise NotImplementedError("Unknown root tag")
paulo@108 208
paulo@108 209
class WorkerThread(threading.Thread):
    """Daemon thread that fetches and parses feeds taken from a queue.

    Consumes (index, url) tuples from input_queue and produces
    (index, parsed_feed_or_None) tuples on output_queue, forever.
    """

    def __init__(self, *args, **kwargs):
        # The two queues are our own keyword arguments; everything else
        # goes to threading.Thread.
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()

            docfeed = None
            try:
                raw = _fetch_url(url)
                if raw is not None:
                    docfeed = _process_feed(_filter_feed(raw))
            except Exception as exc:
                # Never let a bad feed kill the worker; report None instead.
                logging.info("(%s) exception: (%s) %s" % (url, type(exc), exc))

            self._output_queue.put((idx, docfeed))
paulo@108 228
paulo@108 229
def main(input_queue, output_queue, lock):
    """Build the aggregated HTML page, serving a cached copy when fresh.

    input_queue/output_queue connect to the WorkerThread pool; lock
    serializes page generation so concurrent requests do not fetch the
    feeds twice. Returns the page HTML as a str.

    Fix: feed lines are stripped before inspection, so blank lines are
    skipped instead of being enqueued as empty URLs, and a fully empty
    line no longer raises IndexError on l[0].
    """
    ret = ''

    with lock:
        logging.debug("main() started")
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        # Serve the cached page while it is younger than CACHE_LIFE seconds.
        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            # Pre-size by line count; skipped lines stay None and are
            # ignored by _to_html.
            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, line) in enumerate(feedlines):
                line = line.strip()
                if line and not line.startswith('#'):
                    input_queue.put((i, line))
                    num_input += 1

            # Collect one result per enqueued URL; workers echo the index
            # so results land in FEEDS_FILE order regardless of completion
            # order.
            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

        logging.debug("main() ended")

    return ret
paulo@108 265
paulo@108 266
class MyRssApp:
    """WSGI application serving the aggregated feed page."""

    def __init__(self):
        """Create the work queues and start the daemon worker-thread pool."""
        logging.debug("MyRssApp.__init__() called")
        self._iq = queue.Queue(MAX_THREADS)
        self._oq = queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for i in range(MAX_THREADS):
            logging.debug("Starting thread: %d" % i)
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    # Raw WSGI
    def __call__(self, environ, start_response):
        """WSGI entry point: return the page, or a traceback with a 500.

        Fixes: (1) 'except Exception' instead of a bare 'except', so
        SystemExit/KeyboardInterrupt are not swallowed; (2) Content-Length
        is computed from the UTF-8 encoded bytes, not the str length —
        the two differ for non-ASCII pages.
        """
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except Exception:
            # Report the failure as the (plain-text) response body.
            response_body = traceback.format_exc()

        # Encode once; Content-Length must be the byte count of the body.
        response_bytes = bytes(response_body, encoding="utf-8")
        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_bytes))),
        ]
        start_response(response_code, response_headers)

        return [response_bytes]

    def call(self):
        """Non-WSGI entry point: return the page HTML directly."""
        return main(self._iq, self._oq, self._main_lock)