annotate myrss2/myrss_app.py @ 144:90f3021e3137

myrss2: FEEDS: Remove longform.org; add propublic.org
author paulo
date Tue, 28 May 2024 06:23:58 +0000
parents 1a5c0fc5627a
children
rev   line source
paulo@110 1 import datetime
paulo@110 2 import gzip
paulo@108 3 import io
paulo@108 4 import os
paulo@110 5 import queue
paulo@110 6 import re
paulo@108 7 import sys
paulo@108 8 import threading
paulo@108 9 import time
paulo@108 10 import traceback
paulo@110 11 import urllib.error
paulo@110 12 import urllib.request
paulo@108 13
paulo@108 14 import logging
# Configure the root logger from the LOG_LEVEL env var (default INFO).
# getattr(logging, LOG_LEVEL) raises AttributeError for an unknown level name.
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
logging.basicConfig(
    level=getattr(logging, LOG_LEVEL),
    format="%(asctime)s %(levelname)-8s %(message)s",
)
paulo@108 20
paulo@108 21 import xml.etree.ElementTree
paulo@108 22 import html
paulo@108 23
paulo@108 24 from html3.html3 import HTML
paulo@108 25
paulo@108 26
FEEDS_FILE = "FEEDS"                # input: one feed URL per line; '#' starts a comment
CACHE_HTML_FILE = "__cache__.html"  # rendered page cache, reused while fresh

CACHE_LIFE = 1200 # [seconds]
MAX_ITEMS = 50        # max entries rendered per feed
MAX_LINK_Z = 4        # number of rotating CSS link classes (z0..z3)
MAX_THREADS = 20      # worker thread pool size (also queue capacity)
URLOPEN_TIMEOUT = 10 # [seconds]
paulo@108 35
paulo@108 36
paulo@108 37 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
paulo@108 38
paulo@108 39 def _parse_root_tag(root_tag):
paulo@108 40 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
paulo@108 41
paulo@108 42 if re_match is None:
paulo@108 43 return (None, None)
paulo@108 44 else:
paulo@108 45 return re_match.group(2, 3)
paulo@108 46
paulo@108 47
paulo@108 48 def _strip_if_not_none(txt):
paulo@108 49 return txt.strip() if txt is not None else ''
paulo@108 50
paulo@108 51
def _go_rss(elementTree):
    """Extract (title, link, items) from an RSS 2.0 document.

    Returns (title, link, [(item_title, item_link), ...]) with at most
    MAX_ITEMS items.  A missing <title>/<link> element yields ''/None
    instead of raising AttributeError, so one malformed feed cannot
    abort the worker that parses it.
    """
    def _text(node):
        # .find() returns None for a missing element; dereferencing .text on it crashed before
        return node.text if node is not None else None

    title = _strip_if_not_none(_text(elementTree.find("channel/title")))
    link = _text(elementTree.find("channel/link"))

    items = []
    for item in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = _strip_if_not_none(_text(item.find("title")))
        it_link = _text(item.find("link"))  # None is rendered as plain text downstream
        items.append((it_title, it_link))

    return (title, link, items)
paulo@108 65
paulo@108 66
def _go_atom(elementTree):
    """Extract (title, link, items) from an Atom feed.

    Link selection: the sole <link> when there is exactly one, otherwise
    the first with rel="alternate" ('' when nothing matches).  A missing
    <title> element yields '' instead of raising AttributeError.
    """
    ns = "http://www.w3.org/2005/Atom"

    def _text(node):
        # .find() returns None for a missing element; dereferencing .text on it crashed before
        return node.text if node is not None else None

    def _pick_link(link_elems):
        # single link, or the first rel="alternate" one; '' when nothing matches
        for el in link_elems:
            if len(link_elems) == 1 or el.get("rel") == "alternate":
                return el.get("href")
        return ''

    title = _strip_if_not_none(_text(elementTree.find("{%s}title" % ns)))
    link = _pick_link(elementTree.findall("{%s}link" % ns))

    items = []
    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(_text(entry.find("{%s}title" % ns)))
        it_link = _pick_link(entry.findall("{%s}link" % ns))
        items.append((it_title, it_link))

    return (title, link, items)
paulo@108 94
paulo@108 95
def _go_purl_rss(elementTree):
    """Extract (title, link, items) from an RSS 1.0 (purl.org/RDF) document.

    Returns (title, link, [(item_title, item_link), ...]) with at most
    MAX_ITEMS items.  A missing <title>/<link> element yields ''/None
    instead of raising AttributeError.
    """
    ns = "http://purl.org/rss/1.0/"

    def _text(node):
        # .find() returns None for a missing element; dereferencing .text on it crashed before
        return node.text if node is not None else None

    title = _strip_if_not_none(_text(elementTree.find("{%s}channel/{%s}title" % (ns, ns))))
    link = _text(elementTree.find("{%s}channel/{%s}link" % (ns, ns)))

    items = []
    for item in elementTree.findall("{%s}item" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(_text(item.find("{%s}title" % ns)))
        it_link = _text(item.find("{%s}link" % ns))
        items.append((it_title, it_link))

    return (title, link, items)
paulo@108 111
paulo@108 112
paulo@108 113 _STRIP_HTML_RE = re.compile(r"<.*?>")
paulo@108 114
paulo@108 115 def _strip_html(txt):
paulo@108 116 return html.unescape(_STRIP_HTML_RE.sub('', txt))
paulo@108 117
paulo@108 118
def _to_html(dtnow, docstruct):
    """Render the parsed feeds in *docstruct* as a complete HTML page.

    dtnow is the generation timestamp shown in the page title; None
    entries in docstruct (failed feeds) are skipped.  A small debug div
    with the elapsed render time is appended at the bottom.
    """
    stamp = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % stamp

    doc = HTML("html")

    head = doc.head
    head.meta(name="viewport", content="width=device-width, initial-scale=1")
    head.title(page_title)
    head.link(rel="stylesheet", type="text/css", href="static/index.css")

    body = doc.body
    body.h1(page_title)

    color_idx = 0  # rotates through the MAX_LINK_Z css classes

    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed
        title = "(No title)" if title is None else title
        link = "" if link is None else link

        logging.debug("title: %s", title)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (color_idx % MAX_LINK_Z))
        color_idx += 1
        par = body.p

        first = True
        for (it_title, it_link) in items:
            if not first:
                par += " - "
            first = False

            if not it_title:
                it_title = "(missing title)"
            if it_link is None:
                # no link: emit the title as plain text
                par += _strip_html(it_title)
            else:
                par.a(_strip_html(it_title), href=it_link, klass="z%d" % (color_idx % MAX_LINK_Z))

            color_idx += 1

    elapsed = datetime.datetime.now() - dtnow
    doc.div("%.3f" % (elapsed.days*86400 + elapsed.seconds + elapsed.microseconds/1e6), klass="debug")

    return str(doc)
paulo@108 168
paulo@108 169
def _fetch_url(url):
    """Fetch *url* and return its body decoded as UTF-8, or None on HTTP error.

    Transparently decompresses gzip-encoded responses.  Non-HTTP failures
    (timeouts, URLError, decode errors) propagate to the caller, which
    logs them per feed.
    """
    request = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0 Browser"})
    try:
        logging.info("processing %s" % url)
        # with-statement closes the response even if read()/decompress raises
        # (the previous version leaked the connection)
        with urllib.request.urlopen(request, timeout=URLOPEN_TIMEOUT) as feed:
            # header lookup is case-insensitive, unlike matching raw header lines
            if feed.info().get("Content-Encoding") == "gzip":
                body = gzip.decompress(feed.read())
            else:
                body = feed.read()
    except urllib.error.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return str(body, encoding="utf-8")
paulo@108 185
paulo@108 186
paulo@108 187 def _filter_feed(feed):
paulo@110 188 ret = feed.strip()
paulo@108 189
paulo@108 190 filter_out = ["\x16"]
paulo@108 191 for i in filter_out:
paulo@108 192 ret = ret.replace(i, "")
paulo@108 193
paulo@108 194 return ret
paulo@108 195
paulo@108 196
def _process_feed(feed):
    """Parse *feed* XML text and dispatch to the matching format handler.

    Returns the (title, link, items) tuple produced by _go_rss, _go_atom
    or _go_purl_rss.  Raises NotImplementedError for unrecognized root
    tags or unsupported RSS versions.
    """
    elementTree = xml.etree.ElementTree.parse(io.StringIO(feed))
    root = elementTree.getroot()
    parsed = _parse_root_tag(root.tag)

    if parsed == (None, "rss"):
        if float(root.get("version", 0.0)) < 2.0:
            raise NotImplementedError("Unsupported rss version")
        return _go_rss(elementTree)

    if parsed == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(elementTree)

    if parsed == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        return _go_purl_rss(elementTree)

    raise NotImplementedError("Unknown root tag")
paulo@108 220
paulo@108 221
class WorkerThread(threading.Thread):
    """Daemon thread turning (idx, url) work items into (idx, docfeed) results.

    Pulls from input_queue forever; any fetch/parse failure is logged and
    reported as a None docfeed, so the consumer always receives exactly
    one result per dispatched job.
    """

    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        super().__init__(*args, **kwargs)
        self.daemon = True  # do not block interpreter shutdown

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            result = None
            try:
                raw = _fetch_url(url)
                if raw is not None:
                    result = _process_feed(_filter_feed(raw))
            except Exception as e:
                logging.info("(%s) exception: (%s) %s" % (url, type(e), e))
            self._output_queue.put((idx, result))
paulo@108 240
paulo@108 241
def main(input_queue, output_queue, lock):
    """Build (or serve from cache) the aggregated HTML page and return it.

    Feeds are listed in FEEDS_FILE, one URL per line; '#' comment lines
    and blank lines are skipped.  URLs are fanned out to the worker
    threads via input_queue and results collected from output_queue,
    restored to file order via their index.  *lock* serializes concurrent
    requests so only one rebuild runs at a time.
    """
    ret = ''

    with lock:
        logging.debug("main() started")
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        cache_is_fresh = (
            os.path.exists(CACHE_HTML_FILE)
            and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE)
        )

        if cache_is_fresh:
            # explicit encoding: the cache is written as UTF-8 below
            with open(CACHE_HTML_FILE, encoding="utf-8") as cache_html_file:
                ret = cache_html_file.read()
        else:
            with open(FEEDS_FILE, encoding="utf-8") as feeds_file:
                feedlines = feeds_file.readlines()

            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, line) in enumerate(feedlines):
                url = line.strip()
                # skip blank lines and '#' comments (even with leading whitespace);
                # previously blank lines were queued as empty URLs
                if url and not url.startswith('#'):
                    input_queue.put((i, url))
                    num_input += 1

            # one result per dispatched feed; idx restores original order
            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w', encoding="utf-8") as cache_html_file:
                cache_html_file.write(ret)
        logging.debug("main() ended")

    return ret
paulo@108 277
paulo@108 278
class MyRssApp:
    """WSGI application serving the aggregated feed page.

    Spawns MAX_THREADS daemon worker threads at construction; they live
    for the whole process and are fed through the shared bounded queues.
    """

    def __init__(self):
        logging.debug("MyRssApp.__init__() called")
        self._iq = queue.Queue(MAX_THREADS)
        self._oq = queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()  # serializes page rebuilds across requests

        for i in range(MAX_THREADS):
            logging.debug("Starting thread: %d" % i)
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    # Raw WSGI
    def __call__(self, environ, start_response):
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except Exception:
            # narrowed from a bare except: so SystemExit/KeyboardInterrupt propagate
            response_body = traceback.format_exc()

        # Content-Length must count bytes, not characters: encode first,
        # otherwise non-ASCII pages report a short length and get truncated
        body_bytes = bytes(response_body, encoding="utf-8")
        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(body_bytes))),
        ]
        start_response(response_code, response_headers)

        return [body_bytes]

    def call(self):
        """Non-WSGI entry point: build and return the page directly."""
        return main(self._iq, self._oq, self._main_lock)