import markdown
import os
import shutil
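
# Static site generator for the RNode Bootstrap Console. Renders the Markdown
# sources in SOURCES_PATH to HTML in BUILD_PATH, bundles the Python wheels
# listed in `packages`, copies in the offline copy of reticulum.network, and
# gzips every file in the build output.
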
packages = {
    "rns": "rns-0.4.6-py3-none-any.whl",
    "nomadnet": "nomadnet-0.3.1-py3-none-any.whl",
    "lxmf": "lxmf-0.2.8-py3-none-any.whl",
}

DEFAULT_TITLE = "RNode Bootstrap Console"
SOURCES_PATH = "./source"
BUILD_PATH = "./build"
PACKAGES_PATH = "../../dist_archive"
INPUT_ENCODING = "utf-8"
OUTPUT_ENCODING = "utf-8"
LXMF_ADDRESS = "8dd57a738226809646089335a6b03695"
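
# Page scaffolding and the shared menu source. generate_html() wraps each
# rendered page in document_start / document_end and substitutes the
# {PAGE_TITLE}, {MENU}, {ASSET_PATH} and {CONTENT_PATH} placeholders.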
document_start = """
{PAGE_TITLE}
RNode Console
{MENU}
"""
document_end = """"""
menu_md = """"""
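
# Optional explicit URL mappings. Each entry has the form
#   { "path": "<output directory>", "target": "<source .md file>" }
# and is rendered to <path>/index.html by the loop near the end of this script.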
url_maps = [
    # { "path": "", "target": "/.md"},
]
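
# Recursively collect the paths of all Markdown sources under base_path,
# skipping hidden files and directories.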
def scan_pages(base_path):
    files = [file for file in os.listdir(base_path) if os.path.isfile(os.path.join(base_path, file)) and file[:1] != "."]
    directories = [file for file in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, file)) and file[:1] != "."]

    page_sources = []
    for file in files:
        if file.endswith(".md"):
            page_sources.append(base_path+"/"+file)

    for directory in directories:
        page_sources.extend(scan_pages(base_path+"/"+directory))

    return page_sources
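
# Extract a named property embedded in a Markdown source as an invisible
# reference link. For example, given the line
#   [title]: <> (Welcome)
# get_prop(md, "title") returns "Welcome".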
def get_prop(md, prop):
    try:
        pt = "["+prop+"]: <> ("
        pp = md.find(pt)
        if pp != -1:
            ps = pp+len(pt)
            pe = md.find(")", ps)
            return md[ps:pe]
        else:
            return None
    except Exception as e:
        print("Error while extracting topic property: "+str(e))
        return None
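
# Collect entry metadata (title, image, date, excerpt) for every Markdown
# file in a topic directory, sorted by date with the newest entry first.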
def list_topic(topic):
    base_path = SOURCES_PATH+"/"+topic
    files = [file for file in os.listdir(base_path) if os.path.isfile(os.path.join(base_path, file)) and file[:1] != "." and file != "index.md"]

    topic_entries = []
    for file in files:
        if file.endswith(".md"):
            fp = base_path+"/"+file
            link_path = fp.replace(SOURCES_PATH, ".").replace(".md", ".html")
            with open(fp, "rb") as f:
                md = f.read().decode(INPUT_ENCODING)
            topic_entries.append({
                "title": get_prop(md, "title"),
                "image": get_prop(md, "image"),
                "date": get_prop(md, "date"),
                "excerpt": get_prop(md, "excerpt"),
                "md": md,
                "file": link_path
            })

    topic_entries.sort(key=lambda e: e["date"], reverse=True)
    return topic_entries
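
# Render a topic entry list into the fragment that replaces a {TOPIC:...}
# tag in generate_html(). Each entry contributes its title, date and excerpt
# in order.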
def render_topic(topic_entries):
    md = ""
    for topic in topic_entries:
        md += ""
        md += ""
        md += ""
        md += ""+str(topic["title"])+""
        md += ""+str(topic["date"])+""
        md += ""+str(topic["excerpt"])+""
        md += ""
        md += ""
    return md
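
# Render one Markdown source into a complete HTML page: resolve the page
# title, expand any {TOPIC:<name>} tag into a topic listing, convert the
# Markdown, and substitute the {ASSET_PATH}, {LXMF_ADDRESS}, {PKG_...} and
# {DATE} placeholders before wrapping the result in document_start and
# document_end.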
def generate_html(f, root_path):
    md = f.read().decode(INPUT_ENCODING)
    page_title = get_prop(md, "title")
    if page_title is None:
        page_title = DEFAULT_TITLE
    else:
        page_title += " | "+DEFAULT_TITLE

    tt = "{TOPIC:"
    tp = md.find(tt)
    if tp != -1:
        ts = tp+len(tt)
        te = md.find("}", ts)
        topic = md[ts:te]
        rt = tt+topic+"}"
        tl = render_topic(list_topic(topic))
        print("Found topic: "+str(topic)+", rt "+str(rt))
        md = md.replace(rt, tl)

    menu_html = markdown.markdown(menu_md.replace("{CONTENT_PATH}", root_path), extensions=["markdown.extensions.fenced_code"]).replace("", "")
    page_html = markdown.markdown(md, extensions=["markdown.extensions.fenced_code"]).replace("{ASSET_PATH}", root_path)
    page_html = page_html.replace("{LXMF_ADDRESS}", LXMF_ADDRESS)
    for pkg_name in packages:
        page_html = page_html.replace("{PKG_"+pkg_name+"}", pkg_name+".zip")
        page_html = page_html.replace("{PKG_NAME_"+pkg_name+"}", packages[pkg_name])

    page_date = get_prop(md, "date")
    if page_date is not None:
        page_html = page_html.replace("{DATE}", page_date)

    return document_start.replace("{ASSET_PATH}", root_path).replace("{MENU}", menu_html).replace("{PAGE_TITLE}", page_title) + page_html + document_end
source_files = scan_pages(SOURCES_PATH)
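
# Copy the pre-fetched snapshot of reticulum.network from the package archive
# into the build output under /r, leaving out the full manual.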
def fetch_reticulum_site():
    r_site_path = BUILD_PATH+"/r"
    shutil.copytree(PACKAGES_PATH+"/reticulum.network", r_site_path)
    shutil.rmtree(r_site_path+"/manual")
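
# Replace every file in the build output with a gzipped copy and delete the
# uncompressed original.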
def gz_all():
    import gzip
    for root, dirs, files in os.walk(BUILD_PATH):
        for file in files:
            fpath = root+"/"+file
            print("Gzipping "+fpath+"...")
            f = open(fpath, "rb")
            g = gzip.open(fpath+".gz", "wb")
            g.writelines(f)
            g.close()
            f.close()
            os.unlink(fpath)
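
# Bundle each wheel listed in `packages` into a single-file zip archive in the
# build output, named after its short key (for example rns.zip). Creating
# BUILD_PATH first is an added safeguard, since ZipFile() will not create
# missing directories on its own.
os.makedirs(BUILD_PATH, exist_ok=True)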
from zipfile import ZipFile
for pkg_name in packages:
    pkg_file = packages[pkg_name]
    pkg_full_path = PACKAGES_PATH+"/"+pkg_file
    if os.path.isfile(pkg_full_path):
        print("Including "+pkg_file)
        z = ZipFile(BUILD_PATH+"/"+pkg_name+".zip", "w")
        z.write(pkg_full_path, pkg_full_path[len(PACKAGES_PATH+"/"):])
        z.close()
        # shutil.copy(pkg_full_path, BUILD_PATH+"/"+pkg_name)
    else:
        print("Could not find "+pkg_full_path)
        exit(1)
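
# Render the explicit URL mappings first: each target source document becomes
# <BUILD_PATH>/<path>/index.html, with asset paths resolved against a fixed
# root of "../".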
for um in url_maps:
    with open(SOURCES_PATH+"/"+um["target"], "rb") as f:
        of = BUILD_PATH+um["target"].replace(SOURCES_PATH, "").replace(".md", ".html")
        root_path = "../"
        html = generate_html(f, root_path)
        print("Map path        : "+str(um["path"]))
        print("Map target      : "+str(um["target"]))
        print("Mapped root path: "+str(root_path))
        if not os.path.isdir(BUILD_PATH+"/"+um["path"]):
            os.makedirs(BUILD_PATH+"/"+um["path"], exist_ok=True)
        with open(BUILD_PATH+"/"+um["path"]+"/index.html", "wb") as wf:
            wf.write(html.encode(OUTPUT_ENCODING))
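
# Render every Markdown source found under SOURCES_PATH to a matching .html
# file in the build tree, deriving the relative root path from its depth.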
for mdf in source_files:
    with open(mdf, "rb") as f:
        of = BUILD_PATH+mdf.replace(SOURCES_PATH, "").replace(".md", ".html")
        root_path = "../"*(len(of.replace(BUILD_PATH+"/", "").split("/"))-1)
        html = generate_html(f, root_path)
        if not os.path.isdir(os.path.dirname(of)):
            os.makedirs(os.path.dirname(of), exist_ok=True)
        with open(of, "wb") as wf:
            wf.write(html.encode(OUTPUT_ENCODING))
fetch_reticulum_site()
gz_all()