#!/usr/bin/python3
|
|
|
|
import os,re,time,getpass,subprocess,markdown
|
|
from string import Template
|
|
from argparse import ArgumentParser
|
|
|
|
########################################
|
|
# UI - check for arguments and options #
|
|
########################################
|
|
|
|
########################################
# UI - check for arguments and options #
########################################

parser = ArgumentParser(description="Manage your weblog via commandline. Creates a static html5 compliant website with your publications.")

# general options
parser.add_argument('-d','--blog-dir', dest="blog_dir", default='~/public_html/blog', help='path to blog directory (default: %(default)s)')

### subroutines of pb
# top-level sub-commands: 'blog', 'draft', 'article' (stored in args.target)
subparsers = parser.add_subparsers(help="targets of operation", dest='target')

# 'blog' command
parser_blog = subparsers.add_parser('blog', help='add or update blog or its sections')

## 'blog' subroutines (stored in args.action)
subparsers_blog = parser_blog.add_subparsers(help="blog subcommands", dest='action')
blog_parser_update = subparsers_blog.add_parser('update', help='updates blog to current state')
blog_parser_update.add_argument('subsection', choices=['all','recent','archive','feed'], help='sections of blog you may want to update')
blog_parser_add = subparsers_blog.add_parser("add", help='add an non-existing blog at --blog-dir')

# 'draft' command
parser_draft = subparsers.add_parser('draft', help='create, list, edit, publish or remove drafts')

## 'draft' subroutines (stored in args.action)
subparsers_draft = parser_draft.add_subparsers(help="draft subcommands", dest='action')
draft_parser_add = subparsers_draft.add_parser('add', help='add a new draft')
draft_parser_add.add_argument('name', help='define an alphanumeric name')
draft_parser_list = subparsers_draft.add_parser('list', help='list drafts` names')
draft_parser_edit = subparsers_draft.add_parser('edit', help='edit certain draft')
draft_parser_edit.add_argument('name', help='name like specified by [list]')
draft_parser_remove = subparsers_draft.add_parser('remove', help='remove certain draft')
draft_parser_remove.add_argument('name', help='name like specified by [list]')
draft_parser_publish = subparsers_draft.add_parser('publish', help='publish draft as article')
draft_parser_publish.add_argument('name', help='name of draft to publish')

# 'article' command
parser_article = subparsers.add_parser('article', help='import, list, edit or delete article')

## 'article' subroutines (stored in args.action)
subparsers_article = parser_article.add_subparsers(help="article subcommands", dest='action')
article_parser_add = subparsers_article.add_parser('add', help='add a new article')
article_parser_add.add_argument('name', help='define an alphanumeric name')
article_parser_list = subparsers_article.add_parser('list', help='list articles')
article_parser_edit = subparsers_article.add_parser('edit', help='edit certain article')
article_parser_edit.add_argument('id', help='id like specified by [list]')
article_parser_remove = subparsers_article.add_parser('remove', help='remove certain article')
article_parser_remove.add_argument('id', help='id like specified by [list]')

args = parser.parse_args()
# NOTE(review): debug print of the parsed namespace -- confirm this is
# intended to stay in the released tool
print(args)
|
|
|
|
####################
# Global Constants #
####################

# logged in user -- used as the default article author
user = getpass.getuser()

# timestamp of this run -- used to decide which articles get archived
timestamp = time.localtime()

# Resolve the blog directory given on the command line.
# Bug fix: the original wrapped this in try/except with a bare re-raise
# (a no-op) and referenced the builtin ``exit`` without calling it, so
# execution silently continued with an invalid path.
blog_dir = os.path.abspath(os.path.expanduser(vars(args)['blog_dir']))
if not os.path.isdir(blog_dir):
    print("The path you offered is not a directory!")
    raise SystemExit(1)

# config holders; False means "not loaded"
(pbconf,blogconf,l10nconf) = (False,False,False)

# path to blog's templates
tplDir = os.path.join(blog_dir,"templates")
tplIndex = os.path.join(tplDir,"index.htm")
# read the index template once; a with-block closes the file handle
with open(tplIndex,'r') as _tpl_file:
    tplIndex = Template(_tpl_file.read())

# path to drafts
draftDir = os.path.join(blog_dir,"drafts")

# path to articles
articleDir = os.path.join(blog_dir,"articles")

# path to archive
archiveDir = os.path.join(blog_dir,"archive")

# path to html files
htmlDir = os.path.join(blog_dir,"html")
htmlIndex = os.path.join(htmlDir,"index.html")
htmlArchive = os.path.join(htmlDir,"archive.html")

# paths to configs
pbconfpath = os.path.join("/","etc","picoblogger","pb.conf") # global config
blogconfpath = os.path.join(blog_dir,"pb.conf") # local config
|
|
|
|
|
|
|
|
##################
|
|
# Help Functions #
|
|
##################
|
|
|
|
# parse unix-config-style files
|
|
|
|
def parse_conf(path):
    """Parse a unix-style ``KEY = value`` / ``KEY: value`` configuration.

    *path* may be a filename or the configuration text itself: when no
    file exists at *path*, the string is split into lines and parsed
    directly (parse_article() relies on this to parse header strings).
    Text after ``#`` on a line is discarded as a comment.

    Returns a dict mapping variable names to their (string) values.
    """
    aVars = {}
    # Raw string avoids invalid-escape warnings; compiled once instead of
    # per line.  NOTE: '|' inside the character class also accepts '|' as
    # a separator -- kept as-is for backward compatibility.
    pattern = re.compile(r'^(\w+)\s*?[=|:]\s*(.+)[^#]*$')
    if os.path.exists(path):
        # bug fix: the file handle was never closed
        with open(path, "r") as f:
            lines = f.readlines()
    else:
        lines = path.split("\n")
    for line in lines:
        if "#" in line:
            line = line[0:line.find("#")]
        match = pattern.search(line)
        if match:
            aVars[match.group(1)] = match.group(2)
    return aVars
|
|
|
|
|
|
|
|
######################
|
|
# Parse Config Files #
|
|
######################
|
|
|
|
######################
# Parse Config Files #
######################

# system-wide defaults (optional; pbconf stays False when the file is absent)
try:
    if os.path.isfile(pbconfpath):
        pbconf = parse_conf(pbconfpath)
except:
    print("Couldn't parse systems global picoblogger configuration (%s)."%pbconfpath)
    raise

# blog-local configuration; the keys read below raise KeyError when missing
try:
    if os.path.isfile(blogconfpath):
        blogconf = parse_conf(blogconfpath)
        dateFormat = blogconf['BLOG_DATE_FORMAT']          # strftime format for dates
        timeFormat = blogconf['BLOG_TIME_FORMAT']          # strftime format for times
        amountRecent = int(blogconf['RECENT_AMOUNT'])      # links in "recent" box
        nrOfCurrentArticles = int(blogconf['BLOG_CURRENT_ARTICLES'])

except:
    print("Couldn't parse blog's local configuration (%s)."%blogconfpath)
    raise

# NOTE(review): if the local config file does not exist, blogconf is still
# False here and blogconf['BLOG_LANG'] raises TypeError -- confirm the local
# config is considered mandatory
l10nconfpath = os.path.join(blog_dir,"l10n",blogconf['BLOG_LANG'],"static.conf") # localization file
try:
    if os.path.isfile(l10nconfpath):
        l10nconf = parse_conf(l10nconfpath)
except:
    print("Couldn't parse localization file (%s)."%l10nconfpath)
    raise
|
|
|
|
|
|
########################
|
|
# Templating functions #
|
|
########################
|
|
|
|
def parse_article(path):
    """Read an article text file into a dict.

    The file layout is a ``HEADER_BEGIN``/``HEADER_END`` delimited
    key/value header followed by a markdown body.  Returns the header
    variables (via parse_conf) plus:
      PATH, FILENAME     -- split of *path*
      BODY               -- markdown rendered to HTML
      MODIFIED_DATE/TIME -- file mtime formatted with the configured
                            dateFormat/timeFormat (UTC)
    Raises IndexError when the header markers are missing.
    """
    # bug fix: file handle was never closed
    with open(path, "r") as f:
        article = f.read()
    header = article.split("HEADER_BEGIN")[1]
    header = header.split("HEADER_END")[0]
    body = markdown.markdown(article.split("HEADER_END")[1])

    article_dict = parse_conf(header)
    split_path = os.path.split(path)
    article_dict['PATH'] = split_path[0]
    article_dict['FILENAME'] = split_path[1]
    article_dict['BODY'] = body

    # stat the file once instead of twice
    mtime = time.gmtime(os.stat(path).st_mtime)
    article_dict['MODIFIED_DATE'] = time.strftime(dateFormat, mtime)
    article_dict['MODIFIED_TIME'] = time.strftime(timeFormat, mtime)

    return article_dict
|
|
|
|
def tags_to_html(tags):
    """Render a comma separated tag string as a row of HTML tag links."""
    link_tpl = Template('<a href="tags/${TAG}.html" class="tag">#${TAG}</a> ')
    names = tags.split(",")

    # a lone tag shorter than two characters counts as "no tags"
    if len(names[0]) <= 1 and len(names) == 1:
        return ""
    if len(names[0]) <= 1:
        return ""
    return "".join(link_tpl.safe_substitute({'TAG': name}) for name in names)
|
|
|
|
|
|
def article_to_html(article_dict):
    """Render one parsed article dict as a full HTML article block.

    Fills templates/article.htm, localized first via l10nconf.
    NOTE: mutates *article_dict* in place -- DATE/TIME are rewritten in
    the blog's configured display formats and TAGS becomes HTML -- so it
    must not be called twice on the same dict.
    """
    sARTICLE = open(os.path.join(tplDir,"article.htm")).read()
    sARTICLE = Template(sARTICLE).safe_substitute(l10nconf)

    # Read out article txt file

    # "%x"/"%X" parse the locale's date/time representation; article
    # headers are written with the same formats by the add/draft commands
    article_dict['DATE'] = time.strftime(dateFormat, time.strptime(article_dict['DATE'],"%x"))
    article_dict['TIME'] = time.strftime(timeFormat, time.strptime(article_dict['TIME'],"%X"))
    article_dict['TAGS'] = tags_to_html(article_dict['TAGS'])

    # map header fields onto the template's placeholder names
    tplSub = {
        "ARTICLE_ID":article_dict['ID'],
        "ARTICLE_TITLE":article_dict['TITLE'],
        "ARTICLE_AUTHOR":article_dict['AUTHOR'],
        "ARTICLE_CREATED_DATE":article_dict['DATE'],
        "ARTICLE_CREATED_TIME":article_dict['TIME'],
        "ARTICLE_MODIFIED_DATE":article_dict['MODIFIED_DATE'],
        "ARTICLE_MODIFIED_TIME":article_dict['MODIFIED_TIME'],
        "ARTICLE_BODY":article_dict['BODY'],
        "ARTICLE_TAGS":article_dict['TAGS']
    }

    sARTICLE = Template(sARTICLE).safe_substitute(tplSub)
    return sARTICLE
|
|
|
|
def join_articles(artDir = articleDir):
    """Render every article in *artDir* to HTML, newest first.

    Editor backup files (names containing '~') are skipped.  Articles
    are ordered by their DATE/TIME header fields (``%m/%d/%y %H:%M:%S``),
    most recent first.  NOTE: articles sharing the exact same timestamp
    overwrite each other, as in the original implementation.
    """
    articles = {}
    for name in [os.path.splitext(item)[0] for item in os.listdir(artDir) if not "~" in item]:
        article = parse_article(os.path.join(artDir, name + ".txt"))
        # key each article by its creation timestamp for sorting
        t = time.mktime(time.strptime(article['DATE'] + " " + article['TIME'], "%m/%d/%y %H:%M:%S"))
        articles[t] = article
    # Bug fix: the original round-tripped keys through str() and a no-op
    # time.strftime(str(date)); sort the numeric keys directly.
    return "".join(article_to_html(articles[t]) for t in sorted(articles, reverse=True))
|
|
|
|
def recent_to_html(article_dict):
    """Render one article as a list-item link for the recent-articles box.

    NOTE: mutates *article_dict* in place (DATE/TIME reformatted, TAGS
    replaced by HTML), exactly like article_to_html().
    """
    sLINK = '<li><a href="${ARTICLE_ID}.html" title="${ARTICLE_CREATED_DATE} ${ARTICLE_CREATED_TIME}">${ARTICLE_TITLE}</a></li>'

    # rewrite header fields into the configured display formats
    article_dict['DATE'] = time.strftime(dateFormat, time.strptime(article_dict['DATE'], "%x"))
    article_dict['TIME'] = time.strftime(timeFormat, time.strptime(article_dict['TIME'], "%X"))
    article_dict['TAGS'] = tags_to_html(article_dict['TAGS'])

    # extra keys are harmless: safe_substitute only consumes the
    # placeholders the template actually references
    fields = {
        "ARTICLE_ID": article_dict['ID'],
        "ARTICLE_TITLE": article_dict['TITLE'],
        "ARTICLE_AUTHOR": article_dict['AUTHOR'],
        "ARTICLE_CREATED_DATE": article_dict['DATE'],
        "ARTICLE_CREATED_TIME": article_dict['TIME'],
        "ARTICLE_MODIFIED_DATE": article_dict['MODIFIED_DATE'],
        "ARTICLE_MODIFIED_TIME": article_dict['MODIFIED_TIME'],
        "ARTICLE_BODY": article_dict['BODY'],
        "ARTICLE_TAGS": article_dict['TAGS'],
    }
    return Template(sLINK).safe_substitute(fields)
|
|
|
|
|
|
def recent_articles(amount):
    """Return HTML <li> links for the *amount* most recent articles.

    Articles are read from articleDir, editor backups ('~' in the name)
    are skipped, and ordering follows the DATE/TIME header fields
    (``%m/%d/%y %H:%M:%S``), newest first.
    """
    articles = {}
    for name in [os.path.splitext(item)[0] for item in os.listdir(articleDir) if not "~" in item]:
        article = parse_article(os.path.join(articleDir, name + ".txt"))
        # key each article by its creation timestamp for sorting
        t = time.mktime(time.strptime(article['DATE'] + " " + article['TIME'], "%m/%d/%y %H:%M:%S"))
        articles[t] = article
    # Bug fix: the original round-tripped keys through str() and a no-op
    # time.strftime(str(...)); sort numerically and slice instead of an
    # index loop with a bounds check.  max(0, amount) keeps the original
    # "empty result" behavior for non-positive amounts.
    newest_first = sorted(articles, reverse=True)
    return "".join(recent_to_html(articles[t]) for t in newest_first[:max(0, amount)])
|
|
|
|
def archive_articles():
    """Move articles older than the current month into the archive tree.

    The archive is structured as ``archive/<year>/<month>/``.  Articles
    dated in the current month (or the future) stay in place.  Prints a
    line per archived file; re-raises on a failed move.
    """
    articles = [parse_article(os.path.join(articleDir,item)) for item in os.listdir(articleDir) if not "~" in item]

    cMonth = timestamp.tm_mon
    cYear = timestamp.tm_year

    # sort articles by date of creation into archive subfolders of year/month
    for article in articles:
        aName = article["FILENAME"]
        aPath = os.path.join(article["PATH"], aName)
        aDate = time.strptime(article["DATE"],"%m/%d/%y")
        aMonth = aDate.tm_mon
        aYear = aDate.tm_year

        # Bug fix: the original test ``(aMonth < cMonth) or (aYear < cYear)``
        # also archived future-dated articles whose month number happened to
        # be lower than the current month; compare (year, month) tuples.
        if (aYear, aMonth) < (cYear, cMonth):
            monthDir = os.path.join(archiveDir, str(aYear), str(aMonth))
            # exist_ok collapses the original makedirs/mkdir branching
            os.makedirs(monthDir, mode=0o755, exist_ok=True)

            replPath = os.path.join(monthDir, aName)
            try:
                os.replace(aPath, replPath)
                print("Archived %s into %s."%(aName,monthDir))
            except OSError:
                print("Failed archiving %s."%aName)
                raise
|
|
|
|
def list_of_months():
    """Map each archived year to the list of its month subdirectories.

    Returns e.g. ``{'2023': ['1', '2'], ...}``; month entries stay the
    numeric directory names.  (The original computed a localized month
    name per entry and discarded it -- that dead code is removed.)
    """
    dArchives = {}
    for year in os.listdir(archiveDir):
        dArchives[year] = os.listdir(os.path.join(archiveDir, year))
    return dArchives
|
|
|
|
def list_articles(folder):
    """Return HTML <li> links for every article in *folder*, newest first.

    Editor backup files ('~' in the name) are skipped; ordering follows
    the DATE/TIME header fields (``%m/%d/%y %H:%M:%S``).
    """
    articles = {}
    for name in [os.path.splitext(item)[0] for item in os.listdir(folder) if not "~" in item]:
        article = parse_article(os.path.join(folder, name + ".txt"))
        # key each article by its creation timestamp for sorting
        t = time.mktime(time.strptime(article['DATE'] + " " + article['TIME'], "%m/%d/%y %H:%M:%S"))
        articles[t] = article
    # Bug fix: drop the no-op time.strftime(str(...)) key round trip and
    # sort the numeric keys directly, newest first.
    return "".join(recent_to_html(articles[t]) for t in sorted(articles, reverse=True))
|
|
|
|
|
|
|
|
def months_to_html(archived_months):
    """Render the {year: [months]} archive map as nested <ul> lists."""
    parts = ["<ul>"]
    for year, months in archived_months.items():
        parts.append("<li>" + str(year) + "</li>")
        parts.append("<ul>")
        for month in months:
            parts.append("<li>" + str(month) + "</li>")
        parts.append("</ul>")
    parts.append("</ul>")
    return "".join(parts)
|
|
|
|
|
|
|
|
def list_archive(archived_months):
    """Render year/month headings plus article link lists for the archive."""
    sections = []
    for year, months in archived_months.items():
        sections.append("<h2>" + str(year) + "</h2>")
        for month in months:
            sections.append("<h3>" + str(month) + "</h3>")
            sections.append(list_articles(os.path.join(archiveDir, str(year), str(month))))
    return "".join(sections)
|
|
|
|
def templating(dic):
    """Assemble a complete HTML page from the template files in tplDir.

    *dic* may provide:
      sMAIN_TITLE -- heading substituted into main.htm
      sMAIN       -- HTML appended to the main section
      sRECENT     -- HTML for the "recent articles" aside box
    Templates are filled with l10nconf first, then blogconf, then pbconf
    (when loaded).  Returns the finished page as a string.
    """
    sMAIN = ""
    recent = ""  # bug fix: was unbound when 'sRECENT' was not supplied
    if 'sMAIN_TITLE' in dic:
        sMAIN = open(os.path.join(tplDir,"main.htm")).read()
        sMAIN = Template(sMAIN).safe_substitute({'MAIN_TITLE': dic['sMAIN_TITLE']})
    if 'sMAIN' in dic:
        sMAIN += dic['sMAIN']
    if 'sRECENT' in dic:
        recent = dic['sRECENT']
    # (the original also read dic['sASIDE'] into an unused local -- removed)

    sNAV = open(os.path.join(tplDir,"nav.htm")).read()
    sNAV = Template(sNAV).safe_substitute(l10nconf)

    sASIDE = open(os.path.join(tplDir,"aside.htm")).read()
    sRECENT = open(os.path.join(tplDir,"recent.htm")).read()
    sCONTACT = open(os.path.join(tplDir,"contact.htm")).read()
    sFOOTER = open(os.path.join(tplDir,"footer.htm")).read()

    # localization pass
    tmpIndex = tplIndex.safe_substitute(l10nconf)
    sASIDE = Template(sASIDE).safe_substitute(l10nconf)
    sCONTACT = Template(sCONTACT).safe_substitute(l10nconf)
    sRECENT = Template(sRECENT).safe_substitute(l10nconf)
    sFOOTER = Template(sFOOTER).safe_substitute(l10nconf)

    # blog-local config pass
    if blogconf:
        tmpIndex = Template(tmpIndex).safe_substitute(blogconf)
        sASIDE = Template(sASIDE).safe_substitute(blogconf)
        sCONTACT = Template(sCONTACT).safe_substitute(blogconf)
        sRECENT = Template(sRECENT).safe_substitute(blogconf)
        sFOOTER = Template(sFOOTER).safe_substitute(blogconf)

    # system-wide config pass
    if pbconf:
        # bug fix: original referenced the undefined name ``tpIndex`` here,
        # raising NameError whenever a global config was loaded
        tmpIndex = Template(tmpIndex).safe_substitute(pbconf)
        sASIDE = Template(sASIDE).safe_substitute(pbconf)
        sCONTACT = Template(sCONTACT).safe_substitute(pbconf)
        sRECENT = Template(sRECENT).safe_substitute(pbconf)
        sFOOTER = Template(sFOOTER).safe_substitute(pbconf)

    sRECENT = Template(sRECENT).safe_substitute({"RECENT_ARTICLES":recent})
    sASIDE = Template(sASIDE).safe_substitute({"ASIDE_RECENT":sRECENT, "ASIDE_CONTACT":sCONTACT})

    tplSub = {
        "TEMPLATE_NAV":sNAV,
        "TEMPLATE_MAIN":sMAIN,
        "TEMPLATE_ASIDE":sASIDE,
        "TEMPLATE_FOOTER":sFOOTER
    }

    return Template(tmpIndex).safe_substitute(tplSub)
|
|
|
|
|
|
def build_index():
    """Write the blog's front page (index.html) in the configured charset."""
    params = {
        'sRECENT': recent_articles(amountRecent),
        'sMAIN': join_articles(),
        'sMAIN_TITLE': l10nconf['CURRENT'],
    }
    with open(htmlIndex, "w", encoding=l10nconf['BLOG_CHARSET']) as f:
        print(templating(params), file=f)
|
|
|
|
|
|
|
|
def build_archive():
    """Write the archive overview page (archive.html).

    The main section lists every archived article grouped by year/month;
    the aside shows the month navigation.
    """
    param = {}
    # computed once -- the original scanned the archive tree twice
    archived = list_of_months()
    param['sRECENT'] = months_to_html(archived)
    param['sMAIN'] = list_archive(archived)
    param['sMAIN_TITLE'] = l10nconf['ARCHIVE']
    htmlString = templating(param)
    with open(htmlArchive,"w", encoding=l10nconf['BLOG_CHARSET']) as f:
        print(htmlString, file=f)
|
|
|
|
|
|
|
|
def build_article(path):
    """Render a single article to ``html/<ID>.html``.

    path -- path to the article's .txt source file; its ID header field
    names the output file.
    """
    param = {}
    dic = parse_article(path)
    param['sRECENT'] = recent_articles(amountRecent)
    param['sMAIN'] = article_to_html(dic)
    param['sMAIN_TITLE'] = l10nconf['ARTICLE']
    htmlArticle = os.path.join(htmlDir,dic['ID']+".html")
    # (dead assignment ``htmlString = "<h1>"`` removed -- it was
    # immediately overwritten)
    htmlString = templating(param)
    with open(htmlArticle,"w", encoding=l10nconf['BLOG_CHARSET']) as f:
        print(htmlString, file=f)
|
|
|
|
def build_current_articles():
    """Render every article currently in articleDir to its HTML page."""
    for item in os.listdir(articleDir):
        if "~" in item:
            continue  # skip editor backup files
        stem = os.path.splitext(item)[0]
        build_article(os.path.join(articleDir, stem + ".txt"))
|
|
|
|
|
|
def update_blog(subsection):
    """Joins config defined variables into templates.

    subsection -- 'recent', 'archive' or 'all' ('feed' is accepted by the
    CLI but currently has no handler here).
    """
    # Bug fix: these branches were chained with ``elif``, so 'all' only
    # rebuilt the index and never the archive.
    if subsection in ("recent","all"):
        build_index()
        build_current_articles()

    if subsection in ("archive","all"):
        build_archive()
|
|
|
|
|
|
|
|
|
|
##################
|
|
# Main Functions #
|
|
##################
|
|
|
|
# blog functions
|
|
|
|
if vars(args)['target'] == 'blog':

    ## update subsections or whole blog
    if vars(args)['action'] == 'update':
        # move last month's articles into the archive tree first, so the
        # rebuild below only sees current articles
        archive_articles()

        # NOTE(review): these three branches are placeholders; the real
        # work happens in update_blog() below
        if vars(args)['subsection'] in ('recent','all'):
            pass

        if vars(args)['subsection'] in ('archive','all'):
            pass

        if vars(args)['subsection'] in ('feed','all'):
            pass

        # Joining all templates
        update_blog(vars(args)['subsection'])
        print("Updated your blog.")

    ## add a new blog at -d blog-directory (not implemented yet)
    if vars(args)['action'] == 'add':
        pass
|
|
|
|
|
|
# draft functions
|
|
|
|
if vars(args)['target'] == 'draft':

    ## add new draft: instantiate the article template, then open an editor
    if vars(args)['action'] == 'add':
        draft = os.path.join(blog_dir,"drafts",vars(args)['name'] + ".txt")
        tpl = os.path.join(tplDir,"article.tpl")
        sTpl = open(tpl).read()
        # pre-fill ID (epoch seconds), creation date/time in the current
        # locale's representation, and the logged-in user as author
        subs = {"ID":int(time.mktime(time.localtime())), "DATE" : time.strftime("%x",time.localtime()), "TIME" : time.strftime("%X",time.localtime()),
                "AUTHOR" : user
                }

        sTpl = Template(sTpl).safe_substitute(subs)
        with open(draft,"w", encoding=l10nconf['BLOG_CHARSET']) as f:
            print(sTpl, file=f)

        # NOTE(review): the editor is hard-coded to emacs
        subprocess.call(["emacs", "-nw", draft])

    ## list drafts (stem names; editor backups containing '~' are skipped)
    if vars(args)['action'] == 'list':
        [print(item) for item in [os.path.splitext(item)[0] for item in os.listdir(draftDir) if not "~" in item]]

    ## edit draft
    if vars(args)['action'] == 'edit':
        draft = os.path.join(blog_dir,"drafts",vars(args)['name'] + ".txt")
        subprocess.call(["emacs", "-nw", draft])

    ## remove draft
    if vars(args)['action'] == 'remove':
        draft = os.path.join(blog_dir,"drafts",vars(args)['name'] + ".txt")
        try:
            os.remove(draft)
        except:
            print("Can't find %s."%draft)

    ## publish draft as article: move it from drafts/ into articles/
    if vars(args)['action'] == 'publish':
        draft = os.path.join(blog_dir,"drafts",vars(args)['name'] + ".txt")
        article = os.path.join(blog_dir,"articles",vars(args)['name'] + ".txt")
        os.replace(draft, article)
|
|
|
|
# article functions
|
|
|
|
if vars(args)['target'] == 'article':

    ## add new article: instantiate the article template, then open an editor
    if vars(args)['action'] == 'add':
        article = os.path.join(blog_dir,"articles",vars(args)['name'] + ".txt")
        tpl = os.path.join(tplDir,"article.tpl")
        sTpl = open(tpl).read()
        # pre-fill ID (epoch seconds), creation date/time in the current
        # locale's representation, and the logged-in user as author
        subs = {"ID":int(time.mktime(time.localtime())), "DATE" : time.strftime("%x",time.localtime()), "TIME" : time.strftime("%X",time.localtime()),
                "AUTHOR" : user
                }

        sTpl = Template(sTpl).safe_substitute(subs)
        with open(article,"w", encoding=l10nconf['BLOG_CHARSET']) as f:
            print(sTpl, file=f)

        # NOTE(review): the editor is hard-coded to emacs
        subprocess.call(["emacs", "-nw", article])

    ## list articles (stem names; editor backups containing '~' are skipped)
    if vars(args)['action'] == 'list':
        [print(item) for item in [os.path.splitext(item)[0] for item in os.listdir(articleDir) if not "~" in item]]

    ## edit article (looked up by its id as printed by [list])
    if vars(args)['action'] == 'edit':
        draft = os.path.join(blog_dir,"articles",vars(args)['id'] + ".txt")
        subprocess.call(["emacs", "-nw", draft])

    ## remove article (not implemented yet)
    if vars(args)['action'] == 'remove':
        pass
|
|
|