[design] list views

This commit is contained in:
Rushabh Mehta 2015-01-05 17:36:32 +05:30
parent d5f3479f58
commit 7ae3ca289b
24 changed files with 129 additions and 989 deletions

View file

@ -25,7 +25,6 @@
### Python
- html2text.py - GNU GPL 3, (c) 2004-2008 Aaron Swartz
- minify.js - MIT License, (c) 2002 Douglas Crockford
### Icon Fonts

View file

@ -245,6 +245,8 @@ def validate_fields(meta):
frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))
def check_min_items_in_list(fields):
if not meta.get("__islocal"):
return
if len(filter(lambda d: d.in_list_view, fields))==0:
for d in fields[:5]:
if d.fieldtype in type_map:

View file

@ -8,7 +8,7 @@ import urllib
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html
from frappe.email.html2text import html2text
from html2text import html2text
from frappe.utils import cint, get_url, nowdate
class BulkLimitCrossedError(frappe.ValidationError): pass

View file

@ -91,7 +91,7 @@ class EMail:
def set_html_as_text(self, html):
"""return html2text"""
import HTMLParser
from frappe.email.html2text import html2text
from html2text import html2text
try:
self.set_text(html2text(html))
except HTMLParser.HTMLParseError:

View file

@ -1,914 +0,0 @@
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.200.3"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
# Compatibility shim for extremely old Pythons (pre-2.3) that lack the
# True/False builtins; on any modern interpreter the try succeeds and the
# except branch is never taken.
try:
    True
except NameError:
    setattr(__builtins__, 'True', 1)
    setattr(__builtins__, 'False', 0)
def has_key(x, y):
    """Portable membership test: use ``has_key`` on Python 2 mappings
    that provide it, otherwise fall back to the ``in`` operator."""
    if hasattr(x, 'has_key'):
        return x.has_key(y)
    return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
# --- Module-level defaults; each is copied onto an HTML2Text instance in
# --- __init__ and may be overridden per-instance or via the CLI options.

# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0

# Escape all special characters. Output is less readable, but avoids corner case formatting issues.
ESCAPE_SNOB = 0

# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0

# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78

# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True

# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True

# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36

IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IGNORE_EMPHASIS = False
### Entity Nonsense ###
def name2cp(k):
    """Translate an HTML entity name (e.g. ``'amp'``) to its Unicode
    code point. ``'apos'`` is special-cased because it is missing from
    older entity tables."""
    if k == 'apos':
        return ord("'")
    if hasattr(htmlentitydefs, "name2codepoint"):  # requires Python 2.3
        return htmlentitydefs.name2codepoint[k]
    # Ancient-Python fallback: entitydefs maps to latin-1 bytes or "&#NNN;".
    replacement = htmlentitydefs.entitydefs[k]
    if replacement.startswith("&#") and replacement.endswith(";"):
        return int(replacement[2:-1])  # not in latin-1
    return ord(codecs.latin_1_decode(replacement)[0])
# ASCII stand-ins for common named entities, used when unicode_snob is off.
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}

# Reverse map keyed by code point, built once at import time; consulted by
# HTML2Text.charref when resolving numeric character references.
unifiable_n = {}

for k in unifiable.keys():
    unifiable_n[name2cp(k)] = unifiable[k]

### End Entity Nonsense ###
def onlywhite(line):
    """Return a truthy value iff *line* consists only of whitespace.

    Returns the line itself when it is all spaces/tabs (truthy for a
    non-empty line, falsy for ``""``), and ``False`` as soon as a
    non-whitespace character is found — preserving the original truthiness
    contract used by ``optwrap``.
    """
    for c in line:
        # The original compared characters with `is`, which relies on
        # CPython string interning; use membership/equality instead.
        if c not in (' ', '\t'):
            return False
    return line
def hn(tag):
    """Return the heading level for an ``h1``..``h9`` tag name.

    Falsy results keep the original quirks: ``0`` when the second
    character is not a digit, ``None`` for any other tag.
    """
    if len(tag) == 2 and tag[0] == 'h':
        try:
            level = int(tag[1])
        except ValueError:
            return 0
        if 1 <= level <= 9:
            return level
def dumb_property_dict(style):
    """Parse an inline CSS declaration string into a dict of attributes.

    Splits on ';' and the first ':' of each declaration, trimming
    whitespace; declarations without a colon are ignored.
    """
    props = {}
    for declaration in style.split(';'):
        if ':' not in declaration:
            continue
        key, value = declaration.split(':', 1)
        props[key.strip()] = value.strip()
    return props
def dumb_css_parser(data):
    """Parse a stylesheet into ``{selector: {attribute: value}}``.

    ``@import`` statements are stripped first; a malformed sheet yields
    an empty mapping rather than raising.
    """
    # Remove every @import sentence up to (and including) its terminating ';'.
    data += ';'
    pos = data.find('@import')
    while pos != -1:
        data = data[0:pos] + data[data.find(';', pos) + 1:]
        pos = data.find('@import')

    # Plain list-of-pairs rather than a dict comprehension, to keep
    # compatibility with older Python versions.
    pairs = [chunk.split('{') for chunk in data.split('}') if '{' in chunk.strip()]
    try:
        parsed = dict([(selector.strip(), dumb_property_dict(body)) for selector, body in pairs])
    except ValueError:
        parsed = {}  # not that important
    return parsed
def element_style(attrs, style_def, parent_style):
    """Return the 'final' computed style dict for an element.

    Starts from a copy of *parent_style*, layers on any class rules found
    in *style_def* (keyed as ``'.classname'``), then applies the inline
    ``style`` attribute, which wins over everything.
    """
    style = parent_style.copy()
    if 'class' in attrs:
        for css_class in attrs['class'].split():
            # Bugfix: a class with no matching stylesheet rule used to
            # raise KeyError; treat it as contributing no attributes.
            css_style = style_def.get('.' + css_class, {})
            style.update(css_style)
    if 'style' in attrs:
        immediate_style = dumb_property_dict(attrs['style'])
        style.update(immediate_style)
    return style
def google_list_style(style):
    """Classify a Google Docs list as unordered ('ul') or ordered ('ol')
    from its ``list-style-type`` CSS attribute."""
    marker = style.get('list-style-type')
    if marker in ('disc', 'circle', 'square', 'none'):
        return 'ul'
    return 'ol'
def google_has_height(style):
    """True when the element's style defines an explicit 'height'."""
    return 'height' in style
def google_text_emphasis(style):
    """Collect the emphasis modifiers present on an element's style,
    in the fixed order text-decoration, font-style, font-weight."""
    modifier_keys = ('text-decoration', 'font-style', 'font-weight')
    return [style[key] for key in modifier_keys if key in style]
def google_fixed_width_font(style):
    """True when the element's CSS selects one of the fixed-width font
    families Google Docs uses ('Courier New' or 'Consolas')."""
    return style.get('font-family', '') in ('Courier New', 'Consolas')
def list_numbering_start(attrs):
    """Return the zero-based numbering offset from an ``<ol start=N>``
    attribute dict; 0 when no start attribute is present."""
    if 'start' not in attrs:
        return 0
    return int(attrs['start']) - 1
class HTML2Text(HTMLParser.HTMLParser):
    """SAX-style HTML-to-Markdown converter.

    Feed HTML via handle()/feed(); tag callbacks append Markdown fragments
    through o(), which manages paragraph breaks, blockquote prefixes, link
    reference lists and <pre> indentation. close() joins the accumulated
    fragments and returns the final text. Much of the state machine
    (emphasis counters, drop_white_space, p_p) exists to cope with
    Google-Docs-exported HTML when google_doc is set.

    NOTE(review): several paths (unicode(), xrange, unichr) are Python-2
    spellings guarded elsewhere by try/except — confirm before running the
    google_doc/pre paths on Python 3.
    """
    def __init__(self, out=None, baseurl=''):
        HTMLParser.HTMLParser.__init__(self)

        # Config options (copied from module-level defaults so each
        # instance can be tuned independently).
        self.unicode_snob = UNICODE_SNOB
        self.escape_snob = ESCAPE_SNOB
        self.links_each_paragraph = LINKS_EACH_PARAGRAPH
        self.body_width = BODY_WIDTH
        self.skip_internal_links = SKIP_INTERNAL_LINKS
        self.inline_links = INLINE_LINKS
        self.google_list_indent = GOOGLE_LIST_INDENT
        self.ignore_links = IGNORE_ANCHORS
        self.ignore_images = IGNORE_IMAGES
        self.ignore_emphasis = IGNORE_EMPHASIS
        self.google_doc = False
        self.ul_item_mark = '*'
        self.emphasis_mark = '_'
        self.strong_mark = '**'

        # Output sink: a callable receiving text fragments.
        if out is None:
            self.out = self.outtextf
        else:
            self.out = out

        self.outtextlist = []  # empty list to store output characters before they are "joined"

        try:
            self.outtext = unicode()
        except NameError:  # Python3
            self.outtext = str()

        self.quiet = 0             # >0 suppresses output (head/style/script, strikethrough)
        self.p_p = 0               # number of newline character to print before next output
        self.outcount = 0
        self.start = 1
        self.space = 0
        self.a = []                # pending reference-style links
        self.astack = []           # open <a> tags
        self.maybe_automatic_link = None
        self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
        self.acount = 0
        self.list = []             # stack of open lists: {'name': 'ul'/'ol', 'num': n}
        self.blockquote = 0
        self.pre = 0
        self.startpre = 0
        self.code = False
        self.br_toggle = ''
        self.lastWasNL = 0
        self.lastWasList = False
        self.style = 0
        self.style_def = {}        # parsed <style> rules (google_doc mode)
        self.tag_stack = []
        self.emphasis = 0
        self.drop_white_space = 0
        self.inheader = False
        self.abbr_title = None     # current abbreviation definition
        self.abbr_data = None      # last inner HTML (for abbr being defined)
        self.abbr_list = {}        # stack of abbreviations to write later
        self.baseurl = baseurl

        # nbsp is routed through a placeholder so wrapping cannot split it;
        # close() substitutes the real character back in.
        try: del unifiable_n[name2cp('nbsp')]
        except KeyError: pass
        unifiable['nbsp'] = '&nbsp_place_holder;'

    def feed(self, data):
        # Neutralise JS-escaped closing script tags so the parser's quiet
        # counter stays balanced.
        data = data.replace("</' + 'script>", "</ignore>")
        HTMLParser.HTMLParser.feed(self, data)

    def handle(self, data):
        """Convert *data* (an HTML string) and return the wrapped text."""
        self.feed(data)
        self.feed("")
        return self.optwrap(self.close())

    def outtextf(self, s):
        # Default output sink: buffer fragments for close() to join.
        self.outtextlist.append(s)
        if s: self.lastWasNL = s[-1] == '\n'

    def close(self):
        """Flush pending output and return the accumulated text."""
        HTMLParser.HTMLParser.close(self)

        self.pbr()
        self.o('', 0, 'end')

        self.outtext = self.outtext.join(self.outtextlist)
        if self.unicode_snob:
            nbsp = unichr(name2cp('nbsp'))
        else:
            nbsp = u' '
        self.outtext = self.outtext.replace(u'&nbsp_place_holder;', nbsp)

        return self.outtext

    def handle_charref(self, c):
        # Numeric character reference (&#NNN; / &#xNN;).
        self.o(self.charref(c), 1)

    def handle_entityref(self, c):
        # Named entity reference (&name;).
        self.o(self.entityref(c), 1)

    def handle_starttag(self, tag, attrs):
        self.handle_tag(tag, attrs, 1)

    def handle_endtag(self, tag):
        self.handle_tag(tag, None, 0)

    def previousIndex(self, attrs):
        """ returns the index of certain set of attributes (of a link) in the
            self.a list

            If the set of attributes is not found, returns None
        """
        if not has_key(attrs, 'href'): return None

        i = -1
        for a in self.a:
            i += 1
            match = 0

            if has_key(a, 'href') and a['href'] == attrs['href']:
                if has_key(a, 'title') or has_key(attrs, 'title'):
                    if (has_key(a, 'title') and has_key(attrs, 'title') and
                            a['title'] == attrs['title']):
                        match = True
                else:
                    match = True

            if match: return i

    def drop_last(self, nLetters):
        # Trim already-emitted characters (used to erase empty emphasis marks).
        if not self.quiet:
            self.outtext = self.outtext[:-nLetters]

    def handle_emphasis(self, start, tag_style, parent_style):
        """handles various text emphases"""
        tag_emphasis = google_text_emphasis(tag_style)
        parent_emphasis = google_text_emphasis(parent_style)

        # handle Google's text emphasis
        strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
        bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
        italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
        fixed = google_fixed_width_font(tag_style) and not \
                google_fixed_width_font(parent_style) and not self.pre

        if start:
            # crossed-out text must be handled before other attributes
            # in order not to output qualifiers unnecessarily
            if bold or italic or fixed:
                self.emphasis += 1
            if strikethrough:
                self.quiet += 1
            if italic:
                self.o(self.emphasis_mark)
                self.drop_white_space += 1
            if bold:
                self.o(self.strong_mark)
                self.drop_white_space += 1
            if fixed:
                self.o('`')
                self.drop_white_space += 1
                self.code = True
        else:
            if bold or italic or fixed:
                # there must not be whitespace before closing emphasis mark
                self.emphasis -= 1
                self.space = 0
                self.outtext = self.outtext.rstrip()
            if fixed:
                if self.drop_white_space:
                    # empty emphasis, drop it
                    self.drop_last(1)
                    self.drop_white_space -= 1
                else:
                    self.o('`')
                self.code = False
            if bold:
                if self.drop_white_space:
                    # empty emphasis, drop it
                    self.drop_last(2)
                    self.drop_white_space -= 1
                else:
                    self.o(self.strong_mark)
            if italic:
                if self.drop_white_space:
                    # empty emphasis, drop it
                    self.drop_last(1)
                    self.drop_white_space -= 1
                else:
                    self.o(self.emphasis_mark)
            # space is only allowed after *all* emphasis marks
            if (bold or italic) and not self.emphasis:
                self.o(" ")
            if strikethrough:
                self.quiet -= 1

    def handle_tag(self, tag, attrs, start):
        """Central dispatcher: emit Markdown for one open/close tag event."""
        #attrs = fixattrs(attrs)
        if attrs is None:
            attrs = {}
        else:
            attrs = dict(attrs)

        if self.google_doc:
            # the attrs parameter is empty for a closing tag. in addition, we
            # need the attributes of the parent nodes in order to get a
            # complete style description for the current element. we assume
            # that google docs export well formed html.
            parent_style = {}
            if start:
                if self.tag_stack:
                    parent_style = self.tag_stack[-1][2]
                tag_style = element_style(attrs, self.style_def, parent_style)
                self.tag_stack.append((tag, attrs, tag_style))
            else:
                dummy, attrs, tag_style = self.tag_stack.pop()
                if self.tag_stack:
                    parent_style = self.tag_stack[-1][2]

        if hn(tag):
            self.p()
            if start:
                self.inheader = True
                self.o(hn(tag)*"#" + ' ')
            else:
                self.inheader = False
                return # prevent redundant emphasis marks on headers

        if tag in ['p', 'div']:
            if self.google_doc:
                if start and google_has_height(tag_style):
                    self.p()
                else:
                    self.soft_br()
            else:
                self.p()

        if tag == "br" and start: self.o("  \n")

        if tag == "hr" and start:
            self.p()
            self.o("* * *")
            self.p()

        if tag in ["head", "style", 'script']:
            if start: self.quiet += 1
            else: self.quiet -= 1

        if tag == "style":
            if start: self.style += 1
            else: self.style -= 1

        if tag in ["body"]:
            self.quiet = 0 # sites like 9rules.com never close <head>

        if tag == "blockquote":
            if start:
                self.p(); self.o('> ', 0, 1); self.start = 1
                self.blockquote += 1
            else:
                self.blockquote -= 1
                self.p()

        if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
        if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
        if tag in ['del', 'strike', 's']:
            # Markdown has no strikethrough; pass the raw tag through.
            if start:
                self.o("<"+tag+">")
            else:
                self.o("</"+tag+">")

        if self.google_doc:
            if not self.inheader:
                # handle some font attributes, but leave headers clean
                self.handle_emphasis(start, tag_style, parent_style)

        if tag in ["code", "tt"] and not self.pre: self.o('`') #TODO: `` `this` ``
        if tag == "abbr":
            if start:
                self.abbr_title = None
                self.abbr_data = ''
                if has_key(attrs, 'title'):
                    self.abbr_title = attrs['title']
            else:
                if self.abbr_title != None:
                    self.abbr_list[self.abbr_data] = self.abbr_title
                    self.abbr_title = None
                self.abbr_data = ''

        if tag == "a" and not self.ignore_links:
            if start:
                if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
                    self.astack.append(attrs)
                    # Defer output: a bare-URL anchor becomes <url> instead of [..](..).
                    self.maybe_automatic_link = attrs['href']
                else:
                    self.astack.append(None)
            else:
                if self.astack:
                    a = self.astack.pop()
                    if self.maybe_automatic_link:
                        self.maybe_automatic_link = None
                    elif a:
                        if self.inline_links:
                            self.o("](" + escape_md(a['href']) + ")")
                        else:
                            i = self.previousIndex(a)
                            if i is not None:
                                a = self.a[i]
                            else:
                                self.acount += 1
                                a['count'] = self.acount
                                a['outcount'] = self.outcount
                                self.a.append(a)
                            self.o("][" + str(a['count']) + "]")

        if tag == "img" and start and not self.ignore_images:
            if has_key(attrs, 'src'):
                attrs['href'] = attrs['src']
                alt = attrs.get('alt', '')
                self.o("![" + escape_md(alt) + "]")

                if self.inline_links:
                    self.o("(" + escape_md(attrs['href']) + ")")
                else:
                    i = self.previousIndex(attrs)
                    if i is not None:
                        attrs = self.a[i]
                    else:
                        self.acount += 1
                        attrs['count'] = self.acount
                        attrs['outcount'] = self.outcount
                        self.a.append(attrs)
                    self.o("[" + str(attrs['count']) + "]")

        if tag == 'dl' and start: self.p()
        if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o('    ')
        if tag == 'dd' and not start: self.pbr()

        if tag in ["ol", "ul"]:
            # Google Docs create sub lists as top level lists
            if (not self.list) and (not self.lastWasList):
                self.p()
            if start:
                if self.google_doc:
                    list_style = google_list_style(tag_style)
                else:
                    list_style = tag
                numbering_start = list_numbering_start(attrs)
                self.list.append({'name':list_style, 'num':numbering_start})
            else:
                if self.list: self.list.pop()
            self.lastWasList = True
        else:
            self.lastWasList = False

        if tag == 'li':
            self.pbr()
            if start:
                if self.list: li = self.list[-1]
                else: li = {'name':'ul', 'num':0}
                if self.google_doc:
                    nest_count = self.google_nest_count(tag_style)
                else:
                    nest_count = len(self.list)
                self.o("  " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
                if li['name'] == "ul": self.o(self.ul_item_mark + " ")
                elif li['name'] == "ol":
                    li['num'] += 1
                    self.o(str(li['num'])+". ")
                self.start = 1

        if tag in ["table", "tr"] and start: self.p()

        if tag == 'td': self.pbr()

        if tag == "pre":
            if start:
                self.startpre = 1
                self.pre = 1
            else:
                self.pre = 0
            self.p()

    def pbr(self):
        # Request at most one pending newline (soft paragraph break).
        if self.p_p == 0:
            self.p_p = 1

    def p(self):
        # Request a full paragraph break (blank line).
        self.p_p = 2

    def soft_br(self):
        # Line break that Markdown renders as a hard break (trailing spaces).
        self.pbr()
        self.br_toggle = '  '

    def o(self, data, puredata=0, force=0):
        """Emit *data*, applying pending breaks, blockquote/pre prefixes and
        the deferred reference-link list. puredata=1 collapses whitespace;
        force='end' flushes everything at document end."""
        if self.abbr_data is not None:
            self.abbr_data += data

        if not self.quiet:
            if self.google_doc:
                # prevent white space immediately after 'begin emphasis' marks ('**' and '_')
                lstripped_data = data.lstrip()
                if self.drop_white_space and not (self.pre or self.code):
                    data = lstripped_data
                if lstripped_data != '':
                    self.drop_white_space = 0

            if puredata and not self.pre:
                data = re.sub('\s+', ' ', data)
                if data and data[0] == ' ':
                    self.space = 1
                    data = data[1:]
            if not data and not force: return

            if self.startpre:
                #self.out(" :") #TODO: not output when already one there
                if not data.startswith("\n"):  # <pre>stuff...
                    data = "\n" + data

            bq = (">" * self.blockquote)
            if not (force and data and data[0] == ">") and self.blockquote: bq += " "

            if self.pre:
                if not self.list:
                    bq += "    "
                #else: list content is already partially indented
                for i in xrange(len(self.list)):
                    bq += "    "
                data = data.replace("\n", "\n"+bq)

            if self.startpre:
                self.startpre = 0
                if self.list:
                    data = data.lstrip("\n") # use existing initial indentation

            if self.start:
                self.space = 0
                self.p_p = 0
                self.start = 0

            if force == 'end':
                # It's the end.
                self.p_p = 0
                self.out("\n")
                self.space = 0

            if self.p_p:
                self.out((self.br_toggle+'\n'+bq)*self.p_p)
                self.space = 0
                self.br_toggle = ''

            if self.space:
                if not self.lastWasNL: self.out(' ')
                self.space = 0

            if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
                if force == "end": self.out("\n")

                newa = []
                for link in self.a:
                    if self.outcount > link['outcount']:
                        self.out("   ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
                        if has_key(link, 'title'): self.out(" ("+link['title']+")")
                        self.out("\n")
                    else:
                        newa.append(link)

                if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.

                self.a = newa

            if self.abbr_list and force == "end":
                for abbr, definition in self.abbr_list.items():
                    self.out("  *[" + abbr + "]: " + definition + "\n")

            self.p_p = 0
            self.out(data)
            self.outcount += 1

    def handle_data(self, data):
        """Text node callback: resolve automatic links, collect <style> CSS,
        escape Markdown metacharacters, and emit."""
        if r'\/script>' in data: self.quiet -= 1

        if self.style:
            self.style_def.update(dumb_css_parser(data))

        if not self.maybe_automatic_link is None:
            href = self.maybe_automatic_link
            if href == data and self.absolute_url_matcher.match(href):
                # Link text equals its absolute target: emit <url> shorthand.
                self.o("<" + data + ">")
                return
            else:
                self.o("[")
                self.maybe_automatic_link = None

        if not self.code and not self.pre:
            data = escape_md_section(data, snob=self.escape_snob)
        self.o(data, 1)

    def unknown_decl(self, data): pass

    def charref(self, name):
        """Resolve a numeric character reference to a character (or its
        ascii replacement when unicode_snob is off)."""
        if name[0] in ['x','X']:
            c = int(name[1:], 16)
        else:
            c = int(name)

        if not self.unicode_snob and c in unifiable_n.keys():
            return unifiable_n[c]
        else:
            try:
                return unichr(c)
            except NameError: #Python3
                return chr(c)

    def entityref(self, c):
        """Resolve a named entity reference; unknown names are passed
        through verbatim as '&name;'."""
        if not self.unicode_snob and c in unifiable.keys():
            return unifiable[c]
        else:
            try: name2cp(c)
            except KeyError: return "&" + c + ';'
            else:
                try:
                    return unichr(name2cp(c))
                except NameError: #Python3
                    return chr(name2cp(c))

    def replaceEntities(self, s):
        # re.sub callback used by unescape().
        s = s.group(1)
        if s[0] == "#":
            return self.charref(s[1:])
        else: return self.entityref(s)

    r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
    def unescape(self, s):
        """Expand all HTML entity/character references in *s*."""
        return self.r_unescape.sub(self.replaceEntities, s)

    def google_nest_count(self, style):
        """calculate the nesting count of google doc lists"""
        nest_count = 0
        if 'margin-left' in style:
            # Google encodes nesting depth as a pixel margin; one level per
            # google_list_indent pixels. '[:-2]' strips the 'px' suffix.
            nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
        return nest_count

    def optwrap(self, text):
        """Wrap all paragraphs in the provided text."""
        if not self.body_width:
            return text

        assert wrap, "Requires Python 2.3."
        result = ''
        newlines = 0
        for para in text.split("\n"):
            if len(para) > 0:
                if not skipwrap(para):
                    result += "\n".join(wrap(para, self.body_width))
                    if para.endswith('  '):
                        # Preserve Markdown hard line breaks (trailing spaces).
                        result += "  \n"
                        newlines = 1
                    else:
                        result += "\n\n"
                        newlines = 2
                else:
                    if not onlywhite(para):
                        result += para + "\n"
                        newlines = 1
            else:
                if newlines < 2:
                    result += "\n"
                    newlines += 1
        return result
ordered_list_matcher = re.compile(r'\d+\.\s')
unordered_list_matcher = re.compile(r'[-\*\+]\s')
md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
md_dot_matcher = re.compile(r"""
^ # start of line
(\s*\d+) # optional whitespace and a number
(\.) # dot
(?=\s) # lookahead assert whitespace
""", re.MULTILINE | re.VERBOSE)
md_plus_matcher = re.compile(r"""
^
(\s*)
(\+)
(?=\s)
""", flags=re.MULTILINE | re.VERBOSE)
md_dash_matcher = re.compile(r"""
^
(\s*)
(-)
(?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
# or another dash (header or hr)
""", flags=re.MULTILINE | re.VERBOSE)
slash_chars = r'\`*_{}[]()#+-.!'
md_backslash_matcher = re.compile(r'''
(\\) # match one slash
(?=[%s]) # followed by a char that requires escaping
''' % re.escape(slash_chars),
flags=re.VERBOSE)
def skipwrap(para):
    """Return True when *para* must not be line-wrapped.

    Code blocks, list items, bullets and horizontal rules keep their exact
    layout; everything else may be wrapped by ``optwrap``.
    """
    # If the text begins with four spaces or one tab, it's a code block; don't wrap.
    # (Slice-based indexing keeps an empty paragraph from raising IndexError.)
    if para[0:4] == '    ' or para[0:1] == '\t':
        return True
    # If the text begins with only two "--", possibly preceded by whitespace, that's
    # an emdash; so wrap.
    stripped = para.lstrip()
    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
        return False
    # I'm not sure what this is for; I thought it was to detect lists, but there's
    # a <br>-inside-<span> case in one of the tests that also depends upon it.
    if stripped[0:1] == '-' or stripped[0:1] == '*':
        return True
    # If the text begins with a single -, *, or +, followed by a space, or an integer,
    # followed by a ., followed by a space (in either case optionally preceded by
    # whitespace), it's a list; don't wrap.
    if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
        return True
    return False
def wrapwrite(text):
    """Write *text* to stdout as UTF-8, portably across Python 2 and 3."""
    encoded = text.encode('utf-8')
    try:  # Python3: bytes must go to the underlying binary buffer
        stream = sys.stdout.buffer
    except AttributeError:
        stream = sys.stdout
    stream.write(encoded)
def html2text(html, baseurl=''):
    """Convert an HTML string to Markdown-structured plain text.

    *baseurl* is used to resolve relative link targets in the output's
    reference list. This is the module's main public entry point.
    """
    h = HTML2Text(baseurl=baseurl)
    return h.handle(html)
def unescape(s, unicode_snob=False):
    """Expand HTML entity references in *s* using a throwaway converter.

    With *unicode_snob* set, entities resolve to real Unicode characters
    instead of their ascii replacements.
    """
    h = HTML2Text()
    h.unicode_snob = unicode_snob
    return h.unescape(s)
def escape_md(text):
    """Escapes markdown-sensitive characters within other markdown constructs."""
    # Only backslash, brackets and parens — the minimal set that would
    # otherwise break link/image syntax.
    return md_chars_matcher.sub(r"\\\1", text)
def escape_md_section(text, snob=False):
    """Escapes markdown-sensitive characters across whole document sections."""
    # Escape pre-existing backslashes first so later substitutions cannot
    # double-escape.
    text = md_backslash_matcher.sub(r"\\\1", text)

    if snob:
        # Escape every markdown metacharacter (safer, less readable output).
        text = md_chars_matcher_all.sub(r"\\\1", text)

    # Neutralise line-leading tokens that would read as lists/rules:
    # "1." / "+" / "-" followed by whitespace.
    text = md_dot_matcher.sub(r"\1\\\2", text)
    text = md_plus_matcher.sub(r"\1\\\2", text)
    text = md_dash_matcher.sub(r"\1\\\2", text)

    return text
def main():
    """CLI entry point: read HTML from a file, URL or stdin; write the
    converted text to stdout. Options mirror the module-level defaults."""
    baseurl = ''

    p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
                              version='%prog ' + __version__)
    p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
        default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
    p.add_option("--ignore-links", dest="ignore_links", action="store_true",
        default=IGNORE_ANCHORS, help="don't include any formatting for links")
    p.add_option("--ignore-images", dest="ignore_images", action="store_true",
        default=IGNORE_IMAGES, help="don't include any formatting for images")
    p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
        default=False, help="convert an html-exported Google Document")
    p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
        default=False, help="use a dash rather than a star for unordered list items")
    p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
        default=False, help="use an asterisk rather than an underscore for emphasized text")
    p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
        default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
    p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
        default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
    p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
        default=False, help="hide strike-through text. only relevant when -g is specified as well")
    p.add_option("--escape-all", action="store_true", dest="escape_snob",
        default=False, help="Escape all special characters.  Output is less readable, but avoids corner case formatting issues.")
    (options, args) = p.parse_args()

    # process input
    encoding = "utf-8"
    if len(args) > 0:
        file_ = args[0]
        if len(args) == 2:
            encoding = args[1]
        if len(args) > 2:
            p.error('Too many arguments')

        if file_.startswith('http://') or file_.startswith('https://'):
            baseurl = file_
            j = urllib.urlopen(baseurl)
            data = j.read()
            # NOTE(review): encoding defaults to "utf-8" above, so these
            # `encoding is None` sniffing branches appear unreachable from
            # the CLI as written — confirm before relying on them.
            if encoding is None:
                try:
                    from feedparser import _getCharacterEncoding as enc
                except ImportError:
                    enc = lambda x, y: ('utf-8', 1)
                encoding = enc(j.headers, data)[0]
                if encoding == 'us-ascii':
                    encoding = 'utf-8'
        else:
            data = open(file_, 'rb').read()
            if encoding is None:
                try:
                    from chardet import detect
                except ImportError:
                    detect = lambda x: {'encoding': 'utf-8'}
                encoding = detect(data)['encoding']
    else:
        data = sys.stdin.read()

    data = data.decode(encoding)
    h = HTML2Text(baseurl=baseurl)
    # handle options
    if options.ul_style_dash: h.ul_item_mark = '-'
    if options.em_style_asterisk:
        h.emphasis_mark = '*'
        h.strong_mark = '__'

    h.body_width = options.body_width
    h.list_indent = options.list_indent
    h.ignore_emphasis = options.ignore_emphasis
    h.ignore_links = options.ignore_links
    h.ignore_images = options.ignore_images
    h.google_doc = options.google_doc
    h.hide_strikethrough = options.hide_strikethrough
    h.escape_snob = options.escape_snob

    wrapwrite(h.handle(data))
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()

View file

@ -27,8 +27,8 @@
"public/js/lib/slickgrid/slick.grid.css",
"public/js/lib/slickgrid/slick-default-theme.css",
"public/css/tag-it.css",
"public/css/bootstrap.css",
"public/css/tag-it.css",
"public/css/font-awesome.css",
"public/css/octicons/octicons.css",
"public/css/desk.css",

View file

@ -179,6 +179,12 @@ em.link-option {
.indicator.orange::before {
background: #ffa00a;
}
.indicator.purple::before {
background: #743ee2;
}
.indicator.darkgrey::before {
background: #b8c2cc;
}
/* listing */
.no-result {
padding: 15px;
@ -250,6 +256,15 @@ em.link-option {
.progress {
height: 10px;
}
.doclist-row {
font-size: 12px;
}
.doclist-row .list-id {
font-size: 14px;
}
.doclist-row .docstatus .octicon {
font-size: 12px;
}
.doclist-row .progress {
margin-top: 12px;
}

View file

@ -860,7 +860,7 @@ frappe.ui.form.ControlLink = frappe.ui.form.ControlData.extend({
frappe.set_route("Form", me.get_options(), value);
});
if(this.only_input) this.$input_area.find(".btn-open").remove();
if(this.only_input) this.$input_area.find(".link-btn").remove();
},
open_advanced_search: function() {
var doctype = this.get_options();

View file

@ -7,7 +7,7 @@ frappe.ui.form.Dashboard = Class.extend({
this.wrapper = $('<div class="form-dashboard"></div>')
.prependTo(this.frm.layout.wrapper);
this.body = $('<div class="row"></div>').appendTo(this.wrapper)
.css("padding", "15px");
.css("padding", "15px 30px");
},
reset: function() {

View file

@ -2,7 +2,7 @@
<div class="toolbar">
<span class="panel-title">
{%= __("Editing Row") %} #<span class="grid-form-row-index"></span></span>
<button class="btn btn-default btn-xs pull-right"style="margin-left: 7px;">
<button class="btn btn-success btn-xs pull-right"style="margin-left: 7px;">
{%= __("Done") %}</button>
<button class="btn btn-default btn-xs pull-right grid-insert-row"
style="margin-left: 7px;">

View file

@ -52,7 +52,7 @@ frappe.ui.form.Layout = Class.extend({
setTimeout(function() {
me.wrapper.find(".empty-form-alert").remove();
if(!(me.wrapper.find(".frappe-control:visible").length)) {
$('<div class="alert alert-info empty-form-alert">'+__("This form does not have any input")+'</div>')
$('<div class="empty-form-alert text-muted" style="margin: 15px; margin-top: -15px;">'+__("This form does not have any input")+'</div>')
.appendTo(me.wrapper)
}
}, 100);

View file

@ -47,11 +47,11 @@ frappe.ui.form.Toolbar = Class.extend({
if(this.frm.meta.is_submittable && !this.frm.doc.__islocal) {
switch(this.frm.doc.docstatus) {
case 0:
return this.page.set_indicator(__("Draft"), "blue");
return this.page.set_indicator(__("Draft"), "red");
case 1:
return this.page.set_indicator(__("Submitted"), "blue");
case 2:
return this.page.set_indicator(__("Cancelled"), "red");
return this.page.set_indicator(__("Cancelled"), "grey");
}
} else {
this.page.clear_indicator();

View file

@ -24,6 +24,7 @@ frappe.views.ListFactory = frappe.views.Factory.extend({
show: function() {
this._super();
this.set_cur_list();
cur_list && cur_list.refresh();
},
set_cur_list: function() {
cur_list = frappe.container.page && frappe.container.page.doclistview;
@ -49,13 +50,8 @@ frappe.views.DocListView = frappe.ui.Listing.extend({
this.make_page();
this.setup();
var me = this;
$(this.parent).on("show", function() {
me.refresh();
});
// refresh on init
me.refresh();
this.refresh();
},
make_page: function() {
@ -246,6 +242,7 @@ frappe.views.DocListView = frappe.ui.Listing.extend({
});
}
this.last_updated_on = new Date();
this.dirty = false;
this._super(more);
},

View file

@ -1,8 +1,8 @@
<div class="row doclist-row">
<div class="col-xs-10">
<div class="col-sm-10 col-xs-8">
{%= main %}
</div>
<div class="col-xs-2 text-right list-row-right">
<div class="col-sm-2 col-xs-4 text-right list-row-right">
{% if (data._assign_list.length) { %}
<span class="filterable"
data-filter="_assign,like,%{%= data._assign_list[data._assign_list.length - 1] %}%">
@ -24,7 +24,10 @@
{%= comment_when(data.modified) %}
{% if (data._tags && data._tags.length) { %}
<span style="margin-right: 10px;" class="list-tag-preview">
{%= data._tags.join(", ") %}
{% for (var i=0, l=data._tags.length; i < l; i++) { %}
<span class="label label-info filterable"
data-filter="_user_tags,like,%{%= data._tags[i] %}%">{%= data._tags[i] %}</span>
{% } %}
</span>
{% } %}
</div>

View file

@ -1,21 +1,23 @@
<div class="row">
<div class="col-sm-{%= subject_cols %}">
<div class="text-ellipsis">{%= subject %}</div>
</div>
{% var total_cols=parseInt(subject_cols); for (var i=0, l=columns.length; i<l; i++ ) {
{% var total_cols=0; for (var i=0, l=columns.length; i < l; i++ ) {
var col = columns[i], value=data[col.fieldname]; total_cols += parseInt(col.colspan); %}
{% if (total_cols < 12) { %}
<div class="col-sm-{%= col.colspan %} list-item-col">
<div class="text-ellipsis">
{% if (col.fieldtype==="Image") { %}
{% if (total_cols <= 12) { %}
<div class="col-sm-{%= col.colspan %}
{% if(col.type==="Subject") { %}col-xs-12{% } else { %}hidden-xs{% } %}
list-item-col text-ellipsis" title="{%= col.title + ": " + value %}">
{% if (col.type==="Subject") { %}
{%= subject %}
{% } else if (col.fieldtype==="Image") { %}
<img src="{%= value %}" style="max-height: 30px; max-width: 100%;">
{% } else if(col.fieldtype==="Select") { %}
<span class="filterable label label-{%= frappe.utils.guess_style(value) %}"
<span class="filterable indicator {%= frappe.utils.guess_colour(value) %}"
data-filter="{%= col.fieldname %},=,{%= value %}">{%= value %}</span>
{% } else if(col.fieldtype==="Link") { %}
<a class="filterable grey"
data-filter="{%= col.fieldname %},=,{%= value %}">{%= value %}</a>
{% } else { %}
{%= frappe.format(value, col) %}
{% } %}
</div>
</div>
{% } %}
{% } %}

View file

@ -1,5 +1,6 @@
<i class="icon-star{% if (_starred_by.indexOf(_user)===-1) { %}-empty{% } %}
icon-fixed-width star-action text-muted"
<i class="icon-star {% if (_starred_by.indexOf(_user)===-1) {
%}text-extra-muted not-starred{% } else { %}{% }%}
icon-fixed-width star-action"
style="margin-right: 10px; cursor: pointer" data-name="{%= _name %}">
</i>
{% if (_checkbox) { %}
@ -8,14 +9,14 @@
<a class="grey list-id" style="margin-right: 10px;"
href="#Form/{%= _doctype_encoded %}/{%= _name_encoded %}" title="{%= _full_title %}">{%= _title %}</a>
{% if (_submittable) { %}
<span class="docstatus filterable" style="margin-right: 10px;" data-filter="docstatus,=,{%= docstatus %}">
<!-- <span class="docstatus filterable" style="margin-right: 10px;" data-filter="docstatus,=,{%= docstatus %}">
{% if (docstatus===0) { %}
<i class="octicon octicon-pencil" title="{%= __("Draft") %}"></i></span>
<i class="octicon octicon-pencil text-muted" title="{%= __("Draft") %}"></i></span>
{% } else if (docstatus===1) { %}
<i class="octicon octicon-lock" title="{%= __("Submitted") %}"></i></span>
<i class="octicon octicon-lock text-muted" title="{%= __("Submitted") %}"></i></span>
{% } else if (docstatus===2) { %}
<i class="octicon octicon-x" title="{%= __("Cancelled") %}"></i></span>
{% } %}
<i class="octicon octicon-x text-muted" title="{%= __("Cancelled") %}"></i></span>
{% } %} -->
{% } %}
{% if (_workflow && !_without_workflow) { %}
<span class="label label-{%= _workflow.style %} filterable"

View file

@ -99,7 +99,12 @@ frappe.views.ListView = Class.extend({
},
set_columns: function() {
this.columns = [];
this.total_colspans = 0;
this.total_colspans = 4;
this.columns.push({
colspan: 4,
type: "Subject"
});
var me = this;
if(this.workflow_state_fieldname) {
this.columns.push({
@ -107,6 +112,7 @@ frappe.views.ListView = Class.extend({
content: this.workflow_state_fieldname,
type:"select"
});
this.total_colspans += 3;
}
// overridden
@ -136,28 +142,24 @@ frappe.views.ListView = Class.extend({
}
var empty_cols = flt(12 - this.total_colspans);
this.shift_right = cint(empty_cols * 0.6667);
if(this.shift_right < 0) {
this.shift_right = 0;
} else if (this.shift_right > 1) {
// expand each column so that it fills up empty_cols
$.each(this.columns, function(i, c) {
c.colspan = cint(empty_cols / me.columns.length) + cint(c.colspan);
})
while(empty_cols > 0) {
for(var i=0, l=this.columns.length; i < l && empty_cols > 0; i++) {
this.columns[i].colspan = cint(this.columns[i].colspan) + 1;
empty_cols = empty_cols - 1;
}
}
},
add_column: function(df) {
// field width
var colspan = "3";
var colspan = 3;
if(in_list(["Int", "Percent", "Select"], df.fieldtype)) {
colspan = "2";
colspan = 2;
} else if(df.fieldtype=="Check") {
colspan = "1";
colspan = 1;
} else if(in_list(["name", "subject", "title"], df.fieldname)) { // subjects are longer
colspan = "4";
colspan = 4;
} else if(df.fieldtype=="Text Editor" || df.fieldtype=="Text") {
colspan = "4";
colspan = 4;
}
this.total_colspans += parseInt(colspan);
this.columns.push({
@ -194,7 +196,6 @@ frappe.views.ListView = Class.extend({
data: data,
columns: this.columns,
subject: this.get_avatar_and_id(data, true),
subject_cols: 4 + this.shift_right
});
}

View file

@ -178,21 +178,30 @@ frappe.utils = {
// test regExp if not null
return '' !== val ? regExp.test( val ) : false;
},
guess_style: function(text, default_style) {
guess_style: function(text, default_style, _colour) {
var style = default_style || "default";
if(!text)
return style;
if(has_words(["Pending", "Review", "Medium"], text)) {
style = "warning";
} else if(has_words(["Open", "Rejected", "Urgent", "High"], text)) {
style = "danger";
} else if(has_words(["Closed", "Finished", "Converted", "Completed", "Confirmed",
"Approved", "Yes", "Active"], text)) {
style = "success";
} else if(has_words(["Submitted"], text)) {
style = "info";
var colour = "darkgrey";
if(text) {
if(has_words(["Pending", "Review", "Medium"], text)) {
style = "warning";
colour = "orange";
} else if(has_words(["Open", "Rejected", "Urgent", "High"], text)) {
style = "danger";
colour = "red";
} else if(has_words(["Closed", "Finished", "Converted", "Completed", "Confirmed",
"Approved", "Yes", "Active", "Available", "Paid"], text)) {
style = "success";
colour = "green";
} else if(has_words(["Submitted"], text)) {
style = "info";
colour = "blue";
}
}
return style;
return _colour ? colour : style;
},
guess_colour: function(text) {
return frappe.utils.guess_style(text, null, true);
},
sort: function(list, key, compare_type, reverse) {

View file

@ -391,7 +391,6 @@ $.extend(frappe.model, {
d.set_primary_action(__("Rename"), function() {
var args = d.get_values();
if(!args) return;
d.get_input("rename").set_working();
return frappe.call({
method:"frappe.model.rename_doc.rename_doc",
args: {
@ -400,8 +399,8 @@ $.extend(frappe.model, {
"new": args.new_name,
"merge": args.merge
},
btn: d.get_primary_btn(),
callback: function(r,rt) {
d.get_input("rename").done_working();
if(!r.exc) {
$(document).trigger('rename', [doctype, docname,
r.message || args.new_name]);

View file

@ -2,7 +2,7 @@
// MIT License. See license.txt
frappe.ui.toggle_star = function($btn, doctype, name) {
var add = $btn.hasClass("icon-star-empty") ? "Yes" : "No";
var add = $btn.hasClass("not-starred") ? "Yes" : "No";
frappe.call({
method: "frappe.desk.star.toggle_star",
quiet: true,
@ -17,13 +17,13 @@ frappe.ui.toggle_star = function($btn, doctype, name) {
var action_buttons = $(".star-action[data-name='"+ name.replace(/"/g, '\"') +"']");
if(add==="Yes") {
action_buttons.removeClass("icon-star-empty").addClass("icon-star");
action_buttons.removeClass("not-starred").removeClass("text-extra-muted");
} else {
action_buttons.removeClass("icon-star").addClass("icon-star-empty");
action_buttons.addClass("not-starred").addClass("text-extra-muted");
}
// update in locals (form)
var doc = locals[doctype][name];
var doc = locals[doctype] && locals[doctype][name];
if(doc) {
var starred_by = JSON.parse(doc._starred_by || "[]"),
idx = starred_by.indexOf(user);

View file

@ -89,14 +89,18 @@ frappe.views.Container = Class.extend({
if(breadcrumbs.module && breadcrumbs.module != "Desk") {
if(in_list(["Core", "Email", "Custom"], breadcrumbs.module))
breadcrumbs.module = "Setup";
divider();
var module_info = frappe.get_module(breadcrumbs.module),
icon = module_info && module_info.icon,
label = module_info ? module_info.label : breadcrumbs.module;
if(icon) {
icon = '<span class="'+icon+' text-muted"></span> '
if(module_info) {
divider();
if(icon) {
icon = '<span class="'+icon+' text-muted"></span> '
}
$('<li><a href="#Module/'+ breadcrumbs.module +'">'+ icon + __(label) +'</a></li>').appendTo($breadcrumbs);
}
$('<li><a href="#Module/'+ breadcrumbs.module +'">'+ icon + __(label) +'</a></li>').appendTo($breadcrumbs);
}
if(breadcrumbs.doctype) {
divider();

View file

@ -206,6 +206,13 @@ em.link-option {
.indicator.orange::before {
background: @indicator-orange;
}
.indicator.purple::before {
background: @indicator-purple;
}
.indicator.darkgrey::before {
background: @indicator-darkgrey;
}
@ -295,6 +302,18 @@ em.link-option {
height: 10px;
}
.doclist-row {
font-size: 12px;
}
.doclist-row .list-id {
font-size: 14px;
}
.doclist-row .docstatus .octicon {
font-size: 12px;
}
.doclist-row .progress {
margin-top: 12px;
}

View file

@ -11,3 +11,5 @@
@indicator-red: #ff5858;
@indicator-green: #98d85b;
@indicator-orange: #ffa00a;
@indicator-purple: #743ee2;
@indicator-darkgrey: #b8c2cc;

View file

@ -23,3 +23,4 @@ selenium
pdfkit
babel
ipython
html2text