import sys, os, re, time
import yaml
from copy import deepcopy
from math import ceil
import tiny_bits
from eon_date import eon_date
from general_lists import list_of_top_level_domains, table_by_month
# AUTHOR and TITLE are always present
#
# TYPE is optional, but better to include it. It provides an explicit
# mechanism for classifying papers. It is not necessarily used to format
# the publication info into a specific reference format.
#
# Possible values:
#
# in_journal = journal article
# in_proceedings = paper in the proceedings of a
# conf., workshop, symp., etc.
# in_book = contribution to book (usually a chapter)
# in_news = piece in newsletter-type publication
# (e.g. EOS)
# in_thesis = used for co-authors in chapters in thesis
# PhD/MSc/BSc thesis = thesis
# Tech./Sci./Int. report = "technical", "scientific", "internal" report
# (typically used for internal documents)
# abstract = used when only an abstract is available
# (e.g. in EOS)
# book = for whole book (either as author of a whole
# book, or as editor of collection of papers)
# proceedings = for whole proceedings (as editor)
# online = used for material published on-line only
# presentation = oral presentation (usually links to slides)
# unpublished = unpublished material (last resort if none
# of the above fit)
# book review = book review
# editorial = editorial (used for solar physics editorials)
#
# AUTHOR : comma separated list of last name and initials
# Name1, A., Name2, B., Name3, C.
# Always has an even number of comma-separated elements
# TITLE : Title of paper
#
# Recognized reference-record keys.  For each key: whether it is mandatory,
# and the position in which it is rendered (see Reference.__repr__).
# Note the deliberate gap in the order values: the REMARK* keys sort last.
g_REF_KEYS = {
    key: {'mandatory': mandatory, 'order': order}
    for key, mandatory, order in (
        ('MARKER' , True , 0 ),
        ('TYPE'   , True , 1 ),
        ('AUTHOR' , False, 2 ),  # True for bibtex?
        ('GEOLOC' , False, 3 ),
        ('AFFIL'  , False, 4 ),
        ('TITLE'  , True , 5 ),  # For bibtex
        ('EDITOR' , False, 6 ),
        ('CTITLE' , False, 7 ),
        ('SERIAL' , False, 8 ),
        ('CHAPTER', False, 9 ),
        ('VOLUME' , False, 10),
        ('PAGE'   , False, 11),
        ('ARTICLE', False, 12),
        ('YEAR'   , True , 13),
        ('PUBLISH', False, 14),
        ('PLACE'  , False, 15),
        ('DOI'    , False, 16),
        ('URL'    , False, 17),
        ('TOPKEY' , False, 18),
        ('FUNDING', False, 19),
        ('FILE'   , False, 20),
        ('ABS'    , False, 21),
        ('REMARK' , False, 30),
        ('REMARK0', False, 31),
        ('REMARK1', False, 32),
        ('REMARK2', False, 33),
    )
}
# Gives name of calling function
# used to set up message when raising exception
#def whoami(): Moved to tiny_bits
# return sys._getframe(1).f_code.co_name
class ReferenceError(Exception):
    """Raised when a reference record is malformed or incomplete.

    NOTE(review): this name shadows the builtin ReferenceError; renaming
    would break existing callers/except clauses, so it is kept as-is.
    """
    def __init__(self, message):
        # Chain to Exception so args, pickling and generic handlers work.
        super(ReferenceError, self).__init__(message)
        self.message = message

    def __str__(self):
        return self.message
class Author( object ):
    """A single author's details paired with the publication timestamp."""

    def __init__(self, timestamp, author):
        # 'author' is stored untouched under feature['details'].
        self.timestamp = timestamp
        self.feature = dict(details=author)
class AuthorList( object ):
    """Flatten a ReferenceList into one Author atom per (reference, author)."""

    def __init__(self, ref_list):
        atoms = []
        for ref in ref_list.atoms:
            for person in ref.feature['author']:
                atoms.append( Author(ref.timestamp, person) )
        self.atoms = atoms
class Reference( object ):
# TODO: Highlight single author with
# TODO: Create btx entry
    def __init__(self, hash):
        """Create a Reference from a dict of raw record fields.

        hash : dict mapping keys from g_REF_KEYS to string values.
        Raises ReferenceError (via validate/split_names) on malformed input.
        NOTE(review): the parameter name 'hash' shadows the builtin; it is
        used consistently throughout this class.
        """
        self.hash = hash
        # Should be turned into author objects (author,editor)
        # and keyword stack object (topkey)???
        self.feature = {
            'author': self.split_names () , # Can be empty, but never is (would fail validation)
            'editor': self.split_names ('EDITOR'), # Can be empty
            'topkey': self.split_topkey() , # Can be empty
            'remark': self.all_remarks () , # Can be empty
        }
        self.validate()
        # If the month is not specified self.time is set to Jan 1 of the year
        # and self.has_month is set to False.
        # YEAR is 'YYYY' or 'YYYY-MM'; 'YYYY-00' counts as month-unknown.
        self.has_month = len(hash['YEAR']) == 7 and '-00' not in hash['YEAR']
        year = hash['YEAR'] if self.has_month else hash['YEAR'][0:4]+'-01'
        self.timestamp = eon_date(year,format='YYYY-MM')
        # Bibtex citation key; presumably assigned later by
        # ReferenceList.generate_bibtex() -- confirm (not visible here).
        self.bibtex = None
        return
def __repr__(self):
str = ''
for key in sorted( g_REF_KEYS.keys(), key=lambda k: g_REF_KEYS[k]['order'] ):
if self.hash.has_key(key):
str += "%10s: %s\n"%(key,self.hash[key])
if self.bibtex != None:
str += "%10s: %s\n"%('BIBTEX',self.bibtex)
return str
#
# 2002-10
# C. Estan and G. Varghese
# New directions in traffic measurement and accounting
# in: SIGCOMM Conf.
# ACM SIGCOMM Computer Communication Review 32 (4), 323-336, October 2002
#Keys: passive-oc48
# Remark: No acknowledgement or reference to dataset
#
#
'
else:
soft_break = '\n'
hard_break = soft_break
# 1. author names
line = self.join_names(key='AUTHOR',html=html_par or html_list)
str += ('%s'%line if html_par else line)+hard_break
# 2. paper title
line = hash['TITLE']
str += ( line if html_par else '"%s"'%line )+hard_break
page_and_year_needed = False
page_and_year_used = False
volume_needed = False
if hash.has_key('CTITLE'):
# 3. conference proceedings/book
str += 'in: '
# 3a. editor(s) of proceedings
if hash.has_key('EDITOR'):
str += self.join_names(key='EDITOR',html=html_par or html_list)+' (ed'+['','s'][len(self.feature['editor']) > 1]+'.), '
# 3b. title of proceedings or book
line = hash['CTITLE']
for cmark in ['Conf. on','Workshop on','Symp. on']:
if cmark in line:
cpos = line.find(cmark)+len(cmark)+1
line = line[0:cpos]+'"'+line[cpos:]+'"'
str += line
# 3c chapter of book
if hash.has_key('CHAPTER'):
line = hash['CHAPTER']
str += ', %s %s'%( line.split('=') if '=' in line else ('Ch.',line) )
# Conf proceedings could still be published in a 'serial' publication, e.g.
# Springer Lecture Notes. If there is, then start a new line for the
# serial publication.
if hash.has_key('SERIAL'):
str += hard_break
page_and_year_needed = True
volume_needed = True
if hash.has_key('SERIAL'):
str += hash['SERIAL']
page_and_year_needed = True
volume_needed = True
# The volume comes after the conf title (if no serial publication is specified),
# or after the serial publication
if volume_needed and hash.has_key('VOLUME'):
line = hash['VOLUME']
str += ' '+line+'' if html_par else ' Vol. '+line
# The page gets printed here if the conf title and/or serial publication
# has been printed already. If these are not present, the page numbers get
# printed with the publ. type after the publisher.
if page_and_year_needed:
if hash.has_key('PAGE') or hash.has_key('ARTICLE'):
line = hash['PAGE'] if hash.has_key('PAGE') else hash['ARTICLE']
str += (
' ' if line[0] == '(' else
', ' if hash.has_key('VOLUME' ) else
', art. ' if hash.has_key('ARTICLE') else
', p. '
)+line
str += '%s %s'%(', '+month[1] if self.has_month else ',',year)+hard_break
page_and_year_used = True
# Start new line with publisher
# If VOLUME is present, and it was not used after CTITLE or SERIAL
# then use it here after PUBLISH (used for thesis and tech report)
if hash.has_key('PUBLISH'):
str += hash['PUBLISH']
if not volume_needed and hash.has_key('VOLUME'):
str += ' ('+hash['VOLUME']+')'
if page_and_year_used:
str += hard_break
# Append page, year and type to publisher if not used yet
if not page_and_year_used:
line = hash['TYPE']
str += '%s %s'%(', '+month[1] if self.has_month else ',',year) + \
(
'; %s %s'%(line,hash['PAGE']) if hash.has_key('PAGE') else
'; %s %s'%(line,hash['ARTICLE']) if hash.has_key('ARTICLE') else
'; %s'%line
) + \
hard_break
if html_par:
str += 'Keys: %s'%', '.join(self.feature['topkey'])+hard_break
if hash.has_key('FUNDING'):
str += 'Funding: %s'%hash['FUNDING']+hard_break
if html_root != '':
if os.path.isfile( os.path.join(html_root,'abstract','%s.html'%hash['MARKER']) ):
str += ''%hash['MARKER']+soft_break
if os.path.isfile( os.path.join(html_root,'pdf_private','%s.pdf'%hash['MARKER']) ):
str += ''%hash['MARKER']+soft_break
elif os.path.isfile( os.path.join(html_root,'pdf_public','%s.pdf'%hash['MARKER']) ):
str += ''%hash['MARKER']+soft_break
if hash.has_key('URL'):
str += ''+soft_break
if hash.has_key('DOI'):
line = hash['DOI']
str += ('' if html_par else 'doi: '+line)+soft_break
if hash.has_key('ABS'):
if html_par:
cols = 80
rows = long(ceil(1.0*len(hash['ABS'])/cols))
str += ''+soft_break+ \
'
' +soft_break
elif not html_list:
str += '--- '+hash['ABS']+soft_break
str += '' if html_par else '
' if html_list else ''
return str+soft_break
def print_bibtex(self, keywords):
hash = self.hash
type = hash['TYPE']
btx_type = 'ARTICLE' if type == 'in_journal' else \
'INPROCEEDINGS' if type == 'in_proceedings' else \
'INPROCEEDINGS' if type == 'in_book' else \
'PHDTHESIS' if type == 'PhD thesis' else \
'MASTERSTHESIS' if type == 'MSc thesis' else \
'TECHREPORT' if type == 'tech. report' else \
'TECHREPORT' if type == 'BSc thesis' else \
'TECHREPORT' if type == 'on line' else \
'TECHREPORT' if type == 'class report' else \
'MISC'
#if ctitle[0] == '=':
# prefix, ctitle = ctitle[1:].split('=')
# Keywords TITLE and YEAR are always present
# (tested in method Reference.validate())
# Keyword AUTHOR is absent only for type='proceedings', i.e. if a
# conf. proceedings is listed as a whole with only the EDITOR field
# present.
str = '@%s{\n%s,\n Author = {%s},\n Title = {{%s}},\n'%(
btx_type,
self.bibtex,
self.join_names(key='AUTHOR' if hash.has_key('AUTHOR') else 'EDITOR',reverse=True,separator=[' and ',' and ']),
hash['TITLE']
)
if btx_type == 'ARTICLE':
# Required fields: author, title, journal, year
# Optional fields: volume, number, pages, month, note, key
#@ARTICLE{
#WZGW2013,
# Author = {Wang, F. and Zhang, Y. and Guo, H. and Wang, C.},
# Title = {{Combating good point set scanning-based self-learning worms by using predators}},
# Journal = {J. Network Security},
# Volume = {15},
# Number = {1},
# Pages = {141-148},
# Note = {},
# Keywords= {Witty Worm},
# Month = {January},
# Year = {2013}
#}
if hash.has_key('SERIAL'):
str += ' Journal = {%s},\n'%hash['SERIAL']
else:
raise ReferenceError( "%s,\n%s\nmandatory key SERIAL missing"%(tiny_bits.whoami(),self.hash) )
if hash.has_key('VOLUME'):
volume = hash['VOLUME']
if '(' in volume and ')' in volume:
str += ' Volume = {%s},\n Number = {%s},\n'%(volume[0:volume.find('(')].strip(),volume[volume.find('(')+1:volume.find(')')].strip())
else:
str += ' Volume = {%s},\n'%volume
if hash.has_key('PAGE'):
str += ' Pages = {%s},\n'%hash['PAGE']
elif btx_type == 'INPROCEEDINGS':
# Required fields: author, title, booktitle, year
# Optional fields: editor, volume/number, series, pages, address, month, organization, publisher, note, key
#@INPROCEEDINGS{
#HDSSP2013,
# Author = {Hofstede, R. and Drago, I. and Sperotto, A. and Sadre, R. and Pras, A.},
# Title = {{Measurement artifacts in netFlow data}},
# BookTitle = {Conf. Passive and Active Measurement (PAM)},
# Keywords= {Passive},
# Month = {March},
# Year = {2013}
#}
if hash.has_key('CTITLE'):
str += ' BookTitle = {%s},\n'%(hash['CTITLE']+(', Ch. '+hash['CHAPTER'] if hash.has_key('CHAPTER') else ''))
else:
raise ReferenceError( "%s,\n%s\nmandatory CTITLE missing"%(tiny_bits.whoami(),self.hash) )
if hash.has_key('EDITOR'):
str += ' Editor = {%s},\n'%self.join_names(key='EDITOR',reverse=True,separator=[' and ',' and '])
if hash.has_key('SERIAL'):
str += ' Series = {%s},\n'%hash['SERIAL']
if hash.has_key('VOLUME'):
str += ' Volume = {%s},\n'%hash['VOLUME']
#volume = hash['VOLUME']
#if '(' in volume and ')' in volume:
# str += ' Volume = {%s},\n Number = {%s},\n'%(volume[0:volume.find('(')].strip(),volume[volume.find('(')+1:volume.find(')')].strip())
#else:
# str += ' Volume = {%s},\n'%volume
if hash.has_key('PAGE'):
str += ' Pages = {%s},\n'%hash['PAGE']
if hash.has_key('PUBLISH'):
str += ' Publisher = {%s},\n'%hash['PUBLISH']
#if hash.has_key('PLACE'):
# str += ' Address = {' +hash['PLACE'] +'},\n'
elif btx_type == 'INBOOK':
# Required fields: author/editor, title, chapter/pages, publisher, year
# Optional fields: volume/number, series, type, address, edition, month, note, key
if hash.has_key('CTITLE'):
str += ' BookTitle = {%s},\n'%(hash['CTITLE']+(', Ch. '+hash['CHAPTER'] if hash.has_key('CHAPTER') else ''))
else:
raise ReferenceError( "%s,\n%s\nmandatory CTITLE missing"%(tiny_bits.whoami(),self.hash) )
if hash.has_key('EDITOR'):
str += ' Editor = {%s},\n'%self.join_names(key='EDITOR',reverse=True,separator=[' and ',' and '])
if hash.has_key('SERIAL'):
str += ' Series = {%s},\n'%hash['SERIAL']
if hash.has_key('VOLUME'):
volume = hash['VOLUME']
if '(' in volume and ')' in volume:
str += ' Volume = {%s},\n Number = {%s},\n'%(volume[0:volume.find('(')].strip(),volume[volume.find('(')+1:volume.find(')')].strip())
else:
str += ' Volume = {%s},\n'%volume
if hash.has_key('PAGE'):
str += ' Pages = {%s},\n'%hash['PAGE']
if hash.has_key('PUBLISH'):
str += ' Publisher = {%s},\n'%hash['PUBLISH']
elif btx_type in ['PHDTHESIS','MASTERSTHESIS']:
# Required fields: author, title, school, year
# Optional fields: type, address, month, note, key
#@PHDTHESIS{
#S2011c,
# Author = {Schear, N.},
# Title = {{Preventing encrypted traffic analysis}},
# School = {Univ. Illinois at Urbana-Champagne},
# Note = {},
# Keywords= {Passive},
# Month = {January},
# Year = {2011}
#}
if hash.has_key('PUBLISH'):
str += ' School = {%s},\n'%hash['PUBLISH']
else:
raise ReferenceError( "%s,\n%s\nmandatory PUBLISH missing"%(tiny_bits.whoami(),self.hash) )
elif btx_type == 'TECHREPORT':
# Required fields: author, title, institution, year
# Optional fields: type, number, address, month, note, key
#@TECHREPORT{
#S2012a,
# Author = {Sherry, J.},
# Title = {{Future architectures for middlebox processing services on the Internet and in the Cloud}},
# Institution = {UC Berkeley},
# Note = {},
# Keywords= {Topology AS Relationships},
# Month = {December},
# Year = {2012}
#}
if hash.has_key('PUBLISH'):
str += ' Institution = {%s},\n'%hash['PUBLISH']
else:
raise ReferenceError( "%s,\n%s\nmandatory PUBLISH missing"%(tiny_bits.whoami(),self.hash) )
elif btx_type == 'MISC':
if hash.has_key('PUBLISH'):
str += ' Institution = {%s},\n'%hash['PUBLISH']
if hash .has_key('DOI'):
str += ' Note = {},\n'%hash['DOI']
elif hash.has_key('URL'):
str += ' Note = {<%s>},\n'%hash['URL']
str += ' Keywords= {%s},\n Month = {%s},\n Year = {%s}\n}'%(
' '.join( keywords.used(self).keys()),
self.timestamp.get(format='Month'),
self.timestamp.get(format='YYYY' )
)
return str
def validate(self):
# Make sure all keys are on the g_REF_KEYS list
bad = [ key for key in self.hash if not g_REF_KEYS.has_key(key) ]
if bad:
print bad
raise ReferenceError( "%s, invalid key(s) '%s'\n%s"%(tiny_bits.whoami(),','.join(bad),self.hash) )
# Check for mandatory keys
mandatory = [ key for key in g_REF_KEYS if g_REF_KEYS[key]['mandatory'] ]
missing = ','.join( [ key for key in mandatory if not self.hash.has_key(key) ] )
if len(missing) > 0:
raise ReferenceError( "%s,\n%s\nmissing types '%s'"%(tiny_bits.whoami(),self.hash,missing) )
return
#+
# NAME:
# Reference.split_names
# PURPOSE:
# Converts AUTHOR or EDITOR fields in reference hash
# to a list of hashes with first and last name of authors
# CALLING SEQUENCE:
# result = ref.split_names( key )
# INPUTS:
# ref Reference object
# OPTIONAL INPUTS:
# key string; 'AUTHOR' or 'EDITOR'
# if omitted then 'AUTHOR' is assumed.
# OUTPUTS:
# results list of hashes with two keys: 'last' and 'first'
# PROCEDURE:
# 'AUTHOR' and 'EDITOR' fields are stored as a single string
# in ref.hash['AUTHOR'] and ref.hash['EDITOR'] in the form:
# last-1, first-1; last-2, first-2; ... or
# last-1, first-1, last-2, first-2, ...
# The second form is supported for backward compatibility
# (will be phased out at some point).
#-
def split_geoloc(self):
geoloc = []
if self.hash.has_key('GEOLOC'):
line = self.hash['GEOLOC'].split(';')
for entry in line:
if 'unknown' in entry:
loc = { 'country': 'unknown' }
else:
pieces = entry.split(',')
loc = { 'city': pieces[0].strip(), 'country': pieces[-1].strip() }
if len(pieces) == 3:
loc['state'] = pieces[1].strip()
geoloc.append( loc )
return geoloc
    def split_names(self, key='AUTHOR'):
        """Split the AUTHOR or EDITOR field into [{'last':..,'first':..},..].

        See the comment block above for the accepted field formats.  For
        key='AUTHOR', geolocation info from GEOLOC (one entry per author,
        or a single entry applying to all authors) is merged into each
        author dict.  Raises ReferenceError when names do not come in
        (last, first) pairs or when the geoloc count matches neither 1 nor
        the number of authors.
        """
        author = []
        if self.hash.has_key(key):
            line = []
            # Accept both ';'-separated and ','-separated name lists.
            for x in self.hash[key].split(';'):
                line.extend( x.split(',') )
            if len(line)%2 != 0:
                raise ReferenceError( '%s, error in %s record\n%s'%(tiny_bits.whoami(),key,self.hash[key]) )
            author = [ { 'last': line[i].strip(), 'first': line[i+1].strip() } for i in range(0,len(line),2) ]
            if key == 'AUTHOR':
                geoloc = self.split_geoloc()
                if len(geoloc) == len(author):
                    # One geoloc entry per author.
                    for n in range(len(author)):
                        for k in geoloc[n]:
                            author[n][k] = geoloc[n][k]
                elif len(geoloc) == 1:
                    # A single geoloc entry applies to every author.
                    for n in range(len(author)):
                        for k in geoloc[0]:
                            author[n][k] = geoloc[0][k]
                elif len(geoloc) > 0:
                    raise ReferenceError( '%s,\n%s\n# geolocs does not match # authors'%(tiny_bits.whoami(),self.hash) )
        return author
def split_topkey(self):
topkey = []
if self.hash.has_key('TOPKEY'):
topkey = list(set( [ x.strip() for x in self.hash['TOPKEY'].split(',') ] ))
return topkey
    def join_names(self, key='AUTHOR', reverse=False, html=False, separator=[', ', ' and ']):
        """Join the names from the AUTHOR or EDITOR field into one string.

        reverse   : 'Last, First' (bibtex style) instead of 'First Last'.
        html      : in reverse mode, strip the '<<'/'>>' highlight markers
                    (inserted by matches_regex).
        separator : [between-names separator, before-final-name separator].
        NOTE(review): the mutable default for 'separator' is safe only
        because it is never mutated -- do not append to it.
        NOTE(review): the replace(...,'') calls below look like HTML markup
        was stripped from this file (markers are deleted rather than turned
        into tags, and the two no-op lines suggest lost tag text) -- confirm
        against the original source.
        """
        #authors = self.feature['editor'] if key == 'EDITOR' else self.feature['author']
        authors = self.split_names(key)
        line = ''
        for author in authors:
            if line != '':
                # Bool index: use the 'and' separator before the last name.
                line += separator[ author == authors[-1] ]
            first = author['first']
            last = author['last' ]
            if reverse:
                if html:
                    if '<<' in first: first = first.replace('<<','' )
                    if '>>' in first: first = first.replace('>>','')
                    if '<<' in last: last = last.replace('<<','' )
                    if '>>' in last: last = last.replace('>>','')
                line += last+', '+first
            else:
                if '<<' in last:
                    if '>>' in last:
                        # Highlight fully contained in the last name.
                        last = last.replace('<<','' )
                        last = last.replace('>>','')
                    else:
                        # Highlight opens in the last name ...
                        last = last.replace('<<','')
                        last += ''
                if '>>' in first:
                    if '<<' in first:
                        # Highlight fully contained in the first name.
                        first = first.replace('<<','' )
                        first = first.replace('>>','')
                    else:
                        # ... and closes in the first name.
                        first = first.replace('>>','')
                        first = ''+first
                line += first+' '+last
        return line
def all_remarks(self):
return [ self.hash[x] for x in self.hash if 'REMARK' in x ]
    def matches_regex( self, regex_map ):
        """Test whether this reference matches every pattern in regex_map.

        regex_map maps key names (case-insensitive; entries 'key' and 'cat'
        are skipped) to regex patterns.  Returns False as soon as a required
        field is absent or fails to match.  Side effect: on each match the
        matched span in self.hash[KEY] is wrapped in '<<' '>>' markers
        (later consumed by join_names).
        """
        say = tiny_bits.say('Reference.%s'%tiny_bits.whoami())
        for key in regex_map:
            KEY = key.upper()
            if KEY != 'KEY' and KEY != 'CAT':
                if not self.hash.has_key(KEY):
                    # Missing field counts as a non-match, not an error.
                    return False
                #raise ReferenceError( "%s, ref has no key '%s'\n%s'"%(tiny_bits.whoami(),KEY,self.__repr__()) )
                val = self.hash[KEY]
                m = re.search(regex_map[key],val)
                if not m:
                    say.message( "'%s' does not match '%s'\n%s"%(regex_map[key],val,self.__repr__()), 3 )
                    return False
                # Highlight the matched substring with <<...>> markers.
                self.hash[KEY] = val[0:m.start()]+'<<'+val[m.start():m.end()]+'>>'+val[m.end():]
        return True
class ReferenceList():
def __init__(self, input, start_time=None, stop_time=None ):
say = tiny_bits.say('ReferenceList.%s'%tiny_bits.whoami())
if type( input ) == type( "str" ): # Read yaml file
# Read list of references; creates list of reference objects.
# Need way to select subsets
say.say('read '+input)
handle = open( input )
self.atoms = [ Reference( x ) for x in yaml.load_all(handle) ]
handle.close()
self.yaml_file = input
self.print_attribute = dict()
self.has_bibtex = False
elif isinstance( input, (ReferenceList) ):
self.atoms = deepcopy( input.atoms )
self.yaml_file = input.yaml_file
self.print_attribute = input.print_attribute
self.has_bibtex = input.has_bibtex
else: # Neither yaml file, nor dict
raise ReferenceError( "%s, cannot init from %s"%(tiny_bits.whoami(), type( input)) )
say.say('number of references is %s'%len(self.atoms))
self.generate_bibtex()
self.filter_times(start_time, stop_time)
return
    def filter_times(self, start_time=None, stop_time=None):
        """Restrict self.atoms to references between start_time and stop_time.

        Either bound may be None (open-ended); non-eon_date values are fed
        through the eon_date constructor.  Also sets self.start_time and
        self.stop_time: the given bounds, or the first/last month present
        in the surviving list when a bound was omitted.
        """
        nrefs = len(self.atoms)
        # Only retain entries between start and stop time
        if start_time != None and not isinstance(start_time, (eon_date)):
            start_time = eon_date( start_time )
        if stop_time != None and not isinstance(stop_time, (eon_date)):
            stop_time = eon_date( stop_time )
        self.atoms = [ x for x in self.atoms if x.timestamp.between(start_time,stop_time) ]
        if start_time != None or stop_time != None:
            # Report how much of the list survived the filter.
            say = tiny_bits.say('ReferenceList.%s'%tiny_bits.whoami())
            say.say(
                '%s/%s references since %s' %(len(self.atoms),nrefs,start_time.get(format='YYYY-MM')) if stop_time == None else \
                '%s/%s references before %s' %(len(self.atoms),nrefs, stop_time.get(format='YYYY-MM')) if start_time == None else \
                '%s/%s references between %s and %s'%(len(self.atoms),nrefs,start_time.get(format='YYYY-MM'),stop_time.get(format='YYYY-MM'))
            )
        # Set instance variables
        # bop/eop: presumably begin/end of period, snapping to month
        # boundaries -- confirm in eon_date.
        self.start_time = min( [ x.timestamp for x in self.atoms ] ).bop('month') if start_time == None else start_time
        self.stop_time = max( [ x.timestamp for x in self.atoms ] ).eop('month') if stop_time == None else stop_time
        return
def __repr__(self): # Calls Reference.__repr__ for each Reference object
return '\n'.join( [ '%s'%x.__repr__() for x in self.atoms ] )
def __str__(self): # Calls Reference.__str__ for each Reference object
return '\n'.join( [ '%s'%x.__str__() for x in self.atoms ] )
    def len(self):
        """Return the number of references held (convenience wrapper)."""
        return len(self.atoms)
def set_html_paragraph(self, value=False ):
self.print_attribute['html_paragraph'] = value;
return
def set_html_list(self, value=False ):
self.print_attribute['html_list'] = value;
return
def set_html_root(self, value='' ):
self.print_attribute['html_root'] = value;
return
def set_html_mode(self, value='' ): # '', 'xinc', 'html'
self.print_attribute['html_mode'] = value;
return
def set_title(self, value='' ):
if value != '':
self.print_attribute['title'] = value;
return
def custom_print(self, html_head='', html_tail=''):
plain_html = self.print_attribute.has_key('html_mode') and self.print_attribute['html_mode'] == 'html'
str = ''
if plain_html:
str += html_head if html_head != '' else \
'\n' + \
'\n' + \
'\n' + \
'\n' + \
'\n' + \
'\n' + \
'
\n'
if len(str) > 0 and 'function TextareaOnOff' not in str:
pos = str.find('')
if pos == -1:
raise ReferenceError( "%s, not found in '%s'"%(tiny_bits.whoami(), str) )
str = str[0:pos]+ \
'\n' + \
str[pos:]
str += '\n'.join( [ '%s'%x.custom_print( self.print_attribute ) for x in self.atoms ] )+'\n'
if plain_html:
str += html_tail if html_tail != '' else "