1 """
2 This file is part of web2py Web Framework (Copyrighted, 2007)
3 Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>
4 License: GPL v2
5 """
6
7 __all__=['SQLDB','SQLField']
8
9 import re, sys, os, types, cPickle, datetime, thread, cStringIO, csv, copy, socket
10
11 try:
12 import hashlib
13 def hash5(txt): return hashlib.md5(txt).hexdigest()
14 except:
15 import md5
16 def hash5(txt): return md5.new(txt).hexdigest()
17
18 try: import sqlite3
19 except:
20 try:
21 from pysqlite2 import dbapi2 as sqlite3
22 sys.stderr.write('warning: importing pysqlite2.dbapi2 as sqlite3\n')
23 except:
24 sys.stderr.write('warning: no sqlite3 or dbapi2 driver\n')
25 try: import MySQLdb
26 except: sys.stderr.write('warning: no MySQLdb driver\n')
27 try: import psycopg2
28 except: sys.stderr.write('warning: no psycopg2 driver\n')
29 try:
30 import cx_Oracle
31 sys.stderr.write('warning: support for Oracle is experimental\n')
32 except: sys.stderr.write('warning: no cx_Oracle driver\n')
33 import portalocker
34 import validators
35
36 sql_locker=thread.allocate_lock()
37 """
38 notes on concurrency....
39 Accessing SQLDB._folders, SQLDB._instances and the .table metadata files
40 is not thread safe, so access is serialized with sql_locker.
41 Moreover, .table files are locked with portalocker to account for multiple
42 parallel processes.
43 """
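# A sketch of the locking pattern used below (see SQLDB.__init__ and the
# close/commit helpers); the shared dictionaries are only touched between
# acquire() and release():
#
#     sql_locker.acquire()
#     SQLDB._folders[thread.get_ident()]=folder   # hypothetical assignment
#     sql_locker.release()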
44
45 """
46 date, time and datetime must be in ISO8601 format: yyyy-mm-dd hh:mm:ss
47 """
48
49
50 SQL_DIALECTS={'sqlite':{'boolean':'CHAR(1)',
51 'string':'CHAR(%(length)s)',
52 'text':'TEXT',
53 'password':'CHAR(%(length)s)',
54 'blob':'BLOB',
55 'upload':'CHAR(64)',
56 'integer':'INTEGER',
57 'double':'DOUBLE',
58 'date':'DATE',
59 'time':'TIME',
60 'datetime':'TIMESTAMP',
61 'id':'INTEGER PRIMARY KEY AUTOINCREMENT',
62 'reference':'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
63 'lower':'LOWER(%(field)s)',
64 'upper':'UPPER(%(field)s)',
65 'is null':'IS NULL',
66 'is not null':'IS NOT NULL',
67 'extract':"web2py_extract('%(name)s',%(field)s)"},
68 'mysql':{'boolean':'CHAR(1)',
69 'string':'VARCHAR(%(length)s)',
70 'text':'TEXT',
71 'password':'VARCHAR(%(length)s)',
72 'blob':'BLOB',
73 'upload':'VARCHAR(64)',
74 'integer':'INT',
75 'double':'DOUBLE',
76 'date':'DATE',
77 'time':'TIME',
78 'datetime':'TIMESTAMP',
79 'id':'INT AUTO_INCREMENT NOT NULL',
80 'reference':'INT NOT NULL, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
81 'lower':'LOWER(%(field)s)',
82 'upper':'UPPER(%(field)s)',
83 'is null':'IS NULL',
84 'is not null':'IS NOT NULL',
85 'extract':'EXTRACT(%(name)s FROM %(field)s)'},
86 'postgres':{'boolean':'CHAR(1)',
87 'string':'VARCHAR(%(length)s)',
88 'text':'TEXT',
89 'password':'VARCHAR(%(length)s)',
90 'blob':'BYTEA',
91 'upload':'VARCHAR(64)',
92 'integer':'INTEGER',
93 'double':'FLOAT8',
94 'date':'DATE',
95 'time':'TIME',
96 'datetime':'TIMESTAMP',
97 'id':'SERIAL PRIMARY KEY',
98 'reference':'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
99 'lower':'LOWER(%(field)s)',
100 'upper':'UPPER(%(field)s)',
101 'is null':'IS NULL',
102 'is not null':'IS NOT NULL',
103 'extract':'EXTRACT(%(name)s FROM %(field)s)'},
104 'oracle':{'boolean':'CHAR(1)',
105 'string':'VARCHAR2(%(length)s)',
106 'text':'CLOB',
107 'password':'VARCHAR2(%(length)s)',
108 'blob':'BLOB',
109 'upload':'VARCHAR2(64)',
110 'integer':'INT',
111 'double':'FLOAT',
112 'date':'DATE',
113 'time':'CHAR(8)',
114 'datetime':'DATE',
115 'id':'NUMBER PRIMARY KEY',
116 'reference':'NUMBER, CONSTRAINT %(field_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
117 'lower':'LOWER(%(field)s)',
118 'upper':'UPPER(%(field)s)',
119 'is null':'IS NULL',
120 'is not null':'IS NOT NULL',
121 'extract':'EXTRACT(%(name)s FROM %(field)s)'}
122 }
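# Example (sketch): each dialect entry is a template filled in with % formatting
# by the rest of this module (SQLTable._create uses it via self._db._translator):
#
#     SQL_DIALECTS['postgres']['string'] % dict(length=32)   # 'VARCHAR(32)'
#     SQL_DIALECTS['sqlite']['id']                           # 'INTEGER PRIMARY KEY AUTOINCREMENT'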
123
124 def sqlhtml_validators(field_type,length):
125 v={'boolean':[],
126 'string':validators.IS_LENGTH(length),
127 'text':[],
128 'password':validators.IS_LENGTH(length),
129 'blob':[],
130 'upload':[],
131 'double':validators.IS_FLOAT_IN_RANGE(-1e100,1e100),
132 'integer':validators.IS_INT_IN_RANGE(-1e100,1e100),
133 'date':validators.IS_DATE(),
134 'time':validators.IS_TIME(),
135 'datetime':validators.IS_DATETIME(),
136 'reference':validators.IS_INT_IN_RANGE(0,1e100)}
137 try: return v[field_type[:9]]
138 except KeyError: return []
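# Example (sketch): the first nine characters of the field type pick the default
# validator, so 'reference person' maps to the 'reference' entry:
#
#     sqlhtml_validators('string',32)            # IS_LENGTH(32)
#     sqlhtml_validators('reference person',32)  # IS_INT_IN_RANGE(0,1e100)
#     sqlhtml_validators('unknowntype',32)       # [] (KeyError is swallowed)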
139
140 def sql_represent(object,fieldtype,dbname):
141 if object is None: return 'NULL'
142 if fieldtype=='boolean':
143 if object and not str(object)[0].upper()=='F': return "'T'"
144 else: return "'F'"
145 if fieldtype[0]=='i': return str(int(object))
146 elif fieldtype[0]=='r': return str(int(object))
147 elif fieldtype=='double': return str(float(object))
148 if isinstance(object,unicode): object=object.encode('utf-8')
149 if fieldtype=='date':
150 if isinstance(object,(datetime.date,datetime.datetime)): object=object.strftime('%Y-%m-%d')
151 else: object=str(object)
152 if dbname=='oracle': return "to_date('%s','yyyy-mm-dd')" % object
153 elif fieldtype=='datetime':
154 if isinstance(object,datetime.datetime): object=object.strftime('%Y-%m-%d %H:%M:%S')
155 elif isinstance(object,datetime.date): object=object.strftime('%Y-%m-%d 00:00:00')
156 else: object=str(object)
157 if dbname=='oracle': return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % object
158 elif fieldtype=='time':
159 if isinstance(object,datetime.time): object=object.strftime('%H:%M:%S')
160 else: object=str(object)
161 else: object=str(object)
162 return "'%s'" % object.replace("'","''").replace('\0','\\0')
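# Examples (sketch) of how values are rendered into SQL literals by sql_represent:
#
#     sql_represent(None,'string','sqlite')                     # NULL
#     sql_represent("it's",'string','sqlite')                   # 'it''s'
#     sql_represent(True,'boolean','mysql')                     # 'T'
#     sql_represent(datetime.date(2001,1,1),'date','postgres')  # '2001-01-01'
#     sql_represent(datetime.date(2001,1,1),'date','oracle')    # to_date('2001-01-01','yyyy-mm-dd')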
163
164 def cleanup(text):
165 if re.compile('[^0-9a-zA-Z_]').findall(text):
166 raise SyntaxError, 'only [0-9a-zA-Z_] allowed in table and field names'
167 return text
168
169 def sqlite3_web2py_extract(lookup,s):
170 table={'year':(0,4),'month':(5,7),'day':(8,10),
171 'hour':(11,13),'minutes':(14,16),'seconds':(17,19)}
172 try:
173 i,j=table[lookup]
174 return int(s[i:j])
175 except: return None
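# Example (sketch): web2py_extract is registered as an sqlite function so the
# 'extract' dialect entry works on ISO date/time strings, e.g.
#
#     sqlite3_web2py_extract('month','1971-12-21 09:30:00')  # 12
#     sqlite3_web2py_extract('hour','1971-12-21 09:30:00')   # 9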
176
178 """
179 a dictionary that let you do d['a'] as well as d.a
180 """
183 if self.has_key(key):
184 raise SyntaxError, 'Object exists and cannot be redefined'
185 self[key] = value
187
188 class SQLCallableList(list):
189 def __call__(self): return copy.copy(self)
190
191
192
193
194
195
196
198 """
199 an instance of this class represents a database connection
200
201 Example:
202
203 db=SQLDB('sqlite://test.db')
204 db.define_table('tablename',SQLField('fieldname1'),
205 SQLField('fieldname2'))
206
207 """
208
209 _folders={}
210 _instances={}
211 @staticmethod
216
217 @staticmethod
218 def close_all_instances(action):
220 """ to cleanly close databases in a multithreaded environment """
221 sql_locker.acquire()
222 pid=thread.get_ident()
223 if SQLDB._folders.has_key(pid):
224 del SQLDB._folders[pid]
225 if SQLDB._instances.has_key(pid):
226 instances=SQLDB._instances[pid]
227 while instances:
228 instance=instances.pop()
229 action(instance)
230 instance._connection.close()
231 del SQLDB._instances[pid]
232 sql_locker.release()
233 return
234 @staticmethod
235 def distributed_transaction_commit(*instances):
236 if not instances: return
237 instances=list(enumerate(instances))
238 keys=[]
239 thread_key='%s.%i' % (socket.gethostname(),thread.get_ident())
240 for i,db in instances:
241 keys.append('%s.%i'%(thread_key,i))
242 if not db._dbname=='postgres':
243 raise SyntaxError, "only supported by postgresql"
244 try:
245 for i,db in instances:
246 db._execute("PREPARE TRANSACTION '%s';" % keys[i])
247 except:
248 for i,db in instances:
249 db._execute("ROLLBACK PREPARED '%s';" % keys[i])
250 raise Exception, 'failure to commit distributed transaction'
251 else:
252 for i,db in instances:
253 db._execute("COMMIT PREPARED '%s';" % keys[i])
254 return
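# Usage sketch (PostgreSQL only; relies on the two-phase commit above):
#
#     db_a=SQLDB('postgres://user:pass@host1/db_a')   # hypothetical URIs
#     db_b=SQLDB('postgres://user:pass@host2/db_b')
#     # ... inserts/updates on both connections ...
#     SQLDB.distributed_transaction_commit(db_a,db_b)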
255 def __init__(self,uri='sqlite://dummy.db'):
256 self._uri=uri
257 self['_lastsql']=''
258 self.tables=SQLCallableList()
259 pid=thread.get_ident()
260
261 sql_locker.acquire()
262 if self._folders.has_key(pid): self._folder=self._folders[pid]
263 else: self._folder=self._folders[pid]=''
264 sql_locker.release()
265
266 if self._uri[:9]=='sqlite://':
267 self._dbname='sqlite'
268 if uri[9]!='/':
269 dbpath=os.path.join(self._folder,uri[9:])
270 self._connection=sqlite3.Connection(dbpath)
271 else:
272 self._connection=sqlite3.Connection(uri[9:])
273 self._connection.create_function("web2py_extract",2,
274 sqlite3_web2py_extract)
275 self._cursor=self._connection.cursor()
276 self._execute=lambda *a,**b: self._cursor.execute(*a,**b)
277 elif self._uri[:8]=='mysql://':
278 self._dbname='mysql'
279 m=re.compile('^(?P<user>[^:@]+)(\:(?P<passwd>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self._uri[8:])
280 user=m.group('user')
281 if not user: raise SyntaxError, "User required"
282 passwd=m.group('passwd')
283 if not passwd: passwd=''
284 host=m.group('host')
285 if not host: raise SyntaxError, "Host name required"
286 db=m.group('db')
287 if not db: raise SyntaxError, "Database name required"
288 port=m.group('port')
289 if not port: port='3306'
290 self._connection=MySQLdb.Connection(db=db,
291 user=user,
292 passwd=passwd,
293 host=host,
294 port=int(port),
295 charset='utf8')
296 self._cursor=self._connection.cursor()
297 self._execute=lambda *a,**b: self._cursor.execute(*a,**b)
298 self._execute('SET FOREIGN_KEY_CHECKS=0;')
299 elif self._uri[:11]=='postgres://':
300 self._dbname='postgres'
301 m=re.compile('^(?P<user>[^:@]+)(\:(?P<passwd>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self._uri[11:])
302 user=m.group('user')
303 if not user: raise SyntaxError, "User required"
304 passwd=m.group('passwd')
305 if not passwd: passwd=''
306 host=m.group('host')
307 if not host: raise SyntaxError, "Host name required"
308 db=m.group('db')
309 if not db: raise SyntaxError, "Database name required"
310 port=m.group('port')
311 if not port: port='5432'
312 msg="dbname='%s' user='%s' host='%s' port=%s password='%s'" % (db,user,host,port,passwd)
313 self._connection=psycopg2.connect(msg)
314 self._cursor=self._connection.cursor()
315 self._execute=lambda *a,**b: self._cursor.execute(*a,**b)
316 query='BEGIN;'
317 self['_lastsql']=query
318 self._execute(query)
319 self._execute("SET CLIENT_ENCODING TO 'UNICODE';")
320 elif self._uri[:9]=='oracle://':
321 self._dbname='oracle'
322 self._connection=cx_Oracle.connect(self._uri[9:])
323 self._cursor=self._connection.cursor()
324 self._execute=lambda a: self._cursor.execute(a[:-1])
325 self._execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD';")
326 self._execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
327 else:
328 raise SyntaxError, 'database type not supported'
329 self._translator=SQL_DIALECTS[self._dbname]
330
331 sql_locker.acquire()
332 if self._instances.has_key(pid): self._instances[pid].append(self)
333 else: self._instances[pid]=[self]
334 sql_locker.release()
335 pass
336 def define_table(self,tablename,*fields,**args):
337 if not args.has_key('migrate'): args['migrate']=True
338 if args.keys()!=['migrate']:
339 raise SyntaxError, 'invalid table attribute'
340 tablename=cleanup(tablename)
341 if tablename in dir(self) or tablename[0]=='_':
342 raise SyntaxError, 'invalid table name'
343 if not tablename in self.tables: self.tables.append(tablename)
344 else: raise SyntaxError, "table already defined"
345 t=self[tablename]=SQLTable(self,tablename,*fields)
346 sql_locker.acquire()
347 try:
348 query = t._create(migrate=args['migrate'])
349 except BaseException, e:
350 sql_locker.release()
351 raise e
352 sql_locker.release()
353 return t
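# Example (sketch): migrate defaults to True; pass a filename to pin the
# .table metadata file, or False to assume the table already exists:
#
#     db.define_table('person',SQLField('name'),migrate='person.table')
#     db.define_table('person',SQLField('name'),migrate=False)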
354 def __call__(self,where=''):
355 return SQLSet(self,where)
356 def commit(self):
357 self._connection.commit()
358 def rollback(self):
359 self._connection.rollback()
360 def executesql(self,query):
361 self['_lastsql']=query
362 self._execute(query)
363 return self._cursor.fetchall()
364
365 class SQLALL(object):
366 def __init__(self,table):
367 self.table=table
368 def __str__(self):
369 s=['%s.%s'%(self.table._tablename,name) for name in self.table.fields]
370 return ', '.join(s)
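# Example (sketch): str(db.person.ALL) expands to 'person.id, person.name, ...',
# which is how select() asks for every column of a table.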
371
373 """
374 an instance of this class represents a database table
375 Example:
376
377 db=SQLDB(...)
378 db.define_table('users',SQLField('name'))
379 db.users.insert(name='me') # print db.users._insert(...) to see SQL
380 db.users.drop()
381 """
382 def __init__(self,db,tablename,*fields):
383 self._db=db
384 self._tablename=tablename
385 self.fields=SQLCallableList()
386 self._referenced_by=[]
387 fields=list(fields)
388 fields.insert(0,SQLField('id','id'))
389 for field in fields:
390 self.fields.append(field.name)
391 self[field.name]=field
392 field._tablename=self._tablename
393 field._table=self
394 field._db=self._db
395 self.ALL=SQLALL(self)
396 def __repr__(self):
397 return '<table %s>' % (self._tablename)
398 def _create(self,migrate=True):
399 fields=[]
400 sql_fields={}
401 for k in self.fields:
402 field=self[k]
403 if field.type[:9]=='reference':
404 referenced=field.type[10:].strip()
405 if not referenced:
406 raise SyntaxError, 'SQLTable: reference to nothing!'
407 if not self._db.has_key(referenced):
408 raise SyntaxError, 'SQLTable: table does not exist'
409 referee=self._db[referenced]
410 ftype=self._db._translator[field.type[:9]] % dict(field_name=field.name,foreign_key=referenced+'(id)',on_delete_action=field.ondelete)
411 if self._tablename in referee.fields:
412 raise SyntaxError, 'SQLField: table name has same name as a field in referenced table'
413 self._db[referenced]._referenced_by.append((self._tablename,field.name))
414 elif not self._db._translator.has_key(field.type):
415 raise SyntaxError, 'SQLField: unknown field type'
416 else:
417 ftype=self._db._translator[field.type] % dict(length=field.length)
418 if not field.type[:9] in ['id','reference']:
419 if field.notnull: ftype+=' NOT NULL'
420 if field.unique: ftype+=' UNIQUE'
421 sql_fields[field.name]=ftype
422 fields.append('%s %s' % (field.name,ftype))
423 other=';'
424 if self._db._dbname=='mysql':
425 fields.append('PRIMARY KEY(id)')
426 other=' ENGINE=InnoDB CHARACTER SET utf8;'
427 fields=',\n\t'.join(fields)
428 query='CREATE TABLE %s(\n\t%s\n)%s' % (self._tablename,fields,other)
429 if not migrate:
430 return query
431 elif isinstance(migrate,str):
432 self._dbt=os.path.join(self._db._folder,migrate)
433 else:
434 self._dbt=os.path.join(self._db._folder,\
435 '%s_%s.table' % (hash5(self._db._uri),self._tablename))
436 logfilename=os.path.join(self._db._folder,'sql.log')
437 logfile=open(logfilename,'a')
438 if not os.access(self._dbt,os.R_OK):
439 logfile.write('timestamp: %s\n' % \
440 datetime.datetime.today().isoformat())
441 logfile.write(query+'\n')
442 self._db['_lastsql']=query
443 self._db._execute(query)
444 if self._db._dbname=='oracle':
445 t=self._tablename
446 self._db._execute('CREATE SEQUENCE %s_sequence START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % t)
447 self._db._execute('CREATE OR REPLACE TRIGGER %s_trigger BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s_sequence.nextval INTO :NEW.id FROM DUAL; END;\n' % (t,t,t))
448 self._db.commit()
449 file=open(self._dbt,'w')
450 portalocker.lock(file, portalocker.LOCK_EX)
451 cPickle.dump(sql_fields,file)
452 file.close()
453 logfile.write('success!\n')
454 else:
455 file=open(self._dbt,'r')
456 portalocker.lock(file, portalocker.LOCK_SH)
457 sql_fields_old=cPickle.load(file)
458 file.close()
459 if sql_fields!=sql_fields_old:
460 self._migrate(sql_fields,sql_fields_old,logfile)
461 return query
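# Example (sketch) of the SQL generated above for sqlite, given
# db.define_table('person',SQLField('name'),SQLField('birth','date')):
#
#     CREATE TABLE person(
#         id INTEGER PRIMARY KEY AUTOINCREMENT,
#         name CHAR(32),
#         birth DATE
#     );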
462 def _migrate(self,sql_fields,sql_fields_old,logfile):
463 keys=sql_fields.keys()
464 for key in sql_fields_old.keys():
465 if not key in keys: keys.append(key)
466 for key in keys:
467 if not sql_fields_old.has_key(key):
468 query='ALTER TABLE %s ADD COLUMN %s %s;' % \
469 (self._tablename, key, \
470 sql_fields[key].replace(', ',', ADD '))
471 elif self._db._dbname=='sqlite': query=None
472 elif not sql_fields.has_key(key):
473 query='ALTER TABLE %s DROP COLUMN %s;' % \
474 (self._tablename, key)
475 elif sql_fields[key]!=sql_fields_old[key]:
476
477 t=self._tablename
478 tt=sql_fields[key].replace(', ',', ADD ')
479 query='ALTER TABLE %s ADD %s__tmp %s;\n' % (t,key,tt) +\
480 'UPDATE %s SET %s__tmp=%s;\n' % (t,key,key) +\
481 'ALTER TABLE %s DROP COLUMN %s;\n' % (t,key) +\
482 'ALTER TABLE %s ADD %s %s;\n' % (t,key,tt) +\
483 'UPDATE %s SET %s=%s__tmp;\n' % (t,key,key) +\
484 'ALTER TABLE %s DROP COLUMN %s__tmp;'%(t,key)
485
486 else: query=None
487 if query:
488 logfile.write('timestamp: %s\n' % \
489 datetime.datetime.today().isoformat())
490 logfile.write(query+'\n')
491 self._db['_lastsql']=query
492 self._db._execute(query)
493 if sql_fields.has_key(key): sql_fields_old[key]=sql_fields[key]
494 else: del sql_fields_old[key]
495 logfile.write('success!\n')
496 file=open(self._dbt,'w')
497 portalocker.lock(file, portalocker.LOCK_EX)
498 cPickle.dump(sql_fields_old,file)
499 file.close()
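# Example (sketch): adding SQLField('email') to an already-created table makes
# the comparison above emit something like
#
#     ALTER TABLE person ADD COLUMN email CHAR(32);
#
# after which the pickled field list in the .table file is updated.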
503 def _drop(self):
504 t=self._tablename
505 if self._db._dbname=='oracle':
506 return ['DROP TABLE %s;' % t,'DROP SEQUENCE %s_sequence;' % t]
507 return ['DROP TABLE %s;' % t]
508 def drop(self):
509 logfile=open(os.path.join(self._db._folder,'sql.log'),'a')
510 queries=self._drop()
511 self._db['_lastsql']='\n'.join(queries)
512 for query in queries:
513 logfile.write(query+'\n')
514 self._db._execute(query)
515 del self._db[self._tablename]
516 del self._db.tables[self._db.tables.index(self._tablename)]
517 self._db.commit()
518 os.unlink(self._dbt)
519 logfile.write('success!\n')
520 def _insert(self,**fields):
521 fs,vs=[],[]
522 if [key for key in fields.keys() if not key in self.fields]:
523 raise SyntaxError, 'invalid field name'
524 for fieldname in self.fields:
525 if fieldname=='id': continue
526 field=self[fieldname]
527 ft,fd=field.type,field._db._dbname
528 if fields.has_key(fieldname):
529 fs.append(fieldname)
530 value=fields[fieldname]
531 if value!=None and field.type[:9] in ['integer','reference']:
532 value=int(value)
533 if value!=None and field.type=='double':
534 value=float(value)
535 try: vs.append(sql_represent(value.id,ft,fd))
536 except: vs.append(sql_represent(value,ft,fd))
537 elif field.default!=None:
538 fs.append(fieldname)
539 vs.append(sql_represent(field.default,ft,fd))
540 elif field.required is True:
541 raise SyntaxError, 'SQLTable: missing required field'
542 sql_f=', '.join(fs)
543 sql_v=', '.join(vs)
544 sql_t=self._tablename
545 return 'INSERT INTO %s(%s) VALUES (%s);' % (sql_t,sql_f,sql_v)
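# Example (sketch): only the passed fields and fields with defaults are included, e.g.
#
#     db.person._insert(name='Max')
#     # "INSERT INTO person(name) VALUES ('Max');"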
546 def insert(self,**fields):
547 query=self._insert(**fields)
548 self._db['_lastsql']=query
549 self._db._execute(query)
550 if self._db._dbname=='sqlite':
551 id=self._db._cursor.lastrowid
552 elif self._db._dbname=='postgres':
553 self._db._execute("select currval('%s_id_Seq')" % self._tablename)
554 id=int(self._db._cursor.fetchone()[0])
555 elif self._db._dbname=='mysql':
556 self._db._execute("select last_insert_id();")
557 id=int(self._db._cursor.fetchone()[0])
558 elif self._db._dbname=='oracle':
559 t=self._tablename
560 self._db._execute('SELECT %s_sequence.currval FROM dual;' %t)
561 id=int(self._db._cursor.fetchone()[0])
562 else:
563 id=None
564 return id
566 """
567 import records from csv file. Column headers must have same names as
568 table fields. field 'id' is ignored. If column names read 'table.file'
569 the 'table.' prefix is ignored.
570 """
571 reader = csv.reader(file)
572 colnames=None
573 for line in reader:
574 if not colnames:
575 colnames=[x[x.find('.')+1:] for x in line]
576 c=[i for i in xrange(len(line)) if colnames[i]!='id']
577 else:
578 items=[(colnames[i],line[i]) for i in c]
579 self.insert(**dict(items))
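# Example (sketch) of an acceptable CSV file; the 'id' column, if present, is
# ignored and the 'person.' prefix is optional:
#
#     person.id,person.name,person.birth
#     1,Massimo,1971-12-21
#     2,Marco,2005-06-22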
580
581 class SQLXorable(object):
582 def __init__(self,name,type='string',db=None):
583 self.name,self.type,self._db=name,type,db
584 def __str__(self):
585 return self.name
586 def __or__(self,other):
587 return SQLXorable(str(self)+', '+str(other),None,None)
588 def __invert__(self):
589 return SQLXorable(str(self)+' DESC',None,None)
590
599
600 def __add__(self,other):
601 return SQLXorable('%s+%s'%(self,other),'float',None)
602 def __sub__(self,other):
603 return SQLXorable('%s-%s'%(self,other),'float',None)
604 def __mul__(self,other):
605 return SQLXorable('%s*%s'%(self,other),'float',None)
606 def __div__(self,other):
607 return SQLXorable('%s/%s'%(self,other),'float',None)
608
610 """
611 an instance of this class represents a database field
612
613 example:
614
615 a=SQLField(name,'string',length=32,required=False,default=None,requires=IS_NOT_EMPTY(),notnull=False,unique=False)
616
617 to be used as argument of SQLDB.define_table
618
619 allowed field types:
620 string, boolean, integer, double, text, blob,
621 date, time, datetime, upload, password
622
623 strings must have a length (32 by default).
624 fields should have a default or they will be required in SQLFORMs
625 the requires argument is used to validate the field input in SQLFORMs
626
627 """
628 def __init__(self,fieldname,type='string',
629 length=32,default=None,required=False,
630 requires=sqlhtml_validators,ondelete='CASCADE',
631 notnull=False,unique=False):
632 self.name=cleanup(fieldname)
633 if fieldname in dir(SQLTable) or fieldname[0]=='_':
634 raise SyntaxError, 'SQLField: invalid field name'
635 if isinstance(type,SQLTable): type='reference '+type._tablename
636 if not length and type=='string': type='text'
637 elif not length and type=='password': length=32
638 self.type=type
639 if type=='upload': length=64
640 self.length=length
641 self.default=default
642 self.required=required
643 self.ondelete=ondelete.upper()
644 self.notnull=notnull
645 self.unique=unique
646 if requires==sqlhtml_validators: requires=sqlhtml_validators(type,length)
647 elif requires is None: requires=[]
648 self.requires=requires
657 def lower(self):
658 s=self._db._translator["lower"] % dict(field=str(self))
659 return SQLXorable(s,'string',self._db)
660 def upper(self):
661 s=self._db._translator["upper"] % dict(field=str(self))
662 return SQLXorable(s,'string',self._db)
663 def year(self):
664 s=self._db._translator["extract"] % dict(name='year',field=str(self))
665 return SQLXorable(s,'integer',self._db)
666 def month(self):
667 s=self._db._translator["extract"] % dict(name='month',field=str(self))
668 return SQLXorable(s,'integer',self._db)
669 def day(self):
670 s=self._db._translator["extract"] % dict(name='day',field=str(self))
671 return SQLXorable(s,'integer',self._db)
672 def hour(self):
673 s=self._db._translator["extract"] % dict(name='hour',field=str(self))
674 return SQLXorable(s,'integer',self._db)
675 def minutes(self):
676 s=self._db._translator["extract"] % dict(name='minutes',field=str(self))
677 return SQLXorable(s,'integer',self._db)
678 def seconds(self):
679 s=self._db._translator["extract"] % dict(name='seconds',field=str(self))
680 return SQLXorable(s,'integer',self._db)
681 def __str__(self): return '%s.%s' % (self._tablename,self.name)
682
684 """
685 a query object necessary to define a set.
686 t can be stored or can be passed to SQLDB.__call__() to obtain a SQLSet
687
688 Example:
689 query=db.users.name=='Max'
690 set=db(query)
691 records=set.select()
692 """
693 _t={'==':'=','!=':'<>','<':'<','>':'>','<=':'<=','>=':'>=','like':' LIKE ','belongs':' IN '}
694 def __init__(self,left,op=None,right=None):
695 if op is None and right is None: self.sql=left
696 elif right is None:
697 if op=='==':
698 self.sql='%s %s' % (left,left._db._translator['is null'])
699 elif op=='!=':
700 self.sql='%s %s' % (left,left._db._translator['is not null'])
701 else: raise SyntaxError, 'do not know what to do'
702 elif op=='belongs':
703 if isinstance(right,str):
704 self.sql='%s%s(%s)'%(left,self._t[op],right[:-1])
705 elif hasattr(right,'__iter__'):
706 r=','.join([sql_represent(i,left.type,left._db._dbname) for i in right])
707 self.sql='%s%s(%s)'%(left,self._t[op],r)
708 else: raise SyntaxError, 'do not know what to do'
709 elif right.__class__==SQLField:
710 self.sql='%s%s%s' % (left,self._t[op],right)
711 else:
712 right=sql_represent(right,left.type,left._db._dbname)
713 self.sql='%s%s%s' % (left,self._t[op],right)
714 def __and__(self,other): return SQLQuery('(%s AND %s)'%(self,other))
715 def __or__(self,other): return SQLQuery('(%s OR %s)'%(self,other))
716 def __invert__(self): return SQLQuery('(NOT %s)'%self)
717 def __str__(self): return self.sql
718
719 regex_tables=re.compile('(?P<table>[a-zA-Z]\w*)\.')
720
721 def parse_tablenames(text):
722 items=regex_tables.findall(re.sub("'[^']*'",'',text))
723 tables={}
724 for item in items: tables[item]=True
725 return tables.keys()
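# Example (sketch): quoted string literals are stripped before matching, so
#
#     parse_tablenames("person.name='O.Brian' AND dog.owner=person.id")
#
# returns ['person','dog'] (in no particular order, since it comes from dict keys).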
726
728 """
729 sn SQLSet represents a set of records in the database,
730 the records are identified by the where=SQLQuery(...) object.
731 normally the SQLSet is generated by SQLDB.__call__(SQLQuery(...))
732
733 given a set, for example
734 set=db(db.users.name=='Max')
735 you can:
736 set.update(db.users.name='Massimo')
737 set.delete() # all elements in the set
738 set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10))
739 and take subsets:
740 subset=set(db.users.id<5)
741 """
742 def __init__(self,db,where=''):
743 self._db=db
744 self._tables=[]
745
746 self.sql_w=str(where)
747
748 self._tables=parse_tablenames(self.sql_w)
749
752 def _select(self,*fields,**attributes):
753 valid_attributes=['orderby','groupby','limitby','required',
754 'default','requires']
755 if [key for key in attributes.keys() if not key in valid_attributes]:
756 raise SyntaxError, 'invalid select attribute'
757
758 if not fields: fields=[self._db[table].ALL for table in self._tables]
759 sql_f=', '.join([str(f) for f in fields])
760 tablenames=parse_tablenames(self.sql_w+' '+sql_f)
761 if len(tablenames)<1: raise SyntaxError, 'SQLSet: no tables selected'
762 sql_t=', '.join(tablenames)
763 self.colnames=[c.strip() for c in sql_f.split(', ')]
764 if self.sql_w: sql_w=' WHERE '+self.sql_w
765 else: sql_w=''
766 sql_o=''
767 if attributes.has_key('groupby'):
768 sql_o+=' GROUP BY %s'% attributes['groupby']
769 if attributes.has_key('orderby'):
770 sql_o+=' ORDER BY %s'% attributes['orderby']
771 if attributes.has_key('limitby'):
772
773 lmin,lmax=attributes['limitby']
774 if self._db._dbname=='oracle':
775 if not attributes.has_key('orderby'):
776 sql_o+=' ORDER BY %s'%', '.join([t+'.id' for t in tablenames])
777 return "SELECT %s FROM (SELECT _tmp.*, ROWNUM _row FROM (SELECT %s FROM %s%s%s) _tmp WHERE ROWNUM<%i ) WHERE _row>=%i;" %(sql_f,sql_f,sql_t,sql_w,sql_o,lmax,lmin)
778 sql_o+=' LIMIT %i OFFSET %i' % (lmax-lmin,lmin)
779 return 'SELECT %s FROM %s%s%s;'%(sql_f,sql_t,sql_w,sql_o)
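# Example (sketch) of the SQL built above:
#
#     db(db.person.id>0)._select(db.person.name,orderby=db.person.name,limitby=(0,10))
#     # "SELECT person.name FROM person WHERE person.id>0 ORDER BY person.name LIMIT 10 OFFSET 0;"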
780 def select(self,*fields,**attributes):
781 """
782 Always returns a SQLRows object, even if it may be empty
783 """
784 def response(query):
785 self._db['_lastsql']=query
786 self._db._execute(query)
787 return self._db._cursor.fetchall()
788 if not attributes.has_key('cache'):
789 query=self._select(*fields,**attributes)
790 r=response(query)
791 else:
792 cache_model,time_expire=attributes['cache']
793 del attributes['cache']
794 query=self._select(*fields,**attributes)
795 key=self._db._uri+'/'+query
796 r=cache_model(key,lambda:response(query),time_expire)
797 return SQLRows(self._db,r,*self.colnames)
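# Example (sketch): the optional cache attribute is a (cache_model,time_expire)
# pair; cache.ram here is hypothetical and comes from the caller's environment:
#
#     rows=db(db.person.id>0).select(db.person.ALL,cache=(cache.ram,60))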
798 def _delete(self):
799 if len(self._tables)!=1:
800 raise SyntaxError, 'SQLSet: unable to determine what to delete'
801 tablename=self._tables[0]
802 if self.sql_w: sql_w=' WHERE '+self.sql_w
803 else: sql_w=''
804 return 'DELETE FROM %s%s;' % (tablename,sql_w)
805 def delete(self):
806 query=self._delete()
807 self._db['_lastsql']=query
808 self._db._execute(query)
809 def _update(self,**fields):
810 tablenames=self._tables
811 if len(tablenames)!=1:
812 raise SyntaxError, 'SQLSet: unable to determine what to do'
813 tt,fd=self._db[tablenames[0]],self._db._dbname
814 sql_v='SET '+', '.join(['%s=%s' % (field,sql_represent(value,tt[field].type,fd)) for field,value in fields.items()])
815 sql_t=tablenames[0]
816 if self.sql_w: sql_w=' WHERE '+self.sql_w
817 else: sql_w=''
818 return 'UPDATE %s %s%s;' % (sql_t,sql_v,sql_w)
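# Example (sketch):
#
#     db(db.person.name=='Max')._update(name='Massimo')
#     # "UPDATE person SET name='Massimo' WHERE person.name='Max';"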
819 def update(self,**fields):
820 query=self._update(**fields)
821 self._db['_lastsql']=query
822 self._db._execute(query)
823
824 def update_record(t,s,a):
825 s.update(**a)
826 for key,value in a.items(): t[str(key)]=value
827
828 class SQLRows(object):
830 """
831 A wrapper for the return value of a select. It basically represents a table.
832 It has an iterator and each row is represented as a dictionary.
833 """
834 def __init__(self,db,response,*colnames):
835 self._db=db
836 self.colnames=colnames
837 self.response=response
838 def __len__(self):
839 return len(self.response)
840 def __getitem__(self,i):
841 if i>=len(self.response) or i<0:
842 raise SyntaxError, 'SQLRows: no such row'
843 if len(self.response[0])!=len(self.colnames):
844 raise SyntaxError, 'SQLRows: internal error'
845 row=SQLStorage()
846 for j in xrange(len(self.colnames)):
847 value=self.response[i][j]
848 if isinstance(value,unicode): value=value.encode('utf-8')
849 packed=self.colnames[j].split('.')
850 try: tablename,fieldname=packed
851 except:
852 if not row.has_key('_extra'): row['_extra']=SQLStorage()
853 row['_extra'][self.colnames[j]]=value
854 continue
855 table=self._db[tablename]
856 field=table[fieldname]
857 if not row.has_key(tablename):
858 row[tablename]=SQLStorage()
859 if field.type[:9]=='reference':
860 referee=field.type[10:].strip()
861 rid=value
862 row[tablename][fieldname]=rid
863
864 elif field.type=='boolean' and value!=None:
865 if value=='T': row[tablename][fieldname]=True
866 else: row[tablename][fieldname]=False
867 elif field.type=='date' and value!=None and not isinstance(value,datetime.date):
868 y,m,d=[int(x) for x in str(value).strip().split('-')]
869 row[tablename][fieldname]=datetime.date(y,m,d)
870 elif field.type=='time' and value!=None and not isinstance(value,datetime.time):
871 time_items=[int(x) for x in str(value).strip().split(':')[:3]]
872 if len(time_items)==3: h,mi,s=time_items
873 else: h,mi,s=time_items+[0]
874 row[tablename][fieldname]=datetime.time(h,mi,s)
875 elif field.type=='datetime' and value!=None and not isinstance(value,datetime.datetime):
876 y,m,d=[int(x) for x in str(value)[:10].strip().split('-')]
877 time_items=[int(x) for x in str(value)[11:].strip().split(':')[:3]]
878 if len(time_items)==3: h,mi,s=time_items
879 else: h,mi,s=time_items+[0]
880 row[tablename][fieldname]=datetime.datetime(y,m,d,h,mi,s)
881 else:
882 row[tablename][fieldname]=value
883 if fieldname=='id':
884 id=row[tablename].id
885 row[tablename].update_record=lambda t=row[tablename], \
886 s=self._db(table.id==id),**a: update_record(t,s,a)
887 for referee_table,referee_name in table._referenced_by:
888 s=self._db[referee_table][referee_name]
889 row[tablename][referee_table]=SQLSet(self._db,s==id)
890 if len(row.keys())==1: return row[row.keys()[0]]
891 return row
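# Example (sketch): rows from a join are nested by table name, while a
# single-table select collapses one level, e.g.
#
#     row=db(db.dog.owner==db.person.id).select()[0]
#     row.person.name, row.dog.name
#     row=db(db.person.id>0).select()[0]
#     row.name                      # no 'person.' level needed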
893 """
894 iterator over records
895 """
896 for i in xrange(len(self)):
897 yield self[i]
899 """
900 serializes the table into a csv file
901 """
902 s=cStringIO.StringIO()
903 writer = csv.writer(s)
904 writer.writerow(self.colnames)
905 c=len(self.colnames)
906 for i in xrange(len(self)):
907 row=[self.response[i][j] for j in xrange(c)]
908 for k in xrange(c):
909 if isinstance(row[k],unicode): row[k]=row[k].encode('utf-8')
910 writer.writerow(row)
911 return s.getvalue()
913 """
914 serializes the table using sqlhtml.SQLTABLE (if present)
915 """
916 import sqlhtml
917 return sqlhtml.SQLTABLE(self).xml()
918
920 """
921
922 Create a table with all possible field types
923 'sqlite://test.db'
924 'mysql://root:none@localhost/test'
925 'postgres://mdipierro:none@localhost/test'
926
927 >>> if len(sys.argv)<2: db=SQLDB("sqlite://test.db")
928 >>> if len(sys.argv)>1: db=SQLDB(sys.argv[1])
929 >>> tmp=db.define_table('users',\
930 SQLField('stringf','string',length=32,required=True),\
931 SQLField('booleanf','boolean',default=False),\
932 SQLField('passwordf','password',notnull=True),\
933 SQLField('blobf','blob'),\
934 SQLField('uploadf','upload'),\
935 SQLField('integerf','integer',unique=True),\
936 SQLField('doublef','double',unique=True,notnull=True),\
937 SQLField('datef','date',default=datetime.date.today()),\
938 SQLField('timef','time'),\
939 SQLField('datetimef','datetime'),\
940 migrate='test_user.table')
941
942 Insert a record
943
944 >>> db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A',\
945 uploadf=None, integerf=5,doublef=3.14,\
946 datef=datetime.date(2001,1,1),\
947 timef=datetime.time(12,30,15),\
948 datetimef=datetime.datetime(2002,2,2,12,30,15))
949 1
950
951 Drop the table
952
953 >>> db.users.drop()
954
955 Examples of insert, select, update, delete
956
957 >>> tmp=db.define_table('person',\
958 SQLField('name'), \
959 SQLField('birth','date'),\
960 migrate='test_person.table')
961 >>> person_id=db.person.insert(name="Marco",birth='2005-06-22')
962 >>> person_id=db.person.insert(name="Massimo",birth='1971-12-21')
963 >>> len(db().select(db.person.ALL))
964 2
965 >>> me=db(db.person.id==person_id).select()[0] # test select
966 >>> me.name
967 'Massimo'
968 >>> db(db.person.name=='Massimo').update(name='massimo') # test update
969 >>> db(db.person.name=='Marco').delete() # test delete
970
971 Update a single record
972
973 >>> me.update_record(name="Max")
974 >>> me.name
975 'Max'
976
977 Examples of complex search conditions
978
979 >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
980 1
981 >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
982 1
983 >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
984 1
985 >>> me=db(db.person.id==person_id).select(db.person.name)[0]
986 >>> me.name
987 'Max'
988
989 Examples of search conditions using extract from date/datetime/time
990
991 >>> len(db(db.person.birth.month()==12).select())
992 1
993 >>> len(db(db.person.birth.year()>1900).select())
994 1
995
996 Example of usage of NULL
997
998 >>> len(db(db.person.birth==None).select()) ### test NULL
999 0
1000 >>> len(db(db.person.birth!=None).select()) ### test NULL
1001 1
1002
1003 Examples of search conditions using lower, upper, and like
1004
1005 >>> len(db(db.person.name.upper()=='MAX').select())
1006 1
1007 >>> len(db(db.person.name.like('%ax')).select())
1008 1
1009 >>> len(db(db.person.name.upper().like('%AX')).select())
1010 1
1011 >>> len(db(~db.person.name.upper().like('%AX')).select())
1012 0
1013
1014 orderby, groupby and limitby
1015
1016 >>> people=db().select(db.person.name,orderby=db.person.name)
1017 >>> order=db.person.name|~db.person.birth
1018 >>> people=db().select(db.person.name,orderby=order)
1019
1020 >>> people=db().select(db.person.name,orderby=db.person.name,groupby=db.person.name)
1021
1022 >>> people=db().select(db.person.name,orderby=order,limitby=(0,100))
1023
1024 Example of a one-to-many relation
1025
1026 >>> tmp=db.define_table('dog', \
1027 SQLField('name'), \
1028 SQLField('birth','date'), \
1029 SQLField('owner',db.person),\
1030 migrate='test_dog.table')
1031 >>> db.dog.insert(name='Snoopy',birth=None,owner=person_id)
1032 1
1033
1034 A simple JOIN
1035
1036 >>> len(db(db.dog.owner==db.person.id).select())
1037 1
1038
1039 Drop tables
1040
1041 >>> db.dog.drop()
1042 >>> db.person.drop()
1043
1044 Example of a many-to-many relation and SQLSet
1045
1046 >>> tmp=db.define_table('author',SQLField('name'),\
1047 migrate='test_author.table')
1048 >>> tmp=db.define_table('paper',SQLField('title'),\
1049 migrate='test_paper.table')
1050 >>> tmp=db.define_table('authorship',\
1051 SQLField('author_id',db.author),\
1052 SQLField('paper_id',db.paper),\
1053 migrate='test_authorship.table')
1054 >>> aid=db.author.insert(name='Massimo')
1055 >>> pid=db.paper.insert(title='QCD')
1056 >>> tmp=db.authorship.insert(author_id=aid,paper_id=pid)
1057
1058 Define a SQLSet
1059
1060 >>> authored_papers=db((db.author.id==db.authorship.author_id)&\
1061 (db.paper.id==db.authorship.paper_id))
1062 >>> rows=authored_papers.select(db.author.name,db.paper.title)
1063 >>> for row in rows: print row.author.name, row.paper.title
1064 Massimo QCD
1065
1066 Example of search condition using belongs
1067
1068 >>> set=(1,2,3)
1069 >>> rows=db(db.paper.id.belongs(set)).select(db.paper.ALL)
1070 >>> print rows[0].title
1071 QCD
1072
1073 Example of search condition using nested select
1074
1075 >>> nested_select=db()._select(db.authorship.paper_id)
1076 >>> rows=db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
1077 >>> print rows[0].title
1078 QCD
1079
1080 Output in csv
1081
1082 >>> str(authored_papers.select(db.author.name,db.paper.title))
1083 'author.name,paper.title\\r\\nMassimo,QCD\\r\\n'
1084
1085 Delete all leftover tables
1086
1087 # >>> SQLDB.distributed_transaction_commit(db)
1088
1089 >>> db.authorship.drop()
1090 >>> db.author.drop()
1091 >>> db.paper.drop()
1092 """
1093
1094 if __name__=='__main__':
1095 import doctest
1096 doctest.testmod()
1097