###############################################################################
#
# data.eagle --
#
# Extensible Adaptable Generalized Logic Engine (Eagle)
# Data Utility Package
#
# Copyright (c) 2007-2012 by Joe Mistachkin. All rights reserved.
#
# See the file "license.terms" for information on usage and redistribution of
# this file, and for a DISCLAIMER OF ALL WARRANTIES.
#
# RCS: @(#) $Id: $
#
###############################################################################
#
# NOTE: Use our own namespace here because even though we do not directly
# support namespaces ourselves, we do not want to pollute the global
# namespace if this script actually ends up being evaluated in Tcl.
#
namespace eval ::Eagle {
#
# NOTE: This procedure is used to report errors during the various data
# processing operations. In "strict" mode, these errors are always
# fatal; otherwise, the errors are kept in a global "errors" variable
# for later reporting.
#
proc report { {message ""} {data ""} {strict 0} } {
if {$strict} then {
error [list message $message data $data]
} else {
lappend ::errors [list message $message data $data]
if {[lindex [info level -1] 0] ne "fail"} then {
if {[string length $message] > 0} then {
host result Error [appendArgs \n "message: " $message \n]
}
if {[string length $data] > 0} then {
host result Error [appendArgs \n "data: " $data \n]
}
}
}
}
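#
# NOTE: Example (hypothetical values, from within a data processing
# procedure): in non-strict mode the error is queued for later; in
# strict mode it is raised immediately.
#
# report "bad row length" [list row 5] 0; # appended to $::errors
# report "bad row length" [list row 5] 1; # raises a script error
#
#
# NOTE: This procedure reports a non-fatal error, emits the error message
# and the global usage message (if any) via putsStdout, and then
# raises a script error with the same message.
#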
proc fail { {error ""} } {
report $error "" 0; # NOTE: Non-strict, report only.
if {[string length $error] > 0} then {
putsStdout $error
}
if {[info exists ::usage]} then {
putsStdout $::usage
}
error $error
}
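#
# NOTE: This procedure returns the processor architecture name, normalized
# for use in platform-specific directory names (e.g. "x86", "x64", or
# "ia64"), based on the value of $tcl_platform(machine).
#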
proc getArchitecture {} {
if {[info exists ::tcl_platform(machine)] && \
[string length $::tcl_platform(machine)] > 0} then {
#
# NOTE: Check for the "amd64" (i.e. "x64") architecture.
#
if {$::tcl_platform(machine) eq "amd64"} then {
return x64
}
#
# NOTE: Check for the "ia32_on_win64" (i.e. "WoW64") architecture.
#
if {$::tcl_platform(machine) eq "ia32_on_win64"} then {
return x86
}
#
# NOTE: Check for the "ia64" architecture.
#
if {$::tcl_platform(machine) eq "ia64"} then {
return ia64
}
#
# NOTE: Check for the "intel" (i.e. "x86") architecture.
#
if {$::tcl_platform(machine) eq "intel"} then {
return x86
}
#
# NOTE: We do not support this architecture.
#
putsStdout [appendArgs "Machine \"" $::tcl_platform(machine) \
"\" is unsupported."]
return unknown
}
putsStdout [appendArgs "Machine detection failed."]
return none
}
#
# NOTE: With the native library pre-loading feature and a proper
# application-local deployment of System.Data.SQLite, as shown here:
#
# * <bin>\System.Data.SQLite.dll (managed-only core assembly)
# * <bin>\x86\SQLite.Interop.dll (x86 native interop assembly)
# * <bin>\x64\SQLite.Interop.dll (x64 native interop assembly)
#
# -OR-
#
# * <bin>\System.Data.SQLite.dll (managed-only core assembly)
# * <bin>\x86\sqlite3.dll (x86 native library)
# * <bin>\x64\sqlite3.dll (x64 native library)
#
# this procedure is no longer necessary.
#
proc setupForSQLite { path } {
#
# NOTE: The toolPath is the directory where the caller is running from.
#
set toolPath $path
putsStdout [appendArgs "Tool path is \"" $toolPath "\"."]
#
# NOTE: The externalsPath is the parent of the application root directory,
# which should be the Externals directory (i.e. the one containing
# the "sqlite3" and "System.Data.SQLite" directories). If this is
# not the case, append "Externals" to the directory and try there.
#
set externalsPath [file dirname $toolPath]
if {[file tail $externalsPath] ne "Externals"} then {
set externalsPath [file join $externalsPath Externals]
}
putsStdout [appendArgs "Externals path is \"" $externalsPath "\"."]
#
# NOTE: This is the list of file names we need to copy into the
# application binary directory. Currently, this includes the
# "sqlite3.dll" and "System.Data.SQLite.dll" files, which are
# necessary when using SQLite from Eagle.
#
set fileNames [list \
[file join $externalsPath sqlite3 [getArchitecture] sqlite3.dll] \
[file join $externalsPath System.Data.SQLite System.Data.SQLite.dll]]
#
# NOTE: The binaryPath is the directory where the application is running
# from.
#
set binaryPath [info binary]
putsStdout [appendArgs "Binary path is \"" $binaryPath "\"."]
#
# NOTE: Attempt to copy each of the files we need to the application
# binary directory. Hopefully, the CLR will be able to load them
# from there.
#
foreach fileName $fileNames {
if {![file exists $fileName]} then {
#
# NOTE: It seems the source file does not exist, skip it.
#
putsStdout [appendArgs "File \"" $fileName "\" does not exist."]
continue
}
set justFileName [file tail $fileName]
set newFileName [file join $binaryPath $justFileName]
if {$justFileName eq "sqlite3.dll"} then {
set magic 0
set error null
if {![object invoke Eagle._Components.Private.FileOps \
CheckPeFileArchitecture $fileName magic error]} then {
#
# NOTE: The "sqlite3.dll" file does not match the current operating
# system architecture (e.g. 32-bit DLL on 64-bit Windows).
#
fail [object invoke $error ToString]
} else {
putsStdout [appendArgs "File \"" $fileName "\" PE magic OK (" \
[string format "0x{0:X}" $magic] ")."]
}
}
if {![file exists $newFileName]} then {
#
# NOTE: The destination file does not exist, copy it.
#
file copy $fileName $newFileName
putsStdout [appendArgs "Copied \"" $fileName "\" to \"" \
$newFileName "\"."]
} else {
#
# NOTE: It seems the destination file already exists, skip it.
#
putsStdout [appendArgs "File \"" $newFileName "\" exists."]
}
}
}
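#
# NOTE: Example (hypothetical): copy the SQLite binaries next to the
# running tool, assuming the standard Externals layout shown above.
#
# setupForSQLite [file dirname [info script]]
#
#
# NOTE: This procedure evaluates a script in the context of the caller,
# reporting the start time, stop time, and elapsed time via the
# standard output channel.
#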
proc showTime { name script } {
putsStdout [appendArgs "\n\nStarted " $name " at " \
[clock format [clock seconds]] .]
set elapsed [time {uplevel 1 $script}]
putsStdout [appendArgs "\n\nStopped " $name " at " \
[clock format [clock seconds]] .]
putsStdout [appendArgs "Completed in " \
$elapsed .]
}
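#
# NOTE: Example (hypothetical script): time a data import operation.
#
# showTime "import phase" {importDelimited data.txt person}
#
#
# NOTE: This procedure determines if the named channel is currently open,
# caching the answer in the global "haveChannel" array to avoid
# repeated channel lookups.
#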
proc haveChannel { name } {
if {![info exists ::haveChannel($name)]} then {
set ::haveChannel($name) \
[expr {[lsearch -exact [file channels] $name] != -1}]
}
return $::haveChannel($name)
}
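#
# NOTE: This procedure emits a string to the standard output channel, if
# it is available. The -nonewline option is supported as the first
# argument. If there is no standard output channel, the procedure
# redefines itself to do nothing.
#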
proc putsStdout { args } {
#
# NOTE: Is the 'stdout' channel available?
#
if {[haveChannel stdout]} then {
#
# NOTE: Do we need to emit a trailing newline?
#
if {[llength $args] == 2 && \
[lindex $args 0] eq "-nonewline"} then {
#
# NOTE: Output the second argument with no newline.
#
catch {
puts -nonewline stdout [lindex $args 1]
flush stdout
}
} else {
#
# NOTE: Output the first argument with a newline.
#
catch {
puts stdout [lindex $args 0]
flush stdout
}
}
} else {
#
# HACK: Since there is no 'stdout' channel, this procedure is
# totally useless; therefore, we simply redefine it to do
# nothing.
#
proc putsStdout { args } {}
}
}
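#
# NOTE: This procedure reads a file using the specified procedure and
# remaps the utf-8 encoded delimiter code points (see the HACK
# comment below) to their windows-1252 counterparts.
#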
proc readBadFile { fileName {readProc readFile} } {
#
# HACK: This "text" file (as exported by MySQL) has a bad mixture of
# utf-8 and windows-1252 code points in it. At a bare minimum,
# we want to change the utf-8 code points that are used in the
# data as column and row delimiters and change them to valid
# windows-1252 code points.
#
return [string map [list \xC3\xBF \xFF \xC3\xBE \xFE] \
[$readProc $fileName]]
}
#
# WARNING: Do not use this procedure unless you know exactly what it does.
#
proc readUtf8File { fileName } {
set file_id [open $fileName RDONLY]
fconfigure $file_id -encoding utf-8 -translation auto
set result [read $file_id]
close $file_id
return $result
}
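#
# NOTE: This procedure opens a connection to the specified SQLite database
# file, executes the supplied SQL against it, reports any error that
# occurs, and then closes the connection. Example (hypothetical
# file name):
#
# executeSQLite data.db "CREATE TABLE t1(x TEXT);"
#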
proc executeSQLite { fileName sql {strict 0} } {
try {
set connection [sql open -type SQLite \
[subst {Data Source=${fileName}}]]
if {[catch {sql execute $connection $sql} error] != 0} then {
report [appendArgs "sql statement error: " $error] \
[list $sql] $strict
}
} finally {
if {[info exists connection]} then {
sql close $connection; unset connection
}
}
}
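#
# NOTE: This procedure scans the characters of a file, reporting any
# control characters (values below 32 or exactly 127) or characters
# outside the printable ASCII range (values above 126) that it
# finds. Example (hypothetical file name): scan a file while
# ignoring tab, line feed, and carriage return characters.
#
# scanAsciiChars data.txt readFile 0 [list 9 10 13]
#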
proc scanAsciiChars {
fileName {readProc readFile} {startIndex 0} {skip ""} {strict 0} } {
#
# NOTE: Read all the data from the file into memory using the
# specified procedure.
#
# BUGFIX: *PERF* Use a real System.String object here (via an
# opaque object handle) and query each byte in the loop
# below as necessary. This prevents the whole string
# from being needlessly copied back and forth in memory
# repeatedly (i.e. during command invocation).
#
set data [object create -alias String [$readProc $fileName]]
set length [$data Length]
set chunk 1024
putsStdout [appendArgs "Scanning " $length " bytes of data (" \
$chunk " bytes per chunk)...\n"]
for {set index $startIndex} {$index < $length} {incr index} {
#
# NOTE: Grab the numeric value of the character at the specified
# index in the string, via the opaque object handle.
#
set value [string ordinal [$data get_Chars $index] 0]
if {[lsearch -integer $skip $value] != -1} then {
continue
}
if {$value < 32 || $value == 127} then {
report [appendArgs "found control character " $value " (" \
[string format "0x{0:X}" $value] ") at index " $index] "" \
$strict
} elseif {$value > 126} then {
report [appendArgs "found bad character " $value " (" \
[string format "0x{0:X}" $value] ") at index " $index] "" \
$strict
}
if {$index % $chunk == 0} then {
putsStdout -nonewline 0
}
}
}
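#
# NOTE: This procedure imports delimited text data into an in-memory
# "table" (i.e. an array named "<tableName>_rows" in the context of
# the caller). The first row of the data, or a detached header file,
# must contain the column names. Example (hypothetical file): import
# tab-delimited data with cr/lf row delimiters into the "person_rows"
# array.
#
# importDelimited person.txt person readFile \t \r\n
#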
proc importDelimited {
fileName tableName {readProc readFile} {columnDelimiter \t}
{rowDelimiter \r\n} {strict 0} } {
#
# NOTE: Read all the data from the file into memory using the
# specified procedure.
#
# BUGFIX: *PERF* Use a real System.String object here (via an
# opaque object handle) and query each byte in the loop
# below as necessary. This prevents the whole string
# from being needlessly copied back and forth in memory
# repeatedly (i.e. during command invocation).
#
set data [object create -alias String [$readProc $fileName]]
#
# HACK: Check for a detached header file. This should contain
# exactly one row (including the trailing row delimiter)
# with just the column names [separated by the column
# delimiter].
#
set headerFileName [file join [file dirname $fileName] \
[appendArgs header_ [file tail $fileName]]]
if {[file exists $headerFileName]} then {
putsStdout [appendArgs "Found detached header file \"" \
$headerFileName "\" for data file \"" $fileName \
"\", reading..."]
set headerData [object create -alias String [$readProc \
$headerFileName]]
}
#
# NOTE: Split the data using the row delimiter. We use the String.Split
# method here (via the opaque object handle) to allow for the use
# of a multi-character row delimiter. For data containing literal
# cr/lf characters, a non-cr/lf row delimiter must be used.
#
set rowDelimiters [object create -alias String\[\] 1]
$rowDelimiters SetValue $rowDelimiter 0
set lines [$data -create -alias Split $rowDelimiters None]
#
# NOTE: Determine how many rows of data there are. There must
# be more than zero to continue.
#
set rowCount [$lines Length]
#
# NOTE: We cannot proceed if there are no rows of data.
#
if {$rowCount == 0} then {
fail "no rows of data"
}
#
# NOTE: If we read the header data from the detached header file,
# use it; otherwise, grab the first line of the data. This
# line must be the header line (i.e. it must contain the
# column names and nothing else).
#
if {[info exists headerData]} then {
set headerLine [$headerData ToString]
#
# NOTE: Every row in the data is a data row; therefore, start on
# the first row.
#
set rowIndex 0
} else {
set headerLine [$lines GetValue 0]
#
# NOTE: The first data row is really the header line; therefore,
# start on the second row.
#
set rowIndex 1
}
#
# NOTE: We cannot proceed if the header line is empty.
#
if {[string length $headerLine] == 0} then {
fail "invalid file header"
}
putsStdout [appendArgs "\n\nAttempting to import " $rowCount \
" rows starting at index " $rowIndex "...\n"]
#
# NOTE: This lambda unquotes a column name (i.e. removes single
# and double quotation marks). Technically, this may be
# too liberal since it removes all leading and trailing
# single and double quotes; however, these are the column
# names, not data, and should not contain any single or
# double quotes.
#
set unquote [list [list x] { return [string trim $x '\"] }]
#
# NOTE: Split the header line using the column delimiter.
# We use the -string option here to allow for the
# use of a multi-character column delimiter. For
# data containing literal tab characters, a non-tab
# column delimiter must be used.
#
set headerColumns [map \
[split $headerLine $columnDelimiter -string] \
{apply $unquote}]
set columnCount [llength $headerColumns]
#
# NOTE: We cannot proceed if there are no column names.
#
if {$columnCount == 0} then {
fail "no columns in header"
}
#
# NOTE: Using the table name, access the in-memory table
# from the calling context. By convention, the
# variable name will be "<tableName>_rows".
#
upvar 1 [appendArgs $tableName _rows] table
#
# NOTE: Set the necessary metadata used by the export
# procedure into the table array.
#
set table(columns) $headerColumns
set table(imported) 0
set table(startIndex) $rowIndex
#
# NOTE: There is no loop initializer here, see above.
#
for {} {$rowIndex < $rowCount} {incr rowIndex} {
#
# NOTE: Grab the previous line of data, if available.
#
if {$rowIndex > 0} then {
set previousLine [$lines GetValue [expr {$rowIndex - 1}]]
} else {
set previousLine ""
}
#
# NOTE: Grab the current line of data.
#
set line [$lines GetValue $rowIndex]
#
# NOTE: Is the current line of data empty?
#
if {[string length $line] == 0} then {
#
# NOTE: We do not care if the final row of data is
# empty.
#
if {$rowIndex + 1 < $rowCount} then {
#
# NOTE: An empty row of data could indicate a corrupt
# data dump; however, it is almost always safe
# to simply ignore blank lines.
#
report [appendArgs "row #" $rowIndex " is empty"] \
[list previous $previousLine] $strict
}
continue
}
#
# NOTE: Split the current line using the column delimiter.
# We use the -string option here to allow for the
# use of a multi-character column delimiter. For
# data containing literal tab characters, a non-tab
# column delimiter must be used.
#
set columns [split $line $columnDelimiter -string]
set count [llength $columns]
#
# NOTE: Did we find some columns in the current line of
# data? Given the check for an empty line of data
# above, this should almost always succeed.
#
if {$count == 0} then {
#
# NOTE: A row of data with no columns could indicate a
# corrupt data dump.
#
report [appendArgs "row #" $rowIndex " has no columns"] \
[list previous $previousLine current $line] $strict
continue
}
#
# NOTE: Does the current line of data contain the correct
# number of columns (based on the header)? If the
# data dump is subtly corrupted in some way, this
# is the most likely error to be seen.
#
if {$count != $columnCount} then {
#
# NOTE: A row of data with an incorrect number of columns
# almost certainly indicates at least some level of
# data dump corruption. We can ignore it and proceed;
# however, each of these errors should be individually
# investigated at the very least.
#
report [appendArgs "row #" $rowIndex " has " $count \
" columns, expected " $columnCount] [list current $line] $strict
continue
}
#
# NOTE: Process each column value for this row and add it
# to the in-memory table.
#
set columnIndex 0
for {} {$columnIndex < $count} {incr columnIndex} {
set columnName [lindex $headerColumns $columnIndex]
set columnValue [lindex $columns $columnIndex]
#
# NOTE: Is the first character a single or double quote?
#
if {[string index $columnValue 0] eq "'" || \
[string index $columnValue 0] eq "\""} then {
#
# NOTE: Ok, remove the first character.
#
set columnValue [string range $columnValue 1 end]
}
#
# NOTE: Is the last character a single or double quote?
#
if {[string index $columnValue end] eq "'" || \
[string index $columnValue end] eq "\""} then {
#
# NOTE: Ok, remove the last character.
#
set columnValue [string range $columnValue 0 end-1]
}
set table($rowIndex,$columnName) $columnValue
}
incr table(imported)
putsStdout -nonewline .
}
set table(count) $table(imported)
}
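#
# NOTE: This procedure exports an in-memory "table" (see importDelimited)
# to a fixed-length text file. Each element of the fields list is
# itself a list of the form:
#
# {id name width ?start? ?alignment? ?type? ?format?}
#
# Example (hypothetical fields): export a left-aligned 20 character
# name column and a right-aligned 10 character date column.
#
# exportFixedLength person person.txt [list \
#     [list 0 name 20 "" left string ""] \
#     [list 1 birthday 10 "" right datetime %Y-%m-%d]]
#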
proc exportFixedLength {
tableName fileName fields {maps ""} {regsubs ""} {strict 0} } {
#
# NOTE: Using the table name, access the in-memory table
# from the calling context. By convention, the
# variable name will be "<tableName>_rows".
#
upvar 1 [appendArgs $tableName _rows] table
set headerColumns $table(columns)
set columnCount [llength $headerColumns]
#
# NOTE: So far, we have not exported any rows.
#
set table(exported) 0
#
# NOTE: Grab the necessary metadata from the table array.
#
set rowCount $table(count)
set startIndex $table(startIndex)
set rowIndex $startIndex
putsStdout [appendArgs "\n\nAttempting to export " $rowCount \
" rows starting at index " $rowIndex "...\n"]
#
# NOTE: Process each row in the passed-in array. There is no
# loop initializer here, see above.
#
set data ""
for {} {$rowIndex < $rowCount + $startIndex} {incr rowIndex} {
#
# NOTE: Start out with an empty row value. After all the fields
# are processed, this will be added to the overall data block
# to export.
#
set rowValue ""
#
# NOTE: Process each field in the passed-in list.
#
set fieldIndex 0
for {} {$fieldIndex < [llength $fields]} {incr fieldIndex} {
#
# NOTE: What is the length of this row so far?
#
set rowLength [string length $rowValue]
#
# NOTE: Grab the field [definition] from the list.
#
set field [lindex $fields $fieldIndex]
#
# NOTE: Make sure the field has the required elements.
#
if {[llength $field] < 3} then {
report [appendArgs \
"field #" $fieldIndex " has " [llength $field] \
" elements, expected at least 3"] "" $strict
continue
}
#
# NOTE: Extract and validate the field identifier. This element is
# always required and must be a valid integer.
#
set fieldId [string trim [lindex $field 0]]
if {![string is integer -strict $fieldId]} then {
report [appendArgs \
"field #" $fieldIndex " has an invalid identifier \"" \
$fieldId \"] "" $strict
continue
}
#
# NOTE: Extract and validate the field name. This element is
# always required.
#
set fieldName [string trim [lindex $field 1]]
if {[string length $fieldName] == 0} then {
report [appendArgs \
"field #" $fieldIndex " has an empty name"] "" $strict
continue
}
#
# NOTE: Extract and validate the field width. This element is
# always required and must be a valid integer greater than
# zero.
#
set fieldWidth [string trim [lindex $field 2]]
if {![string is integer -strict $fieldWidth]} then {
report [appendArgs \
"field #" $fieldIndex " has an invalid width \"" \
$fieldWidth \"] "" $strict
continue
}
#
# NOTE: The field width must be greater than zero.
#
if {$fieldWidth <= 0} then {
report [appendArgs \
"field #" $fieldIndex " has width \"" $fieldWidth \
"\", which is less than or equal to zero"] "" $strict
continue
}
#
# NOTE: Extract and validate the field start. This element is
# optional; if specified, it must be an integer.
#
set fieldStart [string trim [lindex $field 3]]
if {[string length $fieldStart] == 0} then {
set fieldStart $rowLength
}
if {![string is integer -strict $fieldStart]} then {
report [appendArgs \
"field #" $fieldIndex " has an invalid start \"" \
$fieldStart \"] "" $strict
continue
}
#
# NOTE: The field start cannot occur before the current position in
# the row being built (i.e. fields are always processed in the
# order they occur in the list provided by the caller).
#
if {$fieldStart < $rowLength} then {
report [appendArgs \
"field #" $fieldIndex " cannot start at \"" $fieldStart \
"\", already beyond that point"] "" $strict
continue
}
#
# NOTE: Extract and validate the field alignment. This element is
# optional; if specified, it must be either "left" or "right".
#
set fieldAlignment [string trim [lindex $field 4]]
if {[string length $fieldAlignment] == 0} then {
set fieldAlignment right
}
if {$fieldAlignment ni [list left right]} then {
report [appendArgs \
"field #" $fieldIndex " has an invalid alignment \"" \
$fieldAlignment "\", must be \"left\" or \"right\""] "" \
$strict
continue
}
#
# NOTE: Extract and validate the field type. This element is
# optional; if specified, it must be either "string",
# "number", or "datetime".
#
set fieldType [string trim [lindex $field 5]]
if {[string length $fieldType] == 0} then {
set fieldType string
}
if {$fieldType ni [list string number datetime]} then {
report [appendArgs \
"field #" $fieldIndex " has an invalid type \"" $fieldType \
"\", must be \"string\", \"number\", or \"datetime\""] "" \
$strict
continue
}
#
# NOTE: Extract and validate the field format. This element is
# optional.
#
set fieldFormat [lindex $field 6]; # NOTE: No trim.
#
# NOTE: Search for the column in the list of columns. If it cannot
# be found, use an empty string for the column name and value.
# We cannot simply skip the column because the format string
# may simply be a literal string that does not require the
# column value.
#
set columnIndex [lsearch -exact $headerColumns $fieldName]
if {$columnIndex != -1} then {
set columnName [lindex $headerColumns $columnIndex]
set columnValue $table($rowIndex,$columnName)
} else {
set columnName ""
set columnValue ""
}
#
# HACK: Perform any replacements specified by the caller. This is
# done in two phases. Typically, the first phase is used to
# escape characters (e.g. by converting them to HTML entities)
# and the second phase is [optionally] used to undo any double
# escapes that may have been created during the first phase.
#
if {[llength $maps] > 0} then {
foreach map $maps {
if {[llength $map] > 0} then {
set columnValue [string map $map $columnValue]
}
}
}
#
# HACK: Perform any regular expression replacements specified by the
# caller.
#
if {[llength $regsubs] > 0} then {
foreach regsub $regsubs {
#
# NOTE: Each element in the list must have exactly 2 elements.
# The first element must be the regular expression. The
# second element must be the replacement pattern.
#
if {[llength $regsub] == 2} then {
regsub -all -- [lindex $regsub 0] $columnValue \
[lindex $regsub 1] columnValue
}
}
}
#
# NOTE: Check if an explicit format string was specified. If so,
# use the appropriate formatting command for the data type.
#
if {[string length $fieldFormat] > 0} then {
switch -exact -- $fieldType {
string {
set columnValue [object invoke String Format \
$fieldFormat $columnValue]
}
number {
set columnValue [format $fieldFormat $columnValue]
}
datetime {
if {[string length $columnValue] > 0 && \
![string is integer -strict $columnValue]} then {
#
# NOTE: The value is not an integer; therefore,
# try to scan it as a date and/or time.
#
set columnValue [clock scan $columnValue]
}
set columnValue [clock format $columnValue \
-format $fieldFormat]
}
default {
report [appendArgs \
"field #" $fieldIndex " has bad type \"" \
$fieldAlignment \"] "" $strict
continue
}
}
}
#
# NOTE: Check the formatted column length against the field width.
# If the formatted column length is greater, it must be
# truncated. Otherwise, if the formatted column length is
# less, it must be padded according to the field alignment.
#
set columnLength [string length $columnValue]
if {$columnLength > $fieldWidth} then {
#
# NOTE: Truncate the string; otherwise, it will not fit within
# the field.
#
if {$fieldAlignment eq "left"} then {
set columnValue [string range $columnValue \
[expr {$columnLength - $fieldWidth}] end]
} else {
set columnValue [string range $columnValue \
0 [expr {$fieldWidth - 1}]]
}
report [appendArgs \
"column \"" $columnName "\" value at row #" $rowIndex \
" (length " $columnLength ") exceeds width of field #" \
$fieldIndex " (width " $fieldWidth "), truncated"] "" \
$strict
} else {
set padding [string repeat " " \
[expr {$fieldWidth - $columnLength}]]
if {$fieldAlignment eq "left"} then {
set columnValue [appendArgs $columnValue $padding]
} else {
set columnValue [appendArgs $padding $columnValue]
}
}
#
# NOTE: If this field starts at some point after the end of the
# row value built so far, pad it.
#
if {$fieldStart > $rowLength} then {
set padding [string repeat " " \
[expr {$fieldStart - $rowLength}]]
} else {
set padding ""
}
#
# NOTE: Append the necessary padding and the final column value
# to the row value.
#
append rowValue $padding $columnValue
}
#
# NOTE: Append the row value to the overall data block to export.
#
append data $rowValue
incr table(exported)
putsStdout -nonewline *
}
writeFile $fileName $data
}
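#
# NOTE: This procedure exports an in-memory "table" (see importDelimited)
# to the table of the same name in the specified SQLite database
# file, optionally executing custom SQL (e.g. CREATE TABLE) first.
# Example (hypothetical file and schema):
#
# exportSQLite person person.db \
#     "CREATE TABLE IF NOT EXISTS person(name, birthday);"
#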
proc exportSQLite {
tableName fileName {sql ""} {maps ""} {regsubs ""} {strict 0} } {
#
# NOTE: Export the in-memory table to the specified SQLite
# database file.
#
try {
set connection [sql open -type SQLite \
[subst {Data Source=${fileName}}]]
#
# NOTE: If custom SQL was specified, execute it against
# the database connection now.
#
if {[string length $sql] > 0} then {
sql execute $connection $sql
}
#
# NOTE: Using the table name, access the in-memory table
# from the calling context. By convention, the
# variable name will be "<tableName>_rows".
#
upvar 1 [appendArgs $tableName _rows] table
set headerColumns $table(columns)
set columnCount [llength $headerColumns]
#
# NOTE: This lambda wraps a column name in square brackets.
#
set wrap [list [list x] { return [appendArgs \[ $x \]] }]
#
# NOTE: Build the parameterized SQL statement to execute an
# INSERT (or UPDATE) of a single row.
#
set rowSql [appendArgs \
"INSERT OR REPLACE INTO \[" $tableName "\] (" \
[join [map $headerColumns {apply $wrap}] ,] ") VALUES(" \
[join [lrepeat $columnCount ?] ,] ");"]
#
# NOTE: Build the per-row script to evaluate for adding or
# updating a row in the database.
#
set script {eval sql execute -verbatim \
[list $connection] [list $rowSql] $columnParams}
#
# NOTE: So far, we have not exported any rows.
#
set table(exported) 0
#
# NOTE: Grab the necessary metadata from the table array.
#
set rowCount $table(count)
set startIndex $table(startIndex)
set rowIndex $startIndex
putsStdout [appendArgs "\n\nAttempting to export " $rowCount \
" rows starting at index " $rowIndex "...\n"]
#
# NOTE: Process each row in the passed-in array. There is no loop
# initializer here, see above.
#
for {} {$rowIndex < $rowCount + $startIndex} {incr rowIndex} {
set columnParams [list]
#
# NOTE: Process each column value for this row and add it to the
# list of parameters for the SQL statement to execute.
#
set columnIndex 0
for {} {$columnIndex < $columnCount} {incr columnIndex} {
set columnName [lindex $headerColumns $columnIndex]
set columnValue $table($rowIndex,$columnName)
#
# HACK: Perform any replacements specified by the caller. This is
# done in two phases. Typically, the first phase is used to
# escape characters (e.g. by converting them to HTML entities)
# and the second phase is [optionally] used to undo any double
# escapes that may have been created during the first phase.
#
if {[llength $maps] > 0} then {
foreach map $maps {
if {[llength $map] > 0} then {
set columnValue [string map $map $columnValue]
}
}
}
#
# HACK: Perform any regular expression replacements specified by the
# caller.
#
if {[llength $regsubs] > 0} then {
foreach regsub $regsubs {
#
# NOTE: Each element in the list must have exactly 2 elements.
# The first element must be the regular expression. The
# second element must be the replacement pattern.
#
if {[llength $regsub] == 2} then {
regsub -all -- [lindex $regsub 0] $columnValue \
[lindex $regsub 1] columnValue
}
}
}
#
# HACK: Make dates conform to the format needed by SQLite.
#
if {[regexp -- {^\d{4}/\d{1,2}/\d{1,2}$} $columnValue]} then {
set dateTime [object invoke -alias DateTime Parse $columnValue]
set columnValue [$dateTime ToString yyyy-MM-dd]
}
#
# NOTE: Make sure to omit the parameter value if the column value
# needs to be a literal NULL.
#
set columnParam [list [appendArgs param $columnIndex] String]
if {$columnValue ne "NULL"} then {
lappend columnParam $columnValue
}
#
# NOTE: Add the parameter for this column to the list of parameters
# to pass to the command.
#
lappend columnParams $columnParam
}
#
# NOTE: Evaluate the per-row script used to actually insert or
# update a row in the database. Catch and report on any
# errors we encounter.
#
if {[catch $script error] != 0} then {
report [appendArgs "sql statement error: " $error] \
[list $rowSql $columnParams] $strict
} else {
#
# NOTE: We successfully exported another row.
#
incr table(exported)
}
putsStdout -nonewline *
}
} finally {
if {[info exists connection]} then {
sql close $connection; unset connection
}
}
}
#
# NOTE: Provide the Eagle "data" package to the interpreter.
#
package provide Eagle.Data \
[expr {[isEagle] ? [info engine PatchLevel] : "1.0"}]
}