.\" Automatically generated by Pod::Man 4.14 (Pod::Simple 3.43) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is >0, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{\ . if \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{\ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" ======================================================================== .\" .IX Title "Table 3pm" .TH Table 3pm "2022-11-19" "perl v5.36.0" "User Contributed Perl Documentation" .\" For nroff, turn off justification. 
Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" Data::Table \- Data type related to database tables, spreadsheets, CSV/TSV files, HTML table displays, etc. .SH "SYNOPSIS" .IX Header "SYNOPSIS" .Vb 2 \& News: The package now includes "Perl Data::Table Cookbook" (PDF), which may serve as a better learning material. \& To download the free Cookbook, visit https://sites.google.com/site/easydatabase/ \& \& # some cool ways to use Table.pm \& use Data::Table; \& \& $header = ["name", "age"]; \& $data = [ \& ["John", 20], \& ["Kate", 18], \& ["Mike", 23] \& ]; \& $t = Data::Table\->new($data, $header, 0); # Construct a table object with \& # $data, $header, $type=0 (consider \& # $data as the rows of the table). \& print $t\->csv; # Print out the table as a csv file. \& \& $t = Data::Table::fromCSV("aaa.csv"); # Read a csv file into a table object \& ### Since version 1.51, a new method fromFile can automatically guess the correct file format \& # either CSV or TSV file, file with or without a column header line \& # e.g. \& # $t = Data::Table::fromFile("aaa.csv"); \& # is equivalent. \& print $t\->html; # Display a \*(Aqportrait\*(Aq HTML TABLE on web. \& \& use DBI; \& $dbh= DBI\->connect("DBI:mysql:test", "test", "") or die $DBI::errstr; \& my $minAge = 10; \& $t = Data::Table::fromSQL($dbh, "select * from mytable where age >= ?", [$minAge]); \& # Construct a table form an SQL \& # database query. \& \& $t\->sort("age", 0, 0); # Sort by col \*(Aqage\*(Aq,numerical,ascending \& print $t\->html2; # Print out a \*(Aqlandscape\*(Aq HTML Table. \& \& $row = $t\->delRow(2); # Delete the third row (index=2). \& $t\->addRow($row, 4); # Add the deleted row back as fifth row. \& @rows = $t\->delRows([0..2]); # Delete three rows (row 0 to 2). \& $col = $t\->delCol("age"); # Delete column \*(Aqage\*(Aq. 
\& $t\->addCol($col, "age",2); # Add column \*(Aqage\*(Aq as the third column \& @cols = $t\->delCols(["name","phone","ssn"]); \& # Delete 3 columns at the same time. \& \& $name = $t\->elm(2,"name"); # Element access \& $t2=$t\->subTable([1, 3..4],[\*(Aqage\*(Aq, \*(Aqname\*(Aq]); \& # Extract a sub\-table \& \& $t\->rename("Entry", "New Entry"); # Rename column \*(AqEntry\*(Aq by \*(AqNew Entry\*(Aq \& $t\->replace("Entry", [1..$t\->nofRow()], "New Entry"); \& # Replace column \*(AqEntry\*(Aq by an array of \& # numbers and rename it as \*(AqNew Entry\*(Aq \& $t\->swap("age","ssn"); # Swap the positions of column \*(Aqage\*(Aq \& # with column \*(Aqssn\*(Aq in the table. \& \& $t\->colMap(\*(Aqname\*(Aq, sub {return uc}); # Map a function to a column \& $t\->sort(\*(Aqage\*(Aq,0,0,\*(Aqname\*(Aq,1,0); # Sort table first by the numerical \& # column \*(Aqage\*(Aq and then by the \& # string column \*(Aqname\*(Aq in ascending \& # order \& $t2=$t\->match_pattern(\*(Aq$_\->[0] =~ /^L/ && $_\->[3]<0.2\*(Aq); \& # Select the rows that matched the \& # pattern specified \& $t2=$t\->match_pattern_hash(\*(Aq$_{"Amino acid"} =~ /^L\-a/ && $_{"Grams \e"(a.a.)\e""}<0.2\*(Aq)); \& # use column name in the pattern, method added in 1.62 \& $t2=$t\->match_string(\*(AqJohn\*(Aq); # Select the rows that matches \*(AqJohn\*(Aq \& # in any column \& \& $t2=$t\->clone(); # Make a copy of the table. 
\& $t\->rowMerge($t2); # Merge two tables \& $t\->colMerge($t2); \& \& $t = Data::Table\->new( # create an employ salary table \& [ \& [\*(AqTom\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 65000], \& [\*(AqJohn\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 75000], \& [\*(AqTom\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 65000], \& [\*(AqJohn\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 75000], \& [\*(AqPeter\*(Aq, \*(Aqmale\*(Aq, \*(AqHR\*(Aq, 85000], \& [\*(AqMary\*(Aq, \*(Aqfemale\*(Aq, \*(AqHR\*(Aq, 80000], \& [\*(AqNancy\*(Aq, \*(Aqfemale\*(Aq, \*(AqIT\*(Aq, 55000], \& [\*(AqJack\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 88000], \& [\*(AqSusan\*(Aq, \*(Aqfemale\*(Aq, \*(AqHR\*(Aq, 92000] \& ], \& [\*(AqName\*(Aq, \*(AqSex\*(Aq, \*(AqDepartment\*(Aq, \*(AqSalary\*(Aq], 0); \& \& sub average { # this is an subroutine calculate mathematical average, ignore NULL \& my @data = @_; \& my ($sum, $n) = (0, 0); \& foreach $x (@data) { \& next unless $x; \& $sum += $x; $n++; \& } \& return ($n>0)?$sum/$n:undef; \& } \& \& $t2 = $t\->group(["Department","Sex"],["Name", "Salary"], [sub {scalar @_}, \e&average], ["Nof Employee", "Average Salary"]); \& # For each (Department,Sex) pair, calculate the number of employees and average salary \& $t2 = $t2\->pivot("Sex", 0, "Average Salary", ["Department"]); \& # Show average salary information in a Department by Sex spreadsheet .Ve .SH "ABSTRACT" .IX Header "ABSTRACT" This perl package uses perl5 objects to make it easy for manipulating spreadsheet data among disk files, database, and Web publishing. .PP A table object contains a header and a two-dimensional array of scalars. Four class methods Data::fromFile, Data::Table::fromCSV, Data::Table::fromTSV, and Data::Table::fromSQL allow users to create a table object from a \s-1CSV/TSV\s0 file or a database \s-1SQL\s0 selection in a snap. 
.PP Table methods provide basic access, add, delete row(s) or column(s) operations, as well as more advanced sub-table extraction, table sorting, record matching via keywords or patterns, table merging, and web publishing. Data::Table class also provides a straightforward interface to other popular Perl modules such as \s-1DBI\s0 and GD::Graph. .PP The most updated version of the Perl Data::Table Cookbook is available at https://sites.google.com/site/easydatabase/ .PP We use Data::Table instead of Table, because Table.pm has already been used inside PerlQt module in \s-1CPAN.\s0 .SH "INTRODUCTION" .IX Header "INTRODUCTION" A table object has three data members: .ie n .IP "1. $data:" 4 .el .IP "1. \f(CW$data:\fR" 4 .IX Item "1. $data:" a reference to an array of array-references. It's basically a reference to a two-dimensional array. .ie n .IP "2. $header:" 4 .el .IP "2. \f(CW$header:\fR" 4 .IX Item "2. $header:" a reference to a string array. The array contains all the column names. .ie n .IP "3. $type = 1 or 0." 4 .el .IP "3. \f(CW$type\fR = 1 or 0." 4 .IX Item "3. $type = 1 or 0." 1 means that @$data is an array of table columns (fields) (column-based); 0 means that @$data is an array of table rows (records) (row-based); .PP Row\-based/Column\-based are two internal implementations for a table object. E.g., if a spreadsheet consists of two columns lastname and age. In a row-based table, \f(CW$data\fR = [ ['Smith', 29], ['Dole', 32] ]. In a column-based table, \f(CW$data\fR = [ ['Smith', 'Dole'], [29, 32] ]. .PP Two implementations have their pros and cons for different operations. Row-based implementation is better for sorting and pattern matching, while column-based one is better for adding/deleting/swapping columns. .PP Users only need to specify the implementation type of the table upon its creation via Data::Table::new, and can forget about it afterwards. 
Implementation type of a table should be considered volatile, because methods switch table objects from one type into another internally. Be advised that row/column/element references gained via table::rowRef, table::rowRefs, table::colRef, table::colRefs, or table::elmRef may become stale after other method calls afterwards. .PP For those who want to inherit from the Data::Table class, internal method table::rotate is used to switch from one implementation type into another. There is an additional internal assistant data structure called colHash in our current implementation. This hash table stores all column names and their corresponding column index number as key-value pairs for fast conversion. This gives users an option to use column name wherever a column \s-1ID\s0 is expected, so that user don't have to use table::colIndex all the time. E.g., you may say \&\f(CW$t\fR\->rename('oldColName', 'newColName') instead of \f(CW$t\fR\->rename($t\->colIndex('oldColName'), 'newColIdx'). .SH "DESCRIPTION" .IX Header "DESCRIPTION" .SS "Field Summary" .IX Subsection "Field Summary" .IP "data refto_arrayof_refto_array" 4 .IX Item "data refto_arrayof_refto_array" contains a two-dimensional spreadsheet data. .IP "header refto_array" 4 .IX Item "header refto_array" contains all column names. .IP "type 0/1" 4 .IX Item "type 0/1" 0 is row-based, 1 is column-based, describe the orientation of @$data. .SS "Package Variables" .IX Subsection "Package Variables" .ie n .IP "$Data::Table::VERSION" 4 .el .IP "\f(CW$Data::Table::VERSION\fR" 4 .IX Item "$Data::Table::VERSION" .PD 0 .ie n .IP "@Data::Table::OK" 4 .el .IP "\f(CW@Data::Table::OK\fR" 4 .IX Item "@Data::Table::OK" .PD see table::match_string, table::match_pattern, and table::match_pattern_hash Since 1.62, we recommend you to use \f(CW$table\fR\->{\s-1OK\s0} instead, which is a local array reference. 
.ie n .IP "@Data::Table::MATCH" 4 .el .IP "\f(CW@Data::Table::MATCH\fR" 4 .IX Item "@Data::Table::MATCH" see table::match_string, table::match_pattern, and table::match_pattern_hash Since 1.67, we return the matched row indices in an array. Data::Table::MATCH is this array reference. Here is an example of setting a max price of 20 to all items with UnitPrice > 20. .Sp .Vb 2 \& $t_product\->match_pattern_hash(\*(Aq$_{UnitPrice} > 20\*(Aq); \& $t_product\->setElm($t_product\->{MATCH}, \*(AqUnitPrice\*(Aq, 20); .Ve .ie n .IP "%Data::Table::DEFAULTS" 4 .el .IP "\f(CW%Data::Table::DEFAULTS\fR" 4 .IX Item "%Data::Table::DEFAULTS" Store default settings, currently it contains \s-1CSV_DELIMITER\s0 (set to ','), \s-1CSV_QUALIFER\s0 (set to '"'), and \s-1OS\s0 (set to 0). see table::fromCSV, table::csv, table::fromTSV, table::tsv for details. .SS "Class Methods" .IX Subsection "Class Methods" Syntax: return_type method_name ( [ parameter [ = default_value ]] [, parameter [ = default_value ]] ) .PP If method_name starts with table::, this is an instance method, it can be used as \f(CW$t\fR\->method( parameters ), where \f(CW$t\fR is a table reference. .PP If method_name starts with Data::Table::, this is a class method, it should be called as Data::Table::method, e.g., \f(CW$t\fR = Data::Table::fromCSV(\*(L"filename.csv\*(R"). .PP Conventions for local variables: .PP .Vb 8 \& colID: either a numerical column index or a column name; \& rowIdx: numerical row index; \& rowIDsRef: reference to an array of column IDs; \& rowIdcsRef: reference to an array of row indices; \& rowRef, colRef: reference to an array of scalars; \& data: ref_to_array_of_ref_to_array of data values; \& header: ref to array of column headers; \& table: a table object, a blessed reference. 
.Ve .SS "Table Creation" .IX Subsection "Table Creation" .ie n .IP "table Data::Table::new ( $data = [], $header = [], $type = 0, $enforceCheck = 1)" 4 .el .IP "table Data::Table::new ( \f(CW$data\fR = [], \f(CW$header\fR = [], \f(CW$type\fR = 0, \f(CW$enforceCheck\fR = 1)" 4 .IX Item "table Data::Table::new ( $data = [], $header = [], $type = 0, $enforceCheck = 1)" create a new table. It returns a table object upon success, undef otherwise. \&\f(CW$data:\fR points to the spreadsheet data. \&\f(CW$header:\fR points to an array of column names. Before version 1.69, a column name must have at least one non-digit character. Since version 1 \&.69, this is relaxed. Although integer and numeric column names can now be accepted, when accessing a column by integer, it is first interpr eted as a column name. \&\f(CW$type:\fR 0 or 1 for row\-based/column\-based spreadsheet. \&\f(CW$enforceCheck:\fR 1/0 to turn on/off initial checking on the size of each row/column to make sure the data argument indeed points to a valid s tructure. In 1.63, we introduce constants Data::Table::ROW_BASED and Data::Table::COL_BASED as synonyms for \f(CW$type\fR. To create an empty Data::Table, use new Data::Table([], [], Data::Table::ROW_BASED); .ie n .IP "table table::subTable ($rowIdcsRef, $colIDsRef, $arg_ref)" 4 .el .IP "table table::subTable ($rowIdcsRef, \f(CW$colIDsRef\fR, \f(CW$arg_ref\fR)" 4 .IX Item "table table::subTable ($rowIdcsRef, $colIDsRef, $arg_ref)" create a new table, which is a subset of the original. It returns a table object. \&\f(CW$rowIdcsRef:\fR points to an array of row indices (or a true/false row mask array). \&\f(CW$colIDsRef:\fR points to an array of column IDs. The function make a copy of selected elements from the original table. Undefined \f(CW$rowIdcsRef\fR or \f(CW$colIDsRef\fR is interpreted as all rows or all columns. The elements in \f(CW$colIDsRef\fR may be modified as a side effect before version 1.62, fixed in 1.62. 
If \f(CW$arg_ref\fR\->{useRowMask} is set to 1, \f(CW$rowIdcsRef\fR is a true/false row mask array, where rows marked as \s-1TRUE\s0 will be returned. Row mask array is typically the Data::Table::OK set by match_string/match_pattern/match_pattern_hash methods. .IP "table table::clone" 4 .IX Item "table table::clone" make a clone of the original. It return a table object, equivalent to table::subTable(undef,undef). .ie n .IP "table Data::Table::fromCSV ($name_or_handler, $includeHeader = 1, $header = [""col1"", ... ], {OS=>$Data::Table::DEFAULTS{'\s-1OS\s0'}, delimiter=>$Data::Table::DEFAULTS{'\s-1CSV_DELIMITER\s0'}, qualifier=>$Data::Table::DEFAULTS{'\s-1CSV_QUALIFIER\s0'}, skip_lines=>0, skip_pattern=>undef, encoding=>$Data::Table::DEFAULTS{'\s-1ENCODING\s0'}})" 4 .el .IP "table Data::Table::fromCSV ($name_or_handler, \f(CW$includeHeader\fR = 1, \f(CW$header\fR = [``col1'', ... ], {OS=>$Data::Table::DEFAULTS{'\s-1OS\s0'}, delimiter=>$Data::Table::DEFAULTS{'\s-1CSV_DELIMITER\s0'}, qualifier=>$Data::Table::DEFAULTS{'\s-1CSV_QUALIFIER\s0'}, skip_lines=>0, skip_pattern=>undef, encoding=>$Data::Table::DEFAULTS{'\s-1ENCODING\s0'}})" 4 .IX Item "table Data::Table::fromCSV ($name_or_handler, $includeHeader = 1, $header = [col1, ... ], {OS=>$Data::Table::DEFAULTS{'OS'}, delimiter=>$Data::Table::DEFAULTS{'CSV_DELIMITER'}, qualifier=>$Data::Table::DEFAULTS{'CSV_QUALIFIER'}, skip_lines=>0, skip_pattern=>undef, encoding=>$Data::Table::DEFAULTS{'ENCODING'}})" create a table from a \s-1CSV\s0 file. return a table object. \&\f(CW$name_or_handler:\fR the \s-1CSV\s0 file name or an already opened file handler. If a handler is used, it's not closed upon return. To read from \s-1STDIN,\s0 use Data::Table::fromCSV(\e*STDIN, 1). \&\f(CW$includeHeader:\fR 0 or 1 to ignore/interpret the first line in the file as column names, If it is set to 0, the array in \f(CW$header\fR is used. If \f(CW$header\fR is not supplied, the default column names are \*(L"col1\*(R", \*(L"col2\*(R", ... 
optional named argument \s-1OS\s0 specifies under which operating system the \s-1CSV\s0 file was generated. 0 for \s-1UNIX, 1\s0 for \s-1PC\s0 and 2 for \s-1MAC.\s0 If not specified, \f(CW$Data::Table::DEFAULTS\fR{'\s-1OS\s0'} is used, which defaults to \s-1UNIX.\s0 Basically linebreak is defined as \*(L"\en\*(R", \*(L"\er\en\*(R" and \*(L"\er\*(R" for three systems, respectively. .Sp optional named argument delimiter and qualifier let the user replace comma and double-quote by other meaningful single characters. Exception: if the delimiter or the qualifier is a special symbol in regular expression, you must escape it by '\e'. For example, in order to use pipe symbol as the delimiter, you must specify the delimiter as '\e|'. .Sp optional named argument skip_lines lets you specify how many lines in the csv file should be skipped, before the data are interpreted. .Sp optional named argument skip_pattern lets you specify a regular expression. Lines that match the regular expression will be skipped. .Sp optional named argument encoding lets you specify an encoding method of the csv file. This option is added to fromCSV, fromTSV, fromFile since version 1.69. 
.Sp The following example reads a \s-1DOS\s0 format \s-1CSV\s0 file and writes a \s-1MAC\s0 format: .Sp .Vb 5 \& $t = Data::Table::fromCSV(\*(AqA_DOS_CSV_FILE.csv\*(Aq, 1, undef, {OS=>1}); \& $t\->csv(1, {OS=>2, file=>\*(AqA_MAC_CSV_FILE.csv\*(Aq}); \& open(SRC, \*(AqA_DOS_CSV_FILE.csv\*(Aq) or die "Cannot open A_DOS_CSV_FILE.csv to read!"; \& $t = Data::Table::fromCSV(\e*SRC, 1); \& close(SRC); .Ve .Sp The following example reads a non-standard \s-1CSV\s0 file with : as the delimiter, ' as the qualifier .Sp .Vb 10 \& my $s="col_A:col_B:col_C\en1:2, 3 or 5:3.5\enone:\*(Aqone:two\*(Aq:\*(Aqdouble\e", single\*(Aq\*(Aq\*(Aq"; \& open my $fh, "<", \e$s or die "Cannot open in\-memory file\en"; \& my $t_fh=Data::Table::fromCSV($fh, 1, undef, {delimiter=>\*(Aq:\*(Aq, qualifier=>"\*(Aq"}); \& close($fh); \& print $t_fh\->csv; \& # convert to the standard CSV (comma as the delimiter, double quote as the qualifier) \& # col_A,col_B,col_C \& # 1,"2, 3 or 5",3.5 \& # one,one:two,"double"", single\*(Aq" \& print $t\->csv(1, {delimiter=>\*(Aq:\*(Aq, qualifier=>"\*(Aq"}); # prints the csv file using the original definition .Ve .Sp The following example reads the bbb.csv file (included in the package) by skipping the first line (skip_lines=>1), then treats any line that starts with '#' (or space comma) as comments (skip_pattern=>'^\es*#'), using ':' as the delimiter. .Sp .Vb 1 \& $t = Data::Table::fromCSV("bbb.csv", 1, undef, {skip_lines=>1, delimiter=>\*(Aq:\*(Aq, skip_pattern=>\*(Aq^\es*#\*(Aq}); .Ve .Sp Use the optional named argument encoding to specify the file encoding method. \f(CW$t\fR = Data::Table::fromCSV(\*(L"bbb.csv\*(R", 1, undef, {encoding=>'\s-1UTF\-8\s0'}); .ie n .IP "table table::fromCSVi ($name, $includeHeader = 1, $header = [""col1"", ... ])" 4 .el .IP "table table::fromCSVi ($name, \f(CW$includeHeader\fR = 1, \f(CW$header\fR = [``col1'', ... ])" 4 .IX Item "table table::fromCSVi ($name, $includeHeader = 1, $header = [col1, ... ])" Same as Data::Table::fromCSV. 
However, this is an instant method (that's what 'i' stands for), which can be inherited. .ie n .IP "table Data::Table::fromTSV ($name, $includeHeader = 1, $header = [""col1"", ... ], {OS=>$Data::Table::DEFAULTS{'\s-1OS\s0'}, skip_lines=>0, skip_pattern=>undef, transform_element=>1, encoding=>$Data::Table::DEFAULTS{'\s-1ENCODING\s0'}})" 4 .el .IP "table Data::Table::fromTSV ($name, \f(CW$includeHeader\fR = 1, \f(CW$header\fR = [``col1'', ... ], {OS=>$Data::Table::DEFAULTS{'\s-1OS\s0'}, skip_lines=>0, skip_pattern=>undef, transform_element=>1, encoding=>$Data::Table::DEFAULTS{'\s-1ENCODING\s0'}})" 4 .IX Item "table Data::Table::fromTSV ($name, $includeHeader = 1, $header = [col1, ... ], {OS=>$Data::Table::DEFAULTS{'OS'}, skip_lines=>0, skip_pattern=>undef, transform_element=>1, encoding=>$Data::Table::DEFAULTS{'ENCODING'}})" create a table from a \s-1TSV\s0 file. return a table object. \&\f(CW$name:\fR the \s-1TSV\s0 file name or an already opened file handler. If a handler is used, it's not closed upon return. To read from \s-1STDIN,\s0 use Data::Table::fromTSV(\e*STDIN, 1). \&\f(CW$includeHeader:\fR 0 or 1 to ignore/interpret the first line in the file as column names, If it is set to 0, the array in \f(CW$header\fR is used. If \f(CW$header\fR is not supplied, the default column names are \*(L"col1\*(R", \*(L"col2\*(R", ... optional named argument \s-1OS\s0 specifies under which operating system the \s-1TSV\s0 file was generated. 0 for \s-1UNIX, 1\s0 for P C and 2 for \s-1MAC.\s0 If not specified, \f(CW$Data::Table::DEFAULTS\fR{'\s-1OS\s0'} is used, which defaults to \s-1UNIX.\s0 Basically linebreak is defined as \*(L"\en\*(R", \*(L"\er\en\*(R" and \*(L"\er\*(R" for three systems, respectively. Exception: if the delimiter or the qualifier is a special symbol in regular expression, you must escape it by '\e'. For example, in order to use pipe symbol as the delimiter, you must specify the delimiter as '\e|'. 
.Sp optional named argument skip_lines lets you specify how many lines in the csv file should be skipped, before the data are interpreted. .Sp optional named argument skip_pattern lets you specify a regular expression. Lines that match the regular expression will be skipped. .Sp optional named argument transform_element lets you switch on/off \et to tab, \eN to undef (etc.) transformation. See \s-1TSV FORMAT\s0 for details. However, elements are always transformed when exporting the table to tsv format, because not escaping an element containing a tab will be disastrous. .Sp optional named argument encoding enables one to provide an encoding method when opening the tsv file. .Sp See similar examples under Data::Table::fromCSV; .Sp Note: read \*(L"\s-1TSV FORMAT\*(R"\s0 section for details. .ie n .IP "table table::fromTSVi ($name, $includeHeader = 1, $header = [""col1"", ... ])" 4 .el .IP "table table::fromTSVi ($name, \f(CW$includeHeader\fR = 1, \f(CW$header\fR = [``col1'', ... ])" 4 .IX Item "table table::fromTSVi ($name, $includeHeader = 1, $header = [col1, ... ])" Same as Data::Table::fromTSV. However, this is an instant method (that's what 'i' stands for), which can be inherited. .ie n .IP "table Data::Table::fromFile ($file_name, $arg_ref = {linesChecked=>2, allowNumericHeader=>0, encoding=>$Data::Table::DEFAULTS{'\s-1ENCODING\s0'}})" 4 .el .IP "table Data::Table::fromFile ($file_name, \f(CW$arg_ref\fR = {linesChecked=>2, allowNumericHeader=>0, encoding=>$Data::Table::DEFAULTS{'\s-1ENCODING\s0'}})" 4 .IX Item "table Data::Table::fromFile ($file_name, $arg_ref = {linesChecked=>2, allowNumericHeader=>0, encoding=>$Data::Table::DEFAULTS{'ENCODING'}})" create a table from a text file. return a table object. \&\f(CW$file_name:\fR the file name (cannot take a file handler). linesChecked: the first number of lines used for guessing the input format. The delimiter will have to produce the same number of columns for these lines. 
By default only check the first 2 lines, 0 means all lines in the file. \&\f(CW$arg_ref\fR can take additional parameters, such as \s-1OS,\s0 has_header, delimiter, transform_element, etc. Encoding allows one to specify encoding methods used to open the file, which defaults to \s-1UTF\-8.\s0 .Sp fromFile is added after version 1.51. It relies on the following new methods to automatically figure out the correct file format in order to call fromCSV or fromTSV internally: .Sp .Vb 8 \& fromFileGuessOS($file_name, {encoding=>\*(AqUTF\-8\*(Aq}) \& returns integer, 0 for UNIX, 1 for PC, 2 for MAC \& fromFileGetTopLines($file_name, $os, $lineNumber, {encoding=>\*(AqUTF\-8\*(Aq}) # $os defaults to fromFileGuessOS($file_name), if not specified \& returns an array of strings, each string represents each row with linebreak removed. \& fromFileGuessDelimiter($lineArrayRef) # guess delimiter from ",", "\et", ":"; \& returns the guessed delimiter string. \& fromFileIsHeader($line_concent, $delimiter, $allowNumericHeader) # $delimiter defaults to $Data::Table::DEFAULTS{\*(AqCSV_DELIMITER\*(Aq} \& returns 1 or 0. .Ve .Sp It first ask fromFileGuessOS to figure out which \s-1OS\s0 (\s-1UNIX, PC\s0 or \s-1MAC\s0) generated the input file. The fetch the first linesChecked lines using fromFileGetTopLines. It then guesses the best delimiter using fromFileGuessDelimiter, then it checks if the first line looks like a column header row using fromFileIsHeader. Since fromFileGuessOS and fromFileGetTopLines needs to open/close the input file, these methods can only take file name, not file handler. If user specify formatting parameters in \f(CW$arg_ref\fR, the routine will skip the corresponding guess work. At the end, fromFile simply calls either fromCSV or fromTSV with \f(CW$arg_ref\fR forwarded. So if you call fromFile({transform_element=>0}) on a \s-1TSV\s0 file, transform_elment will be passed onto fromTSV calls internally. 
.Sp fromFileGuessOS finds the linebreak that gives shortest first line (in the priority of \s-1UNIX, PC, MAC\s0 upon tie). fromFileGuessDelimiter works based on the assumption that the correct delimiter will produce equal number of columns for the given rows. If multiple matches, it chooses the delimiter that gives maximum number of columns. If none matches, it returns the default delimiter. fromFileIsHeader works based on the assumption that no column header can be empty or numeric values. However, if we allow numeric column names (especially integer column names), set {allowNumericHeader => 1} .ie n .IP "table Data::Table::fromSQL ($dbh, $sql, $vars)" 4 .el .IP "table Data::Table::fromSQL ($dbh, \f(CW$sql\fR, \f(CW$vars\fR)" 4 .IX Item "table Data::Table::fromSQL ($dbh, $sql, $vars)" create a table from the result of an \s-1SQL\s0 selection query. It returns a table object upon success or undef otherwise. \&\f(CW$dbh:\fR a valid database handler. Typically \f(CW$dbh\fR is obtained from \s-1DBI\-\s0>connect, see \*(L"Interface to Database\*(R" or \s-1DBI\s0.pm. \&\f(CW$sql:\fR an \s-1SQL\s0 query string or a DBI::st object (starting in version 1.61). \&\f(CW$vars:\fR optional reference to an array of variable values, required if \f(CW$sql\fR contains '?'s which need to be replaced by the corresponding variable values upon execution, see \s-1DBI\s0.pm for details. Hint: in MySQL, Data::Table::fromSQL($dbh, 'show tables from test') will also create a valid table object. .Sp Data::Table::fromSQL now can take DBI::st instead of a \s-1SQL\s0 string. 
This is introduced, so that variable binding (such as \s-1CLOB/BLOB\s0) can be done outside the method, for example: .Sp .Vb 4 \& $sql = \*(Aqinsert into test_table (id, blob_data) values (1, :val)\*(Aq; \& $sth = $dbh\->prepare($sql); \& $sth\->bind_param(\*(Aq:val\*(Aq, $blob, {ora_type => SQLT_BIN}); \& Data::Table::fromSQL($dbh, $sth); .Ve .ie n .IP "table Data::Table::fromSQLi ($dbh, $sql, $vars)" 4 .el .IP "table Data::Table::fromSQLi ($dbh, \f(CW$sql\fR, \f(CW$vars\fR)" 4 .IX Item "table Data::Table::fromSQLi ($dbh, $sql, $vars)" Same as Data::Table::fromSQL. However, this is an instant method (that's what 'i' stands for), whic h can be inherited. .SS "Table Access and Properties" .IX Subsection "Table Access and Properties" .IP "int table::colIndex ($colID)" 4 .IX Item "int table::colIndex ($colID)" translate a column name into its numerical position, the first column has index 0 as in as any perl array. return \-1 for invalid column names. .Sp Since 1.69, we allow integer to be used as a column header. The integer \f(CW$colID\fR will first be checked against column names, if matched, the corresponding column index is returned. E.g., if column name for the 3rd column is \*(L"1\*(R", \fBcolIndex\fR\|(1) will return 2 instead of 1! In such case, if one need to access the second column, one has to access it by column name, i.e., \f(CW$t\fR\->col(($t\->header)[1]). .IP "int table::nofCol" 4 .IX Item "int table::nofCol" return number of columns. .IP "int table::nofRow" 4 .IX Item "int table::nofRow" return number of rows. .IP "int table::lastCol" 4 .IX Item "int table::lastCol" return the index of the last columns, i.e., nofCol \- 1. .IP "int table::lastRow" 4 .IX Item "int table::lastRow" return the index of the last rows, i.e., nofRow \- 1; This is syntax sugar. .Sp .Vb 3 \& # these two are equivalent \& foreach my $i (0 .. $t\->lastRow) \& foreach my $i (0 .. 
$t\->nofRow \- 1) .Ve .IP "bool table::isEmpty" 4 .IX Item "bool table::isEmpty" return whether the table has any column, introduced in 1.63. .IP "bool table::hasCol($colID)" 4 .IX Item "bool table::hasCol($colID)" returns whether the colID is a table column, introduced in 1.63. .IP "bool table::colName($colNumericIndex)" 4 .IX Item "bool table::colName($colNumericIndex)" returns the column name for a numeric column index, notice the first column has an index of 0. Introduced in 1.68. .ie n .IP "scalar table::elm ($rowIdx, $colID)" 4 .el .IP "scalar table::elm ($rowIdx, \f(CW$colID\fR)" 4 .IX Item "scalar table::elm ($rowIdx, $colID)" return the value of a table element at [$rowIdx, \f(CW$colID\fR], undef if \f(CW$rowIdx\fR or \f(CW$colID\fR is invalid. .ie n .IP "refto_scalar table::elmRef ($rowIdx, $colID)" 4 .el .IP "refto_scalar table::elmRef ($rowIdx, \f(CW$colID\fR)" 4 .IX Item "refto_scalar table::elmRef ($rowIdx, $colID)" return the reference to a table element at [$rowIdx, \f(CW$colID\fR], to allow possible modification. It returns undef for invalid \f(CW$rowIdx\fR or \f(CW$colID\fR. .IP "array table::header ($header)" 4 .IX Item "array table::header ($header)" Without argument, it returns an array of column names. Otherwise, use the new header. .IP "int table::type" 4 .IX Item "int table::type" return the implementation type of the table (row\-based/column\-based) at the time, be aware that the type of a table should be considered as volatile during method calls. 
.SS "Table Formatting" .IX Subsection "Table Formatting" .IP "string table::csv ($header, {OS=>$Data::Table::DEFAULTS{'\s-1OS\s0'}, file=>undef, delimiter=>$Data::Table::DEFAULTS{'\s-1CSV_DELIMITER\s0'}, qualifier=>$Data::Table::DEFAULTS{'\s-1CSV_QAULIFIER\s0'}})" 4 .IX Item "string table::csv ($header, {OS=>$Data::Table::DEFAULTS{'OS'}, file=>undef, delimiter=>$Data::Table::DEFAULTS{'CSV_DELIMITER'}, qualifier=>$Data::Table::DEFAULTS{'CSV_QAULIFIER'}})" return a string corresponding to the \s-1CSV\s0 representation of the table. \&\f(CW$header\fR controls whether to print the header line, 1 for yes, 0 for no. optional named argument \s-1OS\s0 specifies for which operating system the \s-1CSV\s0 file is generated. 0 for \s-1UNIX, 1\s0 for P C and 2 for \s-1MAC.\s0 If not specified, \f(CW$Data::Table::DEFAULTS\fR{'\s-1OS\s0'} is used. Basically linebreak is defined as \*(L"\en\*(R", \*(L"\er\en\*(R" and \*(L"\er\*(R" for three systems, respectively. if 'file' is given, the csv content will be written into it, besides returning the string. One may specify custom delimiter and qualifier if the other than default are desired. .IP "string table::tsv" 4 .IX Item "string table::tsv" return a string corresponding to the \s-1TSV\s0 representation of the table. \&\f(CW$header\fR controls whether to print the header line, 1 for yes, 0 for no. optional named argument \s-1OS\s0 specifies for which operating system the \s-1TSV\s0 file is generated. 0 for \s-1UNIX, 1\s0 for P C and 2 for \s-1MAC.\s0 If not specified, \f(CW$Data::Table::DEFAULTS\fR{'\s-1OS\s0'} is used. Basically linebreak is defined as \*(L"\en\*(R", \*(L"\er\en\*(R" and \*(L"\er\*(R" for three systems, respectively. if 'file' is given, the tsv content will be written into it, besides returning the string. .Sp Note: read \*(L"\s-1TSV FORMAT\*(R"\s0 section for details. 
.ie n .IP "string table::html ($colorArrayRef_or_colorHashRef = [""#D4D4BF"",""#ECECE4"",""#CCCC99""], $tag_tbl = {border => '1'}, $tag_tr = {align => 'left'}, $tag_th = {align => 'center'}, $tag_td = {col3 => 'align=""right"" valign=""bottom""', 4 => 'align=""left""'}, $l_portrait = 1, $callback = undef )" 4 .el .IP "string table::html ($colorArrayRef_or_colorHashRef = [``#D4D4BF'',``#ECECE4'',``#CCCC99''], \f(CW$tag_tbl\fR = {border => '1'}, \f(CW$tag_tr\fR = {align => 'left'}, \f(CW$tag_th\fR = {align => 'center'}, \f(CW$tag_td\fR = {col3 => 'align=``right'' valign=``bottom''', 4 => 'align=``left'''}, \f(CW$l_portrait\fR = 1, \f(CW$callback\fR = undef )" 4 .IX Item "string table::html ($colorArrayRef_or_colorHashRef = [#D4D4BF,#ECECE4,#CCCC99], $tag_tbl = {border => '1'}, $tag_tr = {align => 'left'}, $tag_th = {align => 'center'}, $tag_td = {col3 => 'align=right valign=bottom', 4 => 'align=left'}, $l_portrait = 1, $callback = undef )" return a string corresponding to a 'Portrait/Landscape'\-style html-tagged table. \&\f(CW$colorArrayRef_or_colorHashRef:\fR If a hash reference is provided, it will take three \s-1CSS\s0 class names for odd data rows, even data rows and for the header row. The default hash is {even=>\*(L"data_table_even\*(R", odd=>\*(L"data_table_odd\*(R", header=>\*(L"data_table_header\*(R"}. If a hash reference is not found, a reference to an array of three color strings is expected to be provided for backgrounds for even-row records, odd-row records, and the header row, respectively. A default color array (\*(L"#D4D4BF\*(R",\*(L"#ECECE4\*(R",\*(L"#CCCC99\*(R") will be used if \f(CW$colors\fR isn't defined. .Sp Since version 1.74, users can prevent default coloring by passing in a color array reference [\*(L"\*(R", \*(L"\*(R", \*(L"\*(R"]. .Sp Before version 1.59, the parameter can only accept an array reference. .Sp \&\f(CW$tag_tbl:\fR a reference to a hash that specifies any legal attributes such as name, border, id, class, etc. for the \s-1TABLE\s0 tag.
.Sp \&\f(CW$tag_tr:\fR a reference to a hash that specifies any legal attributes for the \s-1TR\s0 tag. .Sp \&\f(CW$tag_th:\fR a reference to a hash that specifies any legal attributes for the \s-1TH\s0 tag. .Sp \&\f(CW$tag_td:\fR a reference to a hash that specifies any legal attributes for the \s-1TD\s0 tag. .Sp Notice \f(CW$tag_tr\fR and \f(CW$tag_th\fR control all the rows and columns of the whole table. The keys of the hash are the attribute names in these cases. However, \f(CW$tag_td\fR is column specific, i.e., you should specify \s-1TD\s0 attributes for every column separately. The keys of %$tag_td are either column names or column indices, the value is a reference to a hash. E.g., \f(CW$tag_td\fR = {col3 => {'style'=>'background\-color:#cccc99;'}}. However, before version 1.74, the value is the full string to be inserted into the \s-1TD\s0 tag. E.g., \f(CW$tag_td\fR = {col3 => 'align=right valign=bottom'} only changes the \s-1TD\s0 tag in \*(L"col3\*(R" to be <\s-1TD\s0 align=right valign=bottom>;. This format is still supported for backward compatibility. .Sp \&\f(CW$portrait\fR controls the layout of the table. The default is 1, i.e., the table is shown in the \&\*(L"Portrait\*(R" style, like in Excel. 0 means \*(L"Landscape\*(R". Since version 1.59, tbody and thead tags are added to the portrait mode output. .Sp Since version 1.74, \f(CW$callback\fR is introduced to give users fine control on the tag for each cell, i.e., for each th/td cell. \f(CW$callback\fR is a subroutine reference, where the sub is expected to take parameters ($tag, \f(CW$row_index\fR, \f(CW$col_index\fR, \f(CW$col_name\fR, \f(CW$table\fR), \f(CW$tag\fR is reference to a hash containing existing \s-1TH/TD\s0 tags, the sub will return a new tag. The rest of the parameters give sub access to the identity of the table cell, as well as the table itself. .Sp In the following example, the callback function colors each UnitPrice cell based on whether its value is >=20 or <20.
It colors each Discontinued cell based on whether its value is \s-1TRUE\s0 or \s-1FALSE.\s0 One can also control the column header cells, which have a row index of \-1. That is the reason we use \*(L"$row >=0 \*(R" within callback to make sure the cell is not a column header. .Sp .Vb 11 \& $t=Data::Table::fromCSV("Data\-Table\-1.74/Product.csv",1,undef, {\*(AqOS\*(Aq=>1}); \& my $callback = sub { \& my ($tag, $row, $col, $colName, $table) = @_; \& if ($row >=0 && $colName eq \*(AqUnitPrice\*(Aq) { \& $tag\->{\*(Aqstyle\*(Aq} = \*(Aqbackground\-color:\*(Aq. (($table\->elm($row, $col)>=20) ? \*(Aq#fc8d59\*(Aq:\*(Aq#91bfdb\*(Aq) . \*(Aq;\*(Aq; \& } \& if ($row >=0 && $colName eq \*(AqDiscontinued\*(Aq) { \& $tag\->{\*(Aqstyle\*(Aq} = \*(Aqbackground\-color:\*(Aq. (($table\->elm($row, $col) eq \*(AqTRUE\*(Aq) ? \*(Aq#999999\*(Aq:\*(Aq#af8dc3\*(Aq) .\*(Aq;\*(Aq; \& } \& return $tag; \& }; \& \& print $t\->html(undef, undef, undef, undef, undef, undef, $callback); .Ve .Sp Attention: You will have to escape HTML-Entities yourself (for example '<' as '&lt;'), if you have characters in your table which need to be escaped. You can do this for example with the escapeHTML-function from \s-1CGI\s0.pm (or the HTML::Entities module). .Sp .Vb 3 \& use CGI qw(escapeHTML); \& [...] \& $t\->colMap($columnname, sub{escapeHTML($_)}); # for every column, where HTML\-Entities occur. .Ve .ie n .IP "string table::html2 ($colors = [""#D4D4BF"",""#ECECE4"",""#CCCC99""], $specs = {'name' => '', 'border' => '1', ...})" 4 .el .IP "string table::html2 ($colors = [``#D4D4BF'',``#ECECE4'',``#CCCC99''], \f(CW$specs\fR = {'name' => '', 'border' => '1', ...})" 4 .IX Item "string table::html2 ($colors = [#D4D4BF,#ECECE4,#CCCC99], $specs = {'name' => '', 'border' => '1', ...})" This method is deprecated. It's here for compatibility. It now simply calls the html method with \f(CW$portrait\fR = 0, see previous description. .Sp return a string corresponding to a \*(L"Landscape\*(R" html-tagged table.
This is useful to present a table with many columns, but very few entries. Check the above table::html for parameter descriptions. .IP "string table::wiki(...)" 4 .IX Item "string table::wiki(...)" This method accepts the same parameters as table::html, returns a wikitable instead. .IP "string table::wiki2(...)" 4 .IX Item "string table::wiki2(...)" This method accepts the same parameters as table::html2, returns a wikitable instead in landscape orientation. .SS "Table Operations" .IX Subsection "Table Operations" .ie n .IP "int table::setElm ($rowIdx, $colID, $val)" 4 .el .IP "int table::setElm ($rowIdx, \f(CW$colID\fR, \f(CW$val\fR)" 4 .IX Item "int table::setElm ($rowIdx, $colID, $val)" modify the value of a table element at [$rowIdx, \f(CW$colID\fR] to a new value \f(CW$val\fR. It returns 1 upon success, undef otherwise. In 1.68, setElm can manipulate multiple elements, i.e., \f(CW$rowIdx\fR and \f(CW$colIdx\fR can be references to an index array, and \fBsetElm()\fR will modifies all cells defined by the grid. .Sp .Vb 2 \& $t\->setElm([0..2], [\*(AqColA\*(Aq, \*(AqColB\*(Aq], \*(Aqnew value\*(Aq); \& $t\->setElm(0, [1..2], \*(Aqnew value\*(Aq); \& \& # puts a limit on the price of all expensive items \& $t_product\->match_pattern_hash(\*(Aq$_{UnitPrice} > 20\*(Aq); \& $t_product\->setElm($t_product\->{MATCH}, \*(AqUnitPrice\*(Aq, 20); .Ve .ie n .IP "int table::addRow ($rowRef, $rowIdx = table::nofRow, $arg_ref = {addNewCol => 0})" 4 .el .IP "int table::addRow ($rowRef, \f(CW$rowIdx\fR = table::nofRow, \f(CW$arg_ref\fR = {addNewCol => 0})" 4 .IX Item "int table::addRow ($rowRef, $rowIdx = table::nofRow, $arg_ref = {addNewCol => 0})" add a new row ($rowRef may point to the actual list of scalars, or it can be a hash_ref (supported since version 1.60)). If \f(CW$rowRef\fR points to a hash, the method will lookup the value of a field by ts column name: \f(CW$rowRef\fR\->{colName}, if not found, undef is used for that field. 
The new row will be referred as \f(CW$rowIdx\fR as the result. E.g., addRow($aRow, 0) will put the new row as the very first row. By default, it appends a row to the end. In 1.67, we support {addNewCol => 1}, if specified, a new column will be automatically created for each new element encountered in the \f(CW$rowRef\fR. .Sp .Vb 4 \& # automatically add a new column "aNewColumn" to $t, in order to hold the new value \& $t\->addRow({anExistingColumn => 123, aNewColumn => "XYZ"}, undef, {addNewCol => 1}); \& # $t only had one column, after this call, it will contain a new column \*(Aqcol2\*(Aq, in order to hold the new value \& $t\->addRow([123, "XYZ"], undef, {addNewCol => 1}); .Ve .Sp It returns 1 upon success, undef otherwise. .ie n .IP "refto_array table::delRow ( $rowIdx )" 4 .el .IP "refto_array table::delRow ( \f(CW$rowIdx\fR )" 4 .IX Item "refto_array table::delRow ( $rowIdx )" delete a row at \f(CW$rowIdx\fR. It will return the reference to the deleted row. .ie n .IP "refto_array table::delRows ( $rowIdcsRef )" 4 .el .IP "refto_array table::delRows ( \f(CW$rowIdcsRef\fR )" 4 .IX Item "refto_array table::delRows ( $rowIdcsRef )" delete rows in @$rowIdcsRef. It will return an array of deleted rows in the same order of \f(CW$rowIdcsRef\fR upon success. .ie n .IP "int table::addCol ($colRef, $colName, $colIdx = numCol)" 4 .el .IP "int table::addCol ($colRef, \f(CW$colName\fR, \f(CW$colIdx\fR = numCol)" 4 .IX Item "int table::addCol ($colRef, $colName, $colIdx = numCol)" add a new column ($colRef points to the actual data), the new column will be referred as \f(CW$colName\fR or \f(CW$colIdx\fR as the result. E.g., addCol($aCol, 'newCol', 0) will put the new column as the very first column. By default, append a column to the end. It will return 1 upon success or undef otherwise. In 1.68, \f(CW$colRef\fR can be a scalar, which is the default value that can be used to create the new column.
E.g., to create a new column with default value of undef, 0, 'default', respectively, one can do: .Sp .Vb 3 \& $t\->addCol(undef, \*(AqNewCol\*(Aq); \& $t\->addCol(0, \*(AqNewIntCol\*(Aq); \& $t\->addCol(\*(Aqdefault\*(Aq, \*(AqNewStringCol\*(Aq); .Ve .IP "refto_array table::delCol ($colID)" 4 .IX Item "refto_array table::delCol ($colID)" delete a column at \f(CW$colID\fR return the reference to the deleted column. .IP "arrayof_refto_array table::delCols ($colIDsRef)" 4 .IX Item "arrayof_refto_array table::delCols ($colIDsRef)" delete a list of columns, pointed by \f(CW$colIDsRef\fR. It will return an array of deleted columns in the same order of \f(CW$colIDsRef\fR upon success. .IP "refto_array table::rowRef ($rowIdx)" 4 .IX Item "refto_array table::rowRef ($rowIdx)" return a reference to the row at \f(CW$rowIdx\fR upon success or undef otherwise. .IP "refto_arrayof_refto_array table::rowRefs ($rowIdcsRef)" 4 .IX Item "refto_arrayof_refto_array table::rowRefs ($rowIdcsRef)" return a reference to array of row references upon success, undef otherwise. .IP "array table::row ($rowIdx)" 4 .IX Item "array table::row ($rowIdx)" return a copy of the row at \f(CW$rowIdx\fR upon success or undef otherwise. .IP "refto_hash table::rowHashRef ($rowIdx)" 4 .IX Item "refto_hash table::rowHashRef ($rowIdx)" return a reference to a hash, which contains a copy of the row at \f(CW$rowIdx\fR, upon success or undef otherwise. The keys in the hash are column names, and the values are corresponding elements in that row. The hash is a copy, therefore modifying the hash values doesn't change the original table. .IP "refto_array table::colRef ($colID)" 4 .IX Item "refto_array table::colRef ($colID)" return a reference to the column at \f(CW$colID\fR upon success. .IP "refto_arrayof_refto_array table::colRefs ($colIDsRef)" 4 .IX Item "refto_arrayof_refto_array table::colRefs ($colIDsRef)" return a reference to array of column references upon success. 
.IP "array table::col ($colID)" 4 .IX Item "array table::col ($colID)" return a copy to the column at \f(CW$colID\fR upon success or undef otherwise. .ie n .IP "int table::rename ($colID, $newName)" 4 .el .IP "int table::rename ($colID, \f(CW$newName\fR)" 4 .IX Item "int table::rename ($colID, $newName)" rename the column at \f(CW$colID\fR to a \f(CW$newName\fR (the newName must be valid, and should not be identical to any other existing column names). It returns 1 upon success or undef otherwise. .ie n .IP "refto_array table::replace ($oldColID, $newColRef, $newName)" 4 .el .IP "refto_array table::replace ($oldColID, \f(CW$newColRef\fR, \f(CW$newName\fR)" 4 .IX Item "refto_array table::replace ($oldColID, $newColRef, $newName)" replace the column at \f(CW$oldColID\fR by the array pointed by \f(CW$newColRef\fR, and renamed it to \f(CW$newName\fR. \f(CW$newName\fR is optional if you don't want to rename the column. It returns 1 upon success or undef otherwise. .ie n .IP "int table::swap ($colID1, $colID2)" 4 .el .IP "int table::swap ($colID1, \f(CW$colID2\fR)" 4 .IX Item "int table::swap ($colID1, $colID2)" swap two columns referred by \f(CW$colID1\fR and \f(CW$colID2\fR. It returns 1 upon success or undef otherwise. .ie n .IP "int table::moveCol($colID, $colIdx, $newColName)" 4 .el .IP "int table::moveCol($colID, \f(CW$colIdx\fR, \f(CW$newColName\fR)" 4 .IX Item "int table::moveCol($colID, $colIdx, $newColName)" move column referred by \f(CW$colID\fR to a new location \f(CW$colIdx\fR. If \f(CW$newColName\fR is specified, the column will be renamed as well. It returns 1 upon success or undef otherwise. .ie n .IP "int table::reorder($colIDRefs, $arg_ref)" 4 .el .IP "int table::reorder($colIDRefs, \f(CW$arg_ref\fR)" 4 .IX Item "int table::reorder($colIDRefs, $arg_ref)" Rearrange the columns according to the order specified in \f(CW$colIDRef\fR. Columns not specified in the reference array will be appended to the end! 
If one would like to drop columns not specified, set \f(CW$arg_ref\fR to {keepRest => 0}. \&\fBreorder()\fR changes the table itself, while subTable(undef, \f(CW$colIDRefs\fR) will return a new table. \fBreorder()\fR might also run faster than subTable, as elements may not need to be copied. .ie n .IP "int table::colMap ($colID, $fun)" 4 .el .IP "int table::colMap ($colID, \f(CW$fun\fR)" 4 .IX Item "int table::colMap ($colID, $fun)" foreach element in column \f(CW$colID\fR, map a function \f(CW$fun\fR to it. It returns 1 upon success or undef otherwise. This is a handy way to format a column. E.g. if a column named \s-1URL\s0 contains \s-1URL\s0 strings, colMap(\*(L"\s-1URL\*(R",\s0 sub {\*(L"$_\*(R"}) before \fBhtml()\fR will change each \s-1URL\s0 into a clickable hyper link while displayed in a web browser. .IP "int table::colsMap ($fun)" 4 .IX Item "int table::colsMap ($fun)" foreach row in the table, map a function \f(CW$fun\fR to it. It can do whatever colMap can do and more. It returns 1 upon success or undef otherwise. colMap function only gives \f(CW$fun\fR access to the particular element per row, while colsMap gives \f(CW$fun\fR full access to all elements per row. E.g. if two columns named duration and unit ([\*(L"2\*(R", \*(L"hrs\*(R"], [\*(L"30\*(R", \*(L"sec\*(R"]). colsMap(sub {$_\->[0] .= \*(L" (\*(R".$_\->[1].\*(L")\*(R"; }) will change each row into ([\*(L"2 hrs\*(R", \*(L"hrs\*(R"], [\*(L"30 sec\*(R", \*(L"sec\*(R"]). As shown, in the \f(CW$func\fR, a column element should be referred as \f(CW$_\fR\->[$colIndex]. .ie n .IP "int table::sort($colID1, $type1, $order1, $colID2, $type2, $order2, ... )" 4 .el .IP "int table::sort($colID1, \f(CW$type1\fR, \f(CW$order1\fR, \f(CW$colID2\fR, \f(CW$type2\fR, \f(CW$order2\fR, ... )" 4 .IX Item "int table::sort($colID1, $type1, $order1, $colID2, $type2, $order2, ... )" sort a table in place.
First sort by column \f(CW$colID1\fR in \f(CW$order1\fR as \f(CW$type1\fR, then sort by \f(CW$colID2\fR in \f(CW$order2\fR as \f(CW$type2\fR, ... \&\f(CW$type\fR is 0 for numerical and 1 for others; \&\f(CW$order\fR is 0 for ascending and 1 for descending; .Sp In 1.62, instead of memorize these numbers, you can use constants instead (notice constants do not start with '$'). Data::Table::NUMBER Data::Table::STRING Data::Table::ASC Data::Table::DESC .Sp Sorting is done in the priority of colID1, colID2, ... It returns 1 upon success or undef otherwise. Notice the table is rearranged as a result! This is different from perl's list sort, which returns a sorted copy while leave the original list untouched, the authors feel inplace sorting is more natural. .Sp table::sort can take a user supplied operator, this is useful when neither numerical nor alphabetic order is correct. .Sp .Vb 10 \& $Well=["A_1", "A_2", "A_11", "A_12", "B_1", "B_2", "B_11", "B_12"]; \& $t = Data::Table\->new([$Well], ["PlateWell"], 1); \& $t\->sort("PlateWell", 1, 0); \& print join(" ", $t\->col("PlateWell")); \& # prints: A_1 A_11 A_12 A_2 B_1 B_11 B_12 B_2 \& # in string sorting, "A_11" and "A_12" appears before "A_2"; \& my $my_sort_func = sub { \& my @a = split /_/, $_[0]; \& my @b = split /_/, $_[1]; \& my $res = ($a[0] cmp $b[0]) || (int($a[1]) <=> int($b[1])); \& }; \& $t\->sort("PlateWell", $my_sort_func, 0); \& print join(" ", $t\->col("PlateWell")); \& # prints the correct order: A_1 A_2 A_11 A_12 B_1 B_2 B_11 B_12 .Ve .ie n .IP "table table::match_pattern ($pattern, $countOnly)" 4 .el .IP "table table::match_pattern ($pattern, \f(CW$countOnly\fR)" 4 .IX Item "table table::match_pattern ($pattern, $countOnly)" return a new table consisting those rows evaluated to be true by \f(CW$pattern\fR upon success or undef otherwise. If \f(CW$countOnly\fR is set to 1, it simply returns the number of rows that matches the string without making a new copy of table. \f(CW$countOnly\fR is 0 by default. 
.Sp Side effect: \f(CW@Data::Table::OK\fR (should use \f(CW$t\fR\->{\s-1OK\s0} after 1.62) stores a true/false array for the original table rows. Using it, users can find out what are the rows being selected/unselected. Side effect: \f(CW@Data::Table::MATCH\fR stores a reference to an array containing all row indices for matched rows. .Sp In the \f(CW$pattern\fR string, a column element should be referred as \f(CW$_\fR\->[$colIndex]. E.g., match_pattern('$_\->[0]>3 && \f(CW$_\fR\->[1]=~/^L') retrieve all the rows where its first column is greater than 3 and second column starts with letter 'L'. Notice it only takes colIndex, column names are not acceptable here! .ie n .IP "table table::match_pattern_hash ($pattern, $countOnly)" 4 .el .IP "table table::match_pattern_hash ($pattern, \f(CW$countOnly\fR)" 4 .IX Item "table table::match_pattern_hash ($pattern, $countOnly)" return a new table consisting those rows evaluated to be true by \f(CW$pattern\fR upon success or undef otherwise. If \f(CW$countOnly\fR is set to 1, it simply returns the number of rows that matches the string without making a new copy of table. \f(CW$countOnly\fR is 0 by default. .Sp Side effect: \f(CW@Data::Table::OK\fR stores a reference to a true/false array for the original table rows. Using it, users can find out what are the rows being selected/unselected. Side effect: \f(CW@Data::Table::MATCH\fR stores a reference to an array containing all row indices for matched rows. .Sp In the \f(CW$pattern\fR string, a column element should be referred as ${column_name}. \&\fBmatch_pattern_hash()\fR is added in 1.62. The difference between this method and match_pattern is each row is fed to the pattern as a hash \f(CW%_\fR. In the case of match_pattern, each row is fed as an array ref \f(CW$_\fR. The pattern for \fBmatch_pattern_hash()\fR becomes much cleaner. 
.Sp If a table has two columns: Col_A as the 1st column and Col_B as the 2nd column, a filter \*(L"Col_A > 2 \s-1AND\s0 Col_B < 2\*(R" is written before as \f(CW$t\fR\->match_pattern('$_\->[0] > 2 && \f(CW$_\fR\->[1] <2'); where we need to figure out \f(CW$t\fR\->colIndex('Col_A') is 0 and \f(CW$t\fR\->colIndex('Col_B') is 1, in order to build the pattern. Now you can use column name directly in the pattern: \f(CW$t\fR\->match_pattern_hash('$_{Col_A} >2 && \f(CW$_\fR{Col_B} <2'); This method creates \f(CW$t\fR\->{\s-1OK\s0}, as well as \f(CW@Data::Table::OK\fR, same as \fBmatch_pattern()\fR. .Sp Simple boolean operators such as and/or can be directly put into the pattern string. More complex logic can also be supported in the example below: .Sp .Vb 5 \& my $t= Data::Table\->new([[2,5,\*(AqJan\*(Aq], [1,6,\*(AqFeb\*(Aq], [\-3,2,\*(AqApr\*(Aq], [6,\-4,\*(AqDec\*(Aq]], [\*(AqX\*(Aq,\*(AqY\*(Aq,\*(AqMonth\*(Aq], 0); \& # we need to use our instead of my, so that %Q1 is accessible within match_pattern_hash \& our %Q1 = (\*(AqJan\*(Aq=>1, \*(AqFeb\*(Aq=>1, \*(AqMar\*(Aq=>1); \& # find records belongin to Q1 months, we need to use %::Q1 to access the Q1 defined outside Data::Table \& $t2=$t\->match_pattern_hash(\*(Aqexists $::Q1{$_{Month}}\*(Aq); .Ve .Sp similarly, subroutines can be accessed inside match_pattern_hash using \*(L"::\*(R": .Sp .Vb 5 \& sub in_Q1 { \& my $x = shift; \& return ($x eq \*(AqJan\*(Aq or $x eq \*(AqFeb\*(Aq or $x eq \*(AqMar\*(Aq); \& } \& $t2=$t\->match_pattern_hash(\*(Aq::in_Q1($_{Month})\*(Aq); .Ve .Sp However, such usage is discouraged, as \fBmatch_pattern_hash()\fR does not throw errors when the pattern is invalid. For complex filtering logic, we strongly recommend you stick to row-based looping. 
.ie n .IP "table table::match_string ($s, $caseIgnore, $countOnly)" 4 .el .IP "table table::match_string ($s, \f(CW$caseIgnore\fR, \f(CW$countOnly\fR)" 4 .IX Item "table table::match_string ($s, $caseIgnore, $countOnly)" return a new table consisting those rows contains string \f(CW$s\fR in any of its fields upon success, undef otherwise. if \f(CW$caseIgnore\fR evaluated to true, case will is be ignored (s/$s/i). If \f(CW$countOnly\fR is set to 1, it simply returns the number of rows that matches the string without making a new copy of table. \f(CW$countOnly\fR is 0 by default. .Sp Side effect: \f(CW@Data::Table::OK\fR stores a reference to a true/false array for the original table rows. Side effect: \f(CW@Data::Table::MATCH\fR stores a reference to an array containing all row indices for matched rows. Using it, users can find out what are the rows being selected/unselected. The \f(CW$s\fR string is actually treated as a regular expression and applied to each row element, therefore one can actually specify several keywords by saying, for instance, match_string('One|Other'). .ie n .IP "table table::rowMask($mask, $complement)" 4 .el .IP "table table::rowMask($mask, \f(CW$complement\fR)" 4 .IX Item "table table::rowMask($mask, $complement)" mask is reference to an array, where elements are evaluated to be true or false. The size of the mask must be equal to the nofRow of the table. return a new table consisting those rows where the corresponding mask element is true (or false, when complement is set to true). .Sp E.g., \f(CW$t1\fR=$tbl\->match_string('keyword'); \f(CW$t2\fR=$tbl\->rowMask(\e@Data::Table::OK, 1) creates two new tables. \f(CW$t1\fR contains all rows match 'keyword', while \f(CW$t2\fR contains all other rows. .Sp mask is reference to an array, where elements are evaluated to be true or false. The size of the mask must be equal to the nofRow of the table. 
return a new table consisting those rows where the corresponding mask element is true (or false, when complement is set to true). .Sp E.g., \f(CW$t1\fR=$tbl\->match_string('keyword'); \f(CW$t2\fR=$tbl\->rowMask(\e@Data::Table::OK, 1) creates two new tables. \f(CW$t1\fR contains all rows match 'keyword', while \&\f(CW$t2\fR contains all other rows. .IP "table table::iterator({$reverse => 0})" 4 .IX Item "table table::iterator({$reverse => 0})" Returns a reference to a enumerator routine, which enables one to loop through each table row. If \f(CW$reverse\fR is set to 1, it will enumerate backward. The convenience here is each row is fetch as a rowHashRef, so one can easily access row elements by name. .Sp .Vb 5 \& my $next = $t_product\->iterator(); \& while (my $row = $next\->()) { \& # have access to a row as a hash reference, access row number by &$next(1); \& $t_product\->setElm($next\->(1), \*(AqProductName\*(Aq, \*(AqNew! \*(Aq.$row\->{ProductName}); \& } .Ve .Sp In this example, each \f(CW$row\fR is fetched as a hash reference, so one can access the elements by \f(CW$row\fR\->{colName}. Be aware that the elements in the hash is a copy of the original table elements, so modifying \f(CW$row\fR\->{colName} does not modify the original table. If table modification is intended, one needs to obtain the row index of the returned row. \f(CW$next\fR\->(1) call with a non-empty argument returns the row index of the record that was previously fetched with \f(CW$next\fR\->(). In this example, one uses the row index to modify the original table. .ie n .IP "table table::each_group($colsToGroupBy, $funsToApply)" 4 .el .IP "table table::each_group($colsToGroupBy, \f(CW$funsToApply\fR)" 4 .IX Item "table table::each_group($colsToGroupBy, $funsToApply)" Primary key columns are specified in \f(CW$colsToGroupBy\fR. All rows are grouped by primary keys first (keys sorted as string). Then for each group, subroutines \f(CW$funToAppy\fR is applied to corresponding rows. 
\&\f(CW$funToApply\fR is passed two parameters ($tableRef, \f(CW$rowIDsRef\fR). All rows sharing the key are passed in as a Data::Table object (with all columns and in the order of ascending row index) in the first parameter. The second optional parameter contains an array of row indices of the group members. Since all rows in the passed-in table contain the same keys, the key value can be obtained from its first table row. .ie n .IP "table table::group($colsToGroupBy, $colsToCalculate, $funsToApply, $newColNames, $keepRestCols)" 4 .el .IP "table table::group($colsToGroupBy, \f(CW$colsToCalculate\fR, \f(CW$funsToApply\fR, \f(CW$newColNames\fR, \f(CW$keepRestCols\fR)" 4 .IX Item "table table::group($colsToGroupBy, $colsToCalculate, $funsToApply, $newColNames, $keepRestCols)" Primary key columns are specified in \f(CW$colsToGroupBy\fR. All rows are grouped by primary keys first. Then for each group, an array of subroutines (in \f(CW$funsToApply\fR) are applied to corresponding columns and yield a list of new columns (specified in \f(CW$newColNames\fR). .Sp \&\f(CW$colsToGroupBy\fR, \f(CW$colsToCalculate\fR are references to array of colIDs. \f(CW$funsToApply\fR is a reference to array of subroutine references. \f(CW$newColNames\fR is a reference to array of new column name strings. If specified, the size of arrays pointed by \f(CW$colsToCalculate\fR, \f(CW$funsToApply\fR and \f(CW$newColNames\fR should be identical. A column may be used more than once in \f(CW$colsToCalculate\fR. .Sp \&\f(CW$keepRestCols\fR defaults to 1 (was introduced as 0 in 1.64, changed to 1 in 1.66 for backward compatibility), otherwise, the remaining columns are returned with the first encountered value of that group. .Sp E.g., an employee salary table \f(CW$t\fR contains the following columns: Name, Sex, Department, Salary.
(see examples in the \s-1SYNOPSIS\s0) .Sp .Vb 1 \& $t2 = $t\->group(["Department","Sex"],["Name", "Salary"], [sub {scalar @_}, \e&average], ["Nof Employee", "Average Salary"], 0); .Ve .Sp Department, Sex are used together as the primary key columns, a new column \*(L"Nof Employee\*(R" is created by counting the number of employee names in each group, a new column \*(L"Average Salary\*(R" is created by averaging the Salary data falled into each group. As the result, we have the head count and average salary information for each (Department, Sex) pair. With your own functions (such as sum, product, average, standard deviation, etc), group method is very handy for accounting purpose. If primary key columns are not defined, all records will be treated as one group. .Sp .Vb 1 \& $t2 = $t\->group(undef,["Name", "Salary"], [sub {scalar @_}, \e&average], ["Nof Employee", "Average Salary"], 0); .Ve .Sp The above statement will output the total number of employees and their average salary as one line. .ie n .IP "table table::pivot($colToSplit, $colToSplitIsStringOrNumeric, $colToFill, $colsToGroupBy, $keepRestCols)" 4 .el .IP "table table::pivot($colToSplit, \f(CW$colToSplitIsStringOrNumeric\fR, \f(CW$colToFill\fR, \f(CW$colsToGroupBy\fR, \f(CW$keepRestCols\fR)" 4 .IX Item "table table::pivot($colToSplit, $colToSplitIsStringOrNumeric, $colToFill, $colsToGroupBy, $keepRestCols)" Every unique values in a column (specified by \f(CW$colToSplit\fR) become a new column. undef value become \*(L"\s-1NULL\*(R".\s0 \f(CW$colToSplitIsStringOrNumeric\fR is set to numeric (0 or Data::Table:NUMBER), the new column names are prefixed by \*(L"oldColumnName=\*(R". The new cell element is filled by the value specified by \f(CW$colToFill\fR (was 1/0 before version 1.63). 
.Sp Note: yes, it seems I made an incompatible change in version 1.64, where \f(CW$colToSplitIsStringOrNumber\fR used to be \f(CW$colToSplitIsNumeric\fR, where 0 meant \s-1STRING\s0 and 1 meant \s-1NUMBER.\s0 Now it is opposite. However, I also added auto-type detection code, that this parameter essentially is auto-guessed and most old code should behave the same as before. .Sp When primary key columns are specified by \f(CW$colsToGroupBy\fR, all records sharing the same primary key collapse into one row, with values in \f(CW$colToFill\fR filling the corresponding new columns. If \f(CW$colToFill\fR is not specified, a cell is filled with the number of records fall into that cell. .Sp \&\f(CW$colToSplit\fR and \f(CW$colToFill\fR are colIDs. \f(CW$colToSplitIsNumeric\fR is 1/0. \f(CW$colsToGroupBy\fR is a reference to array of colIDs. \f(CW$keepRestCols\fR is 1/0, by default is 0. If \f(CW$keepRestCols\fR is off, only primary key columns and new columns are exported, otherwise, all the rest columns are exported as well. .Sp E.g., applying pivot method to the resultant table of the example of the group method. .Sp .Vb 1 \& $t2\->pivot("Sex", 0, "Average Salary",["Department"]); .Ve .Sp This creates a 2x3 table, where Departments are use as row keys, Sex (female and male) become two new columns. \*(L"Average Salary\*(R" values are used to fill the new table elements. Used together with group method, pivot method is very handy for accounting type of analysis. If \f(CW$colsToGroupBy\fR is left as undef, all rows are treated as one group. If \f(CW$colToSplit\fR is left as undef, the method will generate a column named \*(L"(all)\*(R" that matches all records share the corresponding primary key. 
.ie n .IP "table table::melt($keyCols, $variableCols, $arg_ref)" 4 .el .IP "table table::melt($keyCols, \f(CW$variableCols\fR, \f(CW$arg_ref\fR)" 4 .IX Item "table table::melt($keyCols, $variableCols, $arg_ref)" The idea of \fBmelt()\fR and \fBcast()\fR is taken from Hadley Wickham's Reshape package in R language. A table is first \fBmelt()\fR into a tall-skinny format, where measurements are stored in the format of a variable-value pair per row. Such a format can then be easily \fBcast()\fR into various contingency tables. .Sp One needs to specify the columns consisting of primary keys, and the columns that are considered as variable columns. The output variable column is named 'variable' unless specified by \f(CW$arg_ref\fR{variableColName}. The output value column is named 'value', unless specified in \f(CW$arg_ref\fR{valueColName}. By default \s-1NULL\s0 values are not output, unless \f(CW$arg_ref\fR{skip_NULL} is set to false. By default empty string values are kept, unless one sets skip_empty to 1.
.Sp .Vb 7 \& For each object (id), we measure variable x1 and x2 at two time points \& $t = new Data::Table([[1,1,5,6], [1,2,3,5], [2,1,6,1], [2,2,2,4]], [\*(Aqid\*(Aq,\*(Aqtime\*(Aq,\*(Aqx1\*(Aq,\*(Aqx2\*(Aq], Data::Table::ROW_BASED); \& # id time x1 x2 \& # 1 1 5 6 \& # 1 2 3 5 \& # 2 1 6 1 \& # 2 2 2 4 \& \& # melting a table into a tall\-and\-skinny table \& $t2 = $t\->melt([\*(Aqid\*(Aq,\*(Aqtime\*(Aq]); \& #id time variable value \& # 1 1 x1 5 \& # 1 1 x2 6 \& # 1 2 x1 3 \& # 1 2 x2 5 \& # 2 1 x1 6 \& # 2 1 x2 1 \& # 2 2 x1 2 \& # 2 2 x2 4 \& \& # casting the table, &average is a method to calculate mean \& # for each object (id), we calculate average value of x1 and x2 over time \& $t3 = $t2\->cast([\*(Aqid\*(Aq],\*(Aqvariable\*(Aq,Data::Table::STRING,\*(Aqvalue\*(Aq, \e&average); \& # id x1 x2 \& # 1 4 5.5 \& # 2 4 2.5 .Ve .ie n .IP "table table::cast($colsToGroupBy, $colToSplit, $colToSplitIsStringOrNumeric, $colToCalculate, $funToApply)" 4 .el .IP "table table::cast($colsToGroupBy, \f(CW$colToSplit\fR, \f(CW$colToSplitIsStringOrNumeric\fR, \f(CW$colToCalculate\fR, \f(CW$funToApply\fR)" 4 .IX Item "table table::cast($colsToGroupBy, $colToSplit, $colToSplitIsStringOrNumeric, $colToCalculate, $funToApply)" see \fBmelt()\fR, as \fBmelt()\fR and \fBcast()\fR are meant to use together. .Sp The table has been melten before. \fBcast()\fR group the table according to primary keys specified in \f(CW$colsToGroupBy\fR. For each group of objects sharing the same id, it further groups values (specified by \f(CW$colToCalculate\fR) according to unique variable names (specified by \f(CW$colToSplit\fR). Then it applies subroutine \f(CW$funToApply\fR to obtain an aggregate value. For the output, each unique primary key will be a row, each unique variable name will become a column, the cells are the calculated aggregated value. .Sp If \f(CW$colsToGroupBy\fR is undef, all rows are treated as within the same group. 
If \f(CW$colToSplit\fR is undef, a new column \*(L"(all)\*(R" is used to hold the results. .Sp .Vb 10 \& $t = Data::Table\->new( # create an employee salary table \& [ \& [\*(AqTom\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 65000], \& [\*(AqJohn\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 75000], \& [\*(AqTom\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 65000], \& [\*(AqJohn\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 75000], \& [\*(AqPeter\*(Aq, \*(Aqmale\*(Aq, \*(AqHR\*(Aq, 85000], \& [\*(AqMary\*(Aq, \*(Aqfemale\*(Aq, \*(AqHR\*(Aq, 80000], \& [\*(AqNancy\*(Aq, \*(Aqfemale\*(Aq, \*(AqIT\*(Aq, 55000], \& [\*(AqJack\*(Aq, \*(Aqmale\*(Aq, \*(AqIT\*(Aq, 88000], \& [\*(AqSusan\*(Aq, \*(Aqfemale\*(Aq, \*(AqHR\*(Aq, 92000] \& ], \& [\*(AqName\*(Aq, \*(AqSex\*(Aq, \*(AqDepartment\*(Aq, \*(AqSalary\*(Aq], Data::Table::ROW_BASED); \& \& # get a Department x Sex contingency table, get average salary across all four groups \& print $t\->cast([\*(AqDepartment\*(Aq], \*(AqSex\*(Aq, Data::Table::STRING, \*(AqSalary\*(Aq, \e&average)\->csv(1); \& Department,female,male \& IT,55000,73600 \& HR,86000,85000 \& # get average salary for each department \& print $t\->cast([\*(AqDepartment\*(Aq], undef, Data::Table::STRING, \*(AqSalary\*(Aq, \e&average)\->csv(1); \& Department,(all) \& IT,70500 \& HR,85666.6666666667 \& \& # get average salary for each gender \& print $t\->cast([\*(AqSex\*(Aq], undef, Data::Table::STRING, \*(AqSalary\*(Aq, \e&average)\->csv(1); \& Sex,(all) \& male,75500 \& female,75666.6666666667 \& \& # get average salary for all records \& print $t\->cast(undef, undef, Data::Table::STRING, \*(AqSalary\*(Aq, \e&average)\->csv(1); \& (all) \& 75555.5555555556 .Ve .SS "Table-Table Manipulations" .IX Subsection "Table-Table Manipulations" .ie n .IP "int table::rowMerge ($tbl, $argRef)" 4 .el .IP "int table::rowMerge ($tbl, \f(CW$argRef\fR)" 4 .IX Item "int table::rowMerge ($tbl, $argRef)" Append all the rows in the table object \f(CW$tbl\fR to the original rows. 
Before 1.62, the merging table \f(CW$tbl\fR must have the same number of columns as the original, and the columns must be in exactly the same order. It returns 1 upon success, undef otherwise. The table object \f(CW$tbl\fR should not be used afterwards, since it becomes part of the new table. .Sp Since 1.62, you may provide {byName =>1, addNewCol=>1} as \f(CW$argRef\fR. If byName is set to 1, the columns in \f(CW$tbl\fR do not need to be in the same order as they are in the first table, instead the column name is used for the matching. If addNewCol is set to 1, if \f(CW$tbl\fR contains a new column name that does not already exist in the first table, this new column will be automatically added to the resultant table. Typically, you want to specify these two options simultaneously. .ie n .IP "int table::colMerge ($tbl, $argRef)" 4 .el .IP "int table::colMerge ($tbl, \f(CW$argRef\fR)" 4 .IX Item "int table::colMerge ($tbl, $argRef)" Append all the columns in table object \f(CW$tbl\fR to the original columns. Table \f(CW$tbl\fR must have the same number of rows as the original. It returns 1 upon success, undef otherwise. Table \f(CW$tbl\fR should not be used afterwards, since it becomes part of the new table. .Sp Since 1.62, you can specify {renameCol => 1} as \f(CW$argRef\fR. This is to auto fix any column name collision. If \f(CW$tbl\fR contains a column that already exists in the first table, it will be renamed (by a suffix _2) to avoid the collision. .ie n .IP "table table::join ($tbl, $type, $cols1, $cols2, $argRef)" 4 .el .IP "table table::join ($tbl, \f(CW$type\fR, \f(CW$cols1\fR, \f(CW$cols2\fR, \f(CW$argRef\fR)" 4 .IX Item "table table::join ($tbl, $type, $cols1, $cols2, $argRef)" Join two tables. 
The following join types are supported (defined by \f(CW$type\fR): .Sp 0: inner join 1: left outer join 2: right outer join 3: full outer join .Sp In 1.62, instead of memorizing these numbers, you can use constants (notice constants do not start with '$'). Data::Table::INNER_JOIN Data::Table::LEFT_JOIN Data::Table::RIGHT_JOIN Data::Table::FULL_JOIN .Sp \&\f(CW$cols1\fR and \f(CW$cols2\fR are references to array of colIDs, where rows with the same elements in all listed columns are merged. As the result table, columns listed in \f(CW$cols2\fR are deleted, before a new table is returned. .Sp The implementation is hash-join, the running time should be linear with respect to the sum of number of rows in the two tables (assume both tables fit in memory). .Sp If the non-key columns of the two tables share the same name, the routine will fail, as the result table cannot contain two columns of the same name. In 1.62, one can specify {renameCol=>1} as \f(CW$argRef\fR, so that the second column will be automatically renamed (with suffix _2) to avoid collision. .Sp If you would like to treat the NULLs in the key columns as empty string, set {NULLasEmpty => 1}. If you do not want to treat NULLs as empty strings, but you would still like the NULLs in two tables to be considered as equal (but not equal to ''), set {matchNULL => 1}. Obviously if NULLasEmpty is set to 1, matchNULL will have no effect. .SS "Internal Methods" .IX Subsection "Internal Methods" All internal methods are mainly implemented for use by other methods in the Table class. Users should avoid using them. Nevertheless, they are listed here for developers who would like to understand the code and may derive a new class from Data::Table. .IP "int table::rotate" 4 .IX Item "int table::rotate" convert the internal structure of a table between row-based and column-based. return 1 upon success, undef otherwise. 
.IP "string csvEscape($string, {delimiter=>, qualifier})" 4 .IX Item "string csvEscape($string, {delimiter=>, qualifier})" Encode a scalar into a CSV-formatted field. .Sp optional named arguments: delimiter and qualifier, in case user wants to use characters other than the defaults. The default delimiter and qualifier are taken from \f(CW$Data::Table::DEFAULTS\fR{'\s-1CSV_DELIMITER\s0'} (defaults to ',') and \f(CW$Data::Table::DEFAULTS\fR{'\s-1CSV_QUALIFIER\s0'} (defaults to '"'), respectively. .Sp Please note that this function only escapes one element in a table. To escape the whole table row, you need to join($delimiter, map {csvEscape($_)} \f(CW@row\fR) . \f(CW$endl\fR; \&\f(CW$endl\fR refers to End-of-Line, which you may or may not want to add, and it is OS-dependent. Therefore, csvEscape method is kept to the simplest form as an element transformer. .IP "refto_array parseCSV($string)" 4 .IX Item "refto_array parseCSV($string)" Break a \s-1CSV\s0 encoded string to an array of scalars (check it out, we did it the cool way). .Sp optional argument size: specify the expected number of fields after csv-split. optional named arguments: delimiter and qualifier, in case user wants to use characters other than the defaults. The default delimiter and qualifier are taken from \f(CW$Data::Table::DEFAULTS\fR{'\s-1CSV_DELIMITER\s0'} (defaults to ',') and \f(CW$Data::Table::DEFAULTS\fR{'\s-1CSV_QUALIFIER\s0'} (defaults to '"'), respectively. .IP "string tsvEscape($rowRef)" 4 .IX Item "string tsvEscape($rowRef)" Encode a scalar into a TSV-formatted string. .SH "TSV FORMAT" .IX Header "TSV FORMAT" There is no standard for \s-1TSV\s0 format as far as we know. \s-1CSV\s0 format can't handle binary data very well, therefore, we choose the \s-1TSV\s0 format to overcome this limitation. .PP We define \s-1TSV\s0 based on MySQL convention. .PP .Vb 3 \& "\e0", "\en", "\et", "\er", "\eb", "\*(Aq", "\e"", and "\e\e" are all escaped by \*(Aq\e\*(Aq in the TSV file. 
\& (Warning: MySQL treats \*(Aq\ef\*(Aq as \*(Aqf\*(Aq, and it\*(Aqs not escaped here) \& Undefined values are represented as \*(Aq\eN\*(Aq. .Ve .PP However, you can switch off this transformation by setting {transform_element => 0} in the fromTSV or tsv method. Previously, if a cell reads 'A line break is \en', it is read in as 'A line break is [return]' in memory. When using the tsv method to export, it is transformed back to 'A line break is \en'. However, if it is exported as a csv, the [return] will break the format. Now if transform_element is set to 0, the cell is stored as 'A line break is \en' in memory, so that csv export will be correct. However, do remember to set {transform_element => 0} in the tsv export method, otherwise, the cell will become 'A line break is \e\en'. Be aware that transform_element controls column headers as well. .SH "INTERFACE TO OTHER SOFTWARES" .IX Header "INTERFACE TO OTHER SOFTWARES" Spreadsheet is a very generic type, therefore Data::Table class provides an easy interface between databases, web pages, \s-1CSV/TSV\s0 files, graphics packages, etc. .PP Here is a summary (partially repeat) of some classic usages of Data::Table. .SS "Interface to Database and Web" .IX Subsection "Interface to Database and Web" .Vb 1 \& use DBI; \& \& $dbh= DBI\->connect("DBI:mysql:test", "test", "") or die $DBI::errstr; \& my $minAge = 10; \& $t = Data::Table::fromSQL($dbh, "select * from mytable where age >= ?", [$minAge]); \& print $t\->html; .Ve .SS "Interface to \s-1CSV/TSV\s0" .IX Subsection "Interface to CSV/TSV" .Vb 2 \& $t = fromFile("mydata.csv"); # after version 1.51 \& $t = fromFile("mydata.tsv"); # after version 1.51 \& \& $t = fromCSV("mydata.csv"); \& $t\->sort(1,1,0); \& print $t\->csv; \& \& Same for TSV .Ve .SS "Interface to Excel \s-1XLS/XLSX\s0" .IX Subsection "Interface to Excel XLS/XLSX" Read in two tables from NorthWind.xls file, writes them out to \s-1XLSX\s0 format. See Data::Table::Excel module for details. 
.PP .Vb 1 \& use Data::Table::Excel; \& \& my ($tableObjects, $tableNames)=xls2tables("NorthWind.xls"); \& $t_category = $tableObjects[0]; \& $t_product = $tableObjects[1]; \& \& tables2xlsx("NorthWind.xlsx", [$t_category, $t_product]); .Ve .SS "Interface to Graphics Package" .IX Subsection "Interface to Graphics Package" .Vb 1 \& use GD::Graph::points; \& \& $graph = GD::Graph::points\->new(400, 300); \& $t2 = $t\->match(\*(Aq$_\->[1] > 20 && $_\->[3] < 35.7\*(Aq); \& my $gd = $graph\->plot($t\->colRefs([0,2])); \& open(IMG, \*(Aq>mygraph.png\*(Aq) or die $!; \& binmode IMG; \& print IMG $gd\->png; \& close IMG; .Ve .SH "AUTHOR" .IX Header "AUTHOR" Copyright 1998\-2008, Yingyao Zhou & Guangzhou Zou. All rights reserved. .PP It was first written by Zhou in 1998, significantly improved and maintained by Zou since 1999. The authors thank Tong Peng and Yongchuang Tao for valuable suggestions. We also thank those who kindly reported bugs, some of them are acknowledged in the \*(L"Changes\*(R" file. .PP This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself. .PP Please send bug reports and comments to: easydatabase at gmail dot com. When sending bug reports, please provide the version of Table.pm, the version of Perl. .SH "SEE ALSO" .IX Header "SEE ALSO" .Vb 1 \& DBI, GD::Graph, Data::Table::Excel. .Ve