From 00e5c66a78483b00ecd67b0270dc90d9f7465c68 Mon Sep 17 00:00:00 2001 From: Robert Soulliere Date: Wed, 16 Feb 2011 13:01:40 -0500 Subject: [PATCH 1/1] Fix SQL in migrating data. Bug https://bugs.launchpad.net/evergreen/+bug/720068 reported by Demian Katz. --- 1.6/admin/migratingdata_1.6.xml | 59 ++++++++++++++------------- 2.0/admin/Upgrading-Evergreen-2.0.xml | 55 ++++++++++++------------- 2.0/admin/migratingdata_2.0.xml | 49 +++++++++++----------- 3 files changed, 82 insertions(+), 81 deletions(-) diff --git a/1.6/admin/migratingdata_1.6.xml b/1.6/admin/migratingdata_1.6.xml index 305f916f9f..cb665ae48b 100644 --- a/1.6/admin/migratingdata_1.6.xml +++ b/1.6/admin/migratingdata_1.6.xml @@ -308,7 +308,7 @@ extract_holdings --marcfile=clean.marc.xml --holding 999 --copyid 999i --map hol The option holdings.map refers to a file to be used for mapping subfields to the holdings data you would like extracted. Here is an example based on mapping holdings data to the 999 data field: TRUNCATE TABLE staging_items; -INSERT INTO staging_items (egid, hseq, l_call_num, l_barcode, +INSERT INTO staging_items (egid, hseq, l_callnum, l_barcode, l_location, l_owning_lib, l_circ_modifier FROM stdin; 40 0 HD3616.K853 U54 1997 30731100751928 STACKS FENNELL BOOK 41 1 HV6548.C3 S984 1998 30731100826613 STACKS FENNELL BOOK @@ -356,14 +356,14 @@ l_location, l_owning_lib, l_circ_modifier FROM stdin; Create a staging_items staging table to hold the holdings data: CREATE TABLE staging_items ( - callnum text, -- call number label + l_callnum text, -- call number label hseq int, -- call number label egid int, -- biblio.record_entry_id createdate date, - location text, - barcode text, - item_type text, - owning_lib text -- actor.org_unit.shortname + l_location text, + l_barcode text, + l_circ_modifier text, + l_owning_lib text -- actor.org_unit.shortname ); @@ -380,31 +380,31 @@ CREATE TABLE staging_items ( Generate shelving locations from your staging table. 
INSERT INTO asset.copy_location (name, owning_lib) -SELECT DISTINCT l.location, ou.id +SELECT DISTINCT l.l_location, ou.id FROM staging_items l - JOIN actor.org_unit ou ON (l.owning_lib = ou.shortname); + JOIN actor.org_unit ou ON (l.l_owning_lib = ou.shortname); Generate circulation modifiers from your staging table. INSERT INTO config.circ_modifier (code, name, description, sip2_media_type, magnetic_media) - SELECT DISTINCT item_type AS code, - item_type AS name, - LOWER(item_type) AS description, + SELECT DISTINCT l_circ_modifier AS code, + l_circ_modifier AS name, + LOWER(l_circ_modifier) AS description, '001' AS sip2_media_type, FALSE AS magnetic_media FROM staging_items - WHERE item_type NOT IN (SELECT code FROM config.circ_modifier); + WHERE l_circ_modifier NOT IN (SELECT code FROM config.circ_modifier); Generate call numbers from your staging table: INSERT INTO asset.call_number (creator,editor,record,label,owning_lib) -SELECT DISTINCT 1, 1, b.id, l.callnum, ou.id -FROM staging.staging_items l -JOIN actor.org_unit ou ON (l.owning_lib = ou.shortname); + SELECT DISTINCT 1, 1, l.egid, l.l_callnum, ou.id + FROM staging_items l + JOIN actor.org_unit ou ON (l.l_owning_lib = ou.shortname); @@ -417,27 +417,30 @@ STATUS, location, loan_duration, fine_level, circ_modifier, deposit, ref, call_n SELECT DISTINCT ou.id AS circ_lib, 1 AS creator, 1 AS editor, - l.createdate AS create_date, - l.barcode AS barcode, + l.createdate AS create_date, + l.l_barcode AS barcode, 0 AS STATUS, cl.id AS location, 2 AS loan_duration, 2 AS fine_level, - l.item_type AS circ_modifier, + l.l_circ_modifier AS circ_modifier, FALSE AS deposit, CASE - WHEN l.item_type = 'REFERENCE' THEN TRUE + WHEN l.l_circ_modifier = 'REFERENCE' THEN TRUE ELSE FALSE END AS ref, cn.id AS call_number - FROM staging.staging_items l - JOIN actor.org_unit ou - ON (l.owning_lib = ou.shortname) - JOIN asset.copy_location cl - ON (ou.id = cl.owning_lib AND l.location = cl.name) - JOIN asset.call_number cn - ON 
(ou.id = cn.owning_lib - AND l.callnum = cn.label); + FROM staging_items l + JOIN actor.org_unit ou + ON (l.l_owning_lib = ou.shortname) + JOIN asset.copy_location cl + ON (ou.id = cl.owning_lib AND l.l_location = cl.name) + JOIN metabib.real_full_rec m + ON (m.value = l.egid::text) + JOIN asset.call_number cn + ON (ou.id = cn.owning_lib + AND m.record = cn.record + AND l.l_callnum = cn.label); You should now have copies in your Evergreen database and should be able to search and find the bibliographic records with attached copies. diff --git a/2.0/admin/Upgrading-Evergreen-2.0.xml b/2.0/admin/Upgrading-Evergreen-2.0.xml index 284723e344..68c9f69dc8 100644 --- a/2.0/admin/Upgrading-Evergreen-2.0.xml +++ b/2.0/admin/Upgrading-Evergreen-2.0.xml @@ -8,9 +8,18 @@ to 2.0, including steps to upgrade OpenSRF. Before upgrading, it is important to carefully plan an upgrade strategy to minimize system downtime and service interruptions. All of the steps in this chapter are to be completed from the command line. + - In the following instructions, you are asked to perform certain steps as either the root or opensrf user. + Evergreen 2.0 has several software requirements: + + PostgreSQL: Version 8.4 is the minimum supported version of PostgreSQL. + Linux: Evergreen 2.0 has been tested on Debian Squeeze (6.0) and Ubuntu Lucid Lynx (10.04). If you are running an older version of these distributions, + you may want to upgrade before installing Evergreen 2.0. For instructions on upgrading these distributions, visit the + Debian or Ubuntu websites. + + In the following instructions, you are asked to perform certain steps as either the root or + opensrf user. Debian: To become the root user, issue the su command and enter the password of the root user. 
@@ -67,27 +76,21 @@ cd /home/opensrf/OpenSRF-1.6.2 Replace below with the following value for your distribution: - - - for Debian Lenny (5.0) - for Debian Squeeze (6.0) LinuxDebian - - for Ubuntu Hardy Heron (8.04) - LinuxUbuntu - for Ubuntu Lucid Lynx - (10.04) + (10.04)LinuxUbuntu - for CentOS 5 + for CentOS 5 + LinuxCentOS - for Red Hat Enterprise Linux 5 + for Red Hat Enterprise Linux 5 + LinuxRed Hat @@ -151,13 +154,15 @@ srfsh#
Upgrade <application>Evergreen</application> from 1.6.1 to 2.0 + 8.4 is the minimum supported version of PostgreSQL. Evergreen 2.0 has been tested on Debian Squeeze (6.0) and Ubuntu Lucid (10.04). If you are running an older version of + these distributions, you may want to upgrade before installing Evergreen 2.0. For instructions on upgrading these distributions, visit the + Debian or Ubuntu websites. As the opensrf user, download and extract Evergreen 2.0 -wget http://www.open-ils.org/downloads/exit -uEvergreen-ILS-2.0.1.tar.gz +wget http://www.open-ils.org/downloads/Evergreen-ILS-2.0.1.tar.gz tar xzf Evergreen-ILS-2.0.1.tar.gz For the latest edition of Evergreen 2.0, check the Evergreen download page at @@ -203,26 +208,16 @@ tar xzf Evergreen-ILS-2.0.1.tar.gz opensrf user and group: chown -R opensrf:opensrf /openils - - As the root user, build live-db-setup.pl for the cgi-bin - bootstrapping scripts and offline-config.pl for the offline staff client data uploader: - -cd /home/opensrf/Evergreen-ILS-2.0.1 -perl Open-ILS/src/support-scripts/eg_db_config.pl --create-bootstrap --create-offline \ ---user evergreen --password evergreen --hostname localhost --port 5432 \ ---database evergreen - - As the opensrf user, update server symlink in /openils/var/web/xul/: cd /openils/var/web/xul/ rm server -ln -s rel_1_6_1_5/server +ln -s rel_2_0_1/server - Update the Evergreen database: + Update the Evergreen database: It is recommended that you back up your Evergreen database in order to restore your data if anything goes wrong. @@ -232,9 +227,9 @@ psql -U evergreen -h localhost -f Open-ILS/src/sql/Pg/2.0.0-2.0.1-upgrade-db.sql - Run the reingest-1.6-2.0.pl script to generate an sql script. Then use the sql file to reingest bib records into your - evergreen database. 
This is required to make the new facet sidebar in OPAC search results work and to upgrade the keyword indexes to use - the revised NACO normalization routine + Run the reingest-1.6-2.0.pl script to generate an sql script. Then use the sql file to reingest bib records into your + evergreen database. This is required to make the new facet sidebar in OPAC search results work and to upgrade the keyword indexes to use + the revised NACO normalization routine. If you are running a large Evergreen installation, it is recommend that you examine the script first. Reingesting a large number of bibliographic records may take several hours. perl Open-ILS/src/sql/Pg/reingest-1.6-2.0.pl @@ -258,7 +253,7 @@ psql -U evergreen -h localhost -f Open-ILS/src/sql/Pg/2.0.0-2.0.1-upgrade-db.sql Update opensrf.xml with the database connection info: -perl Open-ILS/src/support-scripts/eg_db_config.pl --update-config --service all --user evergreen \ +perl Open-ILS/src/support-scripts/eg_db_config.pl --update-config --service all --create-offline --user evergreen \ --password evergreen --hostname localhost --port 5432 --database evergreen diff --git a/2.0/admin/migratingdata_2.0.xml b/2.0/admin/migratingdata_2.0.xml index f1121f0008..8cd94c3e63 100644 --- a/2.0/admin/migratingdata_2.0.xml +++ b/2.0/admin/migratingdata_2.0.xml @@ -327,7 +327,7 @@ BEGIN; TRUNCATE TABLE staging_items; -INSERT INTO staging_items (egid, hseq, l_call_num, l_barcode, l_location, +INSERT INTO staging_items (egid, hseq, l_callnum, l_barcode, l_location, l_owning_lib, l_circ_modifier FROM stdin; 40 0 HD3616.K853 U54 1997 30731100751928 STACKS FENNELL BOOK 41 1 HV6548.C3 S984 1998 30731100826613 STACKS FENNELL BOOK @@ -351,14 +351,14 @@ l_owning_lib, l_circ_modifier FROM stdin; Create a staging_items staging table to hold the holdings data: CREATE TABLE staging_items ( - callnum text, -- call number label + l_callnum text, -- call number label hseq int, -- call number label egid int, -- biblio.record_entry_id createdate 
date, - location text, - barcode text, - item_type text, - owning_lib text -- actor.org_unit.shortname + l_location text, + l_barcode text, + l_circ_modifier text, + l_owning_lib text -- actor.org_unit.shortname ); @@ -384,20 +384,20 @@ FROM staging_items l Generate circulation modifiers from your staging table. INSERT INTO config.circ_modifier (code, name, description, sip2_media_type, magnetic_media) - SELECT DISTINCT item_type AS code, - item_type AS name, - LOWER(item_type) AS description, + SELECT DISTINCT l_circ_modifier AS code, + l_circ_modifier AS name, + LOWER(l_circ_modifier) AS description, '001' AS sip2_media_type, FALSE AS magnetic_media FROM staging_items - WHERE item_type NOT IN (SELECT code FROM config.circ_modifier); + WHERE l_circ_modifier NOT IN (SELECT code FROM config.circ_modifier); Generate call numbers from your staging table: INSERT INTO asset.call_number (creator,editor,record,label,owning_lib) -SELECT DISTINCT 1, 1, b.id, l.callnum, ou.id +SELECT DISTINCT 1, 1, l.egid, l.l_callnum, ou.id FROM staging.staging_items l JOIN actor.org_unit ou ON (l.owning_lib = ou.shortname); @@ -412,27 +412,30 @@ STATUS, location, loan_duration, fine_level, circ_modifier, deposit, ref, call_n SELECT DISTINCT ou.id AS circ_lib, 1 AS creator, 1 AS editor, - l.createdate AS create_date, - l.barcode AS barcode, + l.createdate AS create_date, + l.l_barcode AS barcode, 0 AS STATUS, cl.id AS location, 2 AS loan_duration, 2 AS fine_level, - l.item_type AS circ_modifier, + l.l_circ_modifier AS circ_modifier, FALSE AS deposit, CASE - WHEN l.item_type = 'REFERENCE' THEN TRUE + WHEN l.l_circ_modifier = 'REFERENCE' THEN TRUE ELSE FALSE END AS ref, cn.id AS call_number - FROM staging.staging_items l - JOIN actor.org_unit ou - ON (l.owning_lib = ou.shortname) - JOIN asset.copy_location cl - ON (ou.id = cl.owning_lib AND l.location = cl.name) - JOIN asset.call_number cn - ON (ou.id = cn.owning_lib - AND l.callnum = cn.label); + FROM staging_items l + JOIN actor.org_unit ou + 
ON (l.l_owning_lib = ou.shortname) + JOIN asset.copy_location cl + ON (ou.id = cl.owning_lib AND l.l_location = cl.name) + JOIN metabib.real_full_rec m + ON (m.value = l.egid::text) + JOIN asset.call_number cn + ON (ou.id = cn.owning_lib + AND m.record = cn.record + AND l.l_callnum = cn.label); You should now have copies in your Evergreen database and should be able to search and find the bibliographic records with attached copies. -- 2.43.2