Docs: incorporating offline circ docs
author Jane Sandberg <sandbej@linnbenton.edu>
Thu, 9 Nov 2017 16:24:59 +0000 (10:24 -0600)
committer Jane Sandberg <sandbej@linnbenton.edu>
Thu, 9 Nov 2017 16:24:59 +0000 (10:24 -0600)
Signed-off-by: Jane Sandberg <sandbej@linnbenton.edu>
288 files changed:
Open-ILS/examples/fm_IDL.xml
Open-ILS/src/c-apps/oils_auth.c
Open-ILS/src/c-apps/oils_sql.c
Open-ILS/src/perlmods/lib/OpenILS/Application/AppUtils.pm
Open-ILS/src/perlmods/lib/OpenILS/Application/Circ.pm
Open-ILS/src/perlmods/lib/OpenILS/Application/Search/Biblio.pm
Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Driver/Pg/QueryParser.pm
Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Publisher/metabib.pm
Open-ILS/src/perlmods/lib/OpenILS/WWW/EGCatLoader/Record.pm
Open-ILS/src/sql/Pg/002.schema.config.sql
Open-ILS/src/sql/Pg/010.schema.biblio.sql
Open-ILS/src/sql/Pg/011.schema.authority.sql
Open-ILS/src/sql/Pg/040.schema.asset.sql
Open-ILS/src/sql/Pg/070.schema.container.sql
Open-ILS/src/sql/Pg/090.schema.action.sql
Open-ILS/src/sql/Pg/210.schema.serials.sql
Open-ILS/src/sql/Pg/300.schema.staged_search.sql
Open-ILS/src/sql/Pg/800.fkeys.sql
Open-ILS/src/sql/Pg/999.functions.global.sql
Open-ILS/src/sql/Pg/build-db.sh
Open-ILS/src/sql/Pg/t/lp1145213_test_func_asset.merge_record_assets.pg [new file with mode: 0644]
Open-ILS/src/sql/Pg/upgrade/1078.data.add_possibly_missing_billing_types.sql [new file with mode: 0644]
Open-ILS/src/sql/Pg/upgrade/1079.schema.fix_asset_merge.sql [new file with mode: 0644]
Open-ILS/src/sql/Pg/upgrade/1080.schema.lp1724246_cache_copy_visibility.sql [new file with mode: 0644]
Open-ILS/src/sql/Pg/upgrade/1081.schema.lp1724223-re-inherited.sql [new file with mode: 0644]
Open-ILS/src/sql/Pg/upgrade/1082.data.lp1731281-acvac-cleanup.sql [new file with mode: 0644]
Open-ILS/src/sql/Pg/version-upgrade/2.12.5-3.0-beta1-upgrade-db.sql [deleted file]
Open-ILS/src/sql/Pg/version-upgrade/2.12.6-2.12.7-upgrade-db.sql [new file with mode: 0644]
Open-ILS/src/sql/Pg/version-upgrade/2.12.6-3.0.0-upgrade-db.sql [new file with mode: 0644]
Open-ILS/src/sql/Pg/version-upgrade/3.0.0-3.0.1-upgrade-db.sql [new file with mode: 0644]
Open-ILS/src/support-scripts/sitemap_generator
Open-ILS/src/templates/opac/parts/place_hold.tt2
Open-ILS/src/templates/opac/parts/record/copy_table.tt2
Open-ILS/src/templates/opac/parts/result/table.tt2
Open-ILS/src/templates/staff/base.tt2
Open-ILS/src/templates/staff/base_js.tt2
Open-ILS/src/templates/staff/cat/bucket/copy/t_pending.tt2
Open-ILS/src/templates/staff/cat/bucket/copy/t_view.tt2
Open-ILS/src/templates/staff/cat/catalog/index.tt2
Open-ILS/src/templates/staff/cat/catalog/t_holds.tt2
Open-ILS/src/templates/staff/cat/share/z3950_strings.tt2
Open-ILS/src/templates/staff/cat/volcopy/t_attr_edit.tt2
Open-ILS/src/templates/staff/cat/volcopy/t_defaults.tt2
Open-ILS/src/templates/staff/cat/volcopy/t_edit.tt2
Open-ILS/src/templates/staff/circ/checkin/index.tt2
Open-ILS/src/templates/staff/circ/checkin/t_checkin_table.tt2
Open-ILS/src/templates/staff/circ/holds/index.tt2
Open-ILS/src/templates/staff/circ/holds/t_shelf_list.tt2
Open-ILS/src/templates/staff/circ/in_house_use/index.tt2
Open-ILS/src/templates/staff/circ/patron/index.tt2
Open-ILS/src/templates/staff/circ/patron/t_bill_history_xacts.tt2
Open-ILS/src/templates/staff/circ/patron/t_checkout.tt2
Open-ILS/src/templates/staff/circ/patron/t_edit.tt2
Open-ILS/src/templates/staff/circ/patron/t_holds_list.tt2
Open-ILS/src/templates/staff/circ/patron/t_items_out.tt2
Open-ILS/src/templates/staff/circ/patron/t_messages.tt2
Open-ILS/src/templates/staff/circ/patron/t_summary.tt2
Open-ILS/src/templates/staff/circ/patron/t_surveys.tt2
Open-ILS/src/templates/staff/circ/patron/t_xact_details.tt2
Open-ILS/src/templates/staff/circ/renew/t_renew.tt2
Open-ILS/src/templates/staff/circ/share/t_hold_shelf_dialog.tt2
Open-ILS/src/templates/staff/circ/share/t_mark_damaged.tt2 [new file with mode: 0644]
Open-ILS/src/templates/staff/circ/share/t_transit_dialog.tt2
Open-ILS/src/templates/staff/circ/transits/t_list.tt2
Open-ILS/src/templates/staff/css/style.css.tt2
Open-ILS/src/templates/staff/navbar.tt2
Open-ILS/src/templates/staff/share/print_templates/t_checkout.tt2
Open-ILS/src/templates/staff/share/print_templates/t_grid_html.tt2 [new file with mode: 0644]
Open-ILS/src/templates/staff/share/print_templates/t_hold_shelf_slip.tt2
Open-ILS/src/templates/staff/share/t_autogrid.tt2
Open-ILS/src/templates/staff/t_about.tt2 [new file with mode: 0644]
Open-ILS/src/templates/vandelay/vandelay.tt2
Open-ILS/tests/datasets/sql/assets_extras.sql
Open-ILS/tests/datasets/sql/load_all.sql
Open-ILS/tests/datasets/sql/surveys.sql [new file with mode: 0644]
Open-ILS/web/js/ui/default/opac/copyloc.js
Open-ILS/web/js/ui/default/staff/acq/app.js
Open-ILS/web/js/ui/default/staff/admin/acq/app.js
Open-ILS/web/js/ui/default/staff/admin/actor/app.js
Open-ILS/web/js/ui/default/staff/admin/actor/org_unit/app.js
Open-ILS/web/js/ui/default/staff/admin/local/app.js
Open-ILS/web/js/ui/default/staff/admin/local/asset/copy_tag.js
Open-ILS/web/js/ui/default/staff/admin/local/rating/badge.js
Open-ILS/web/js/ui/default/staff/admin/serials/app.js
Open-ILS/web/js/ui/default/staff/admin/serials/pattern_template.js
Open-ILS/web/js/ui/default/staff/admin/server/app.js
Open-ILS/web/js/ui/default/staff/admin/server/authority/heading_field.js
Open-ILS/web/js/ui/default/staff/admin/server/config/copy_tag_type.js
Open-ILS/web/js/ui/default/staff/admin/server/config/marc_field.js
Open-ILS/web/js/ui/default/staff/admin/user_perms.js
Open-ILS/web/js/ui/default/staff/admin/workstation/app.js
Open-ILS/web/js/ui/default/staff/admin/workstation/log.js
Open-ILS/web/js/ui/default/staff/app.js
Open-ILS/web/js/ui/default/staff/cat/bucket/copy/app.js
Open-ILS/web/js/ui/default/staff/cat/bucket/record/app.js
Open-ILS/web/js/ui/default/staff/cat/catalog/app.js
Open-ILS/web/js/ui/default/staff/cat/item/app.js
Open-ILS/web/js/ui/default/staff/cat/printlabels/app.js
Open-ILS/web/js/ui/default/staff/cat/services/holdings.js
Open-ILS/web/js/ui/default/staff/cat/services/marcedit.js
Open-ILS/web/js/ui/default/staff/cat/volcopy/app.js
Open-ILS/web/js/ui/default/staff/cat/z3950/app.js
Open-ILS/web/js/ui/default/staff/circ/checkin/app.js
Open-ILS/web/js/ui/default/staff/circ/in_house_use/app.js
Open-ILS/web/js/ui/default/staff/circ/patron/app.js
Open-ILS/web/js/ui/default/staff/circ/patron/bills.js
Open-ILS/web/js/ui/default/staff/circ/patron/bucket/app.js
Open-ILS/web/js/ui/default/staff/circ/patron/checkout.js
Open-ILS/web/js/ui/default/staff/circ/patron/items_out.js
Open-ILS/web/js/ui/default/staff/circ/patron/pending.js
Open-ILS/web/js/ui/default/staff/circ/patron/regctl.js
Open-ILS/web/js/ui/default/staff/circ/patron/register.js
Open-ILS/web/js/ui/default/staff/circ/renew/app.js
Open-ILS/web/js/ui/default/staff/circ/services/billing.js
Open-ILS/web/js/ui/default/staff/circ/services/circ.js
Open-ILS/web/js/ui/default/staff/circ/services/holds.js
Open-ILS/web/js/ui/default/staff/circ/services/item.js
Open-ILS/web/js/ui/default/staff/circ/services/patrons.js
Open-ILS/web/js/ui/default/staff/circ/services/transits.js
Open-ILS/web/js/ui/default/staff/circ/transits/list.js
Open-ILS/web/js/ui/default/staff/offline.js
Open-ILS/web/js/ui/default/staff/reporter/template/app.js
Open-ILS/web/js/ui/default/staff/serials/app.js
Open-ILS/web/js/ui/default/staff/serials/directives/mfhd_manager.js
Open-ILS/web/js/ui/default/staff/serials/directives/prediction_manager.js
Open-ILS/web/js/ui/default/staff/serials/directives/subscription_manager.js
Open-ILS/web/js/ui/default/staff/serials/services/core.js
Open-ILS/web/js/ui/default/staff/services/auth.js
Open-ILS/web/js/ui/default/staff/services/fm_record_editor.js
Open-ILS/web/js/ui/default/staff/services/grid.js
Open-ILS/web/js/ui/default/staff/services/mfhd.js
Open-ILS/web/js/ui/default/staff/services/op_change.js
Open-ILS/web/js/ui/default/staff/services/startup.js
Open-ILS/web/js/ui/default/staff/services/strings.js
Open-ILS/web/js/ui/default/staff/services/ui.js
Open-ILS/web/js/ui/default/vandelay/vandelay.js
build/i18n/po/AutoFieldWidget.js/cs-CZ.po
build/i18n/po/PCrudFilterPane.js/cs-CZ.po
build/i18n/po/URLVerify.js/cs-CZ.po
build/i18n/po/acq.js/cs-CZ.po
build/i18n/po/acq/cs-CZ.po
build/i18n/po/actor/cs-CZ.po
build/i18n/po/admin.properties/cs-CZ.po
build/i18n/po/authority.js/cs-CZ.po
build/i18n/po/cat.properties/ar-JO.po
build/i18n/po/cat.properties/cs-CZ.po
build/i18n/po/cat/cs-CZ.po
build/i18n/po/circ.properties/ar-JO.po
build/i18n/po/circ.properties/cs-CZ.po
build/i18n/po/circ/cs-CZ.po
build/i18n/po/common.properties/cs-CZ.po
build/i18n/po/conify.js/cs-CZ.po
build/i18n/po/conify/cs-CZ.po
build/i18n/po/db.seed/ar-JO.po
build/i18n/po/db.seed/cs-CZ.po
build/i18n/po/fm_IDL.dtd/ar-JO.po
build/i18n/po/fm_IDL.dtd/cs-CZ.po
build/i18n/po/fm_IDL.dtd/fm_IDL.dtd.pot
build/i18n/po/ils_events.xml/ar-JO.po
build/i18n/po/ils_events.xml/cs-CZ.po
build/i18n/po/kpac/cs-CZ.po
build/i18n/po/lang.dtd/ar-JO.po
build/i18n/po/lang.dtd/cs-CZ.po
build/i18n/po/match_set.js/cs-CZ.po
build/i18n/po/offline.properties/cs-CZ.po
build/i18n/po/opac.dtd/cs-CZ.po
build/i18n/po/opac.js/cs-CZ.po
build/i18n/po/patron.properties/cs-CZ.po
build/i18n/po/reports.dtd/ar-JO.po
build/i18n/po/reports.dtd/cs-CZ.po
build/i18n/po/reports.js/cs-CZ.po
build/i18n/po/reservation.js/cs-CZ.po
build/i18n/po/serial.properties/cs-CZ.po
build/i18n/po/serial/cs-CZ.po
build/i18n/po/tpac/ar-JO.po
build/i18n/po/tpac/cs-CZ.po
build/i18n/po/tpac/de-DE.po
build/i18n/po/tpac/en-CA.po
build/i18n/po/tpac/en-GB.po
build/i18n/po/tpac/es-ES.po
build/i18n/po/tpac/fi-FI.po
build/i18n/po/tpac/fr-CA.po
build/i18n/po/tpac/he-IL.po
build/i18n/po/tpac/hu-HU.po
build/i18n/po/tpac/hy-AM.po
build/i18n/po/tpac/oc-FR.po
build/i18n/po/tpac/pt-BR.po
build/i18n/po/tpac/ru-RU.po
build/i18n/po/tpac/sv-SE.po
build/i18n/po/tpac/tr-TR.po
build/i18n/po/urlverify/cs-CZ.po
build/i18n/po/vandelay.dtd/cs-CZ.po
build/i18n/po/webstaff/ar-JO.po
build/i18n/po/webstaff/cs-CZ.po
build/i18n/po/webstaff/es-ES.po
build/i18n/po/webstaff/hy-AM.po
build/i18n/po/webstaff/ru-RU.po
build/i18n/po/webstaff/webstaff.pot
docs/RELEASE_NOTES_2_12.adoc
docs/RELEASE_NOTES_3_0.adoc
docs/RELEASE_NOTES_NEXT/Architecture/Sample_Data_Includes_Surveys.adoc [new file with mode: 0644]
docs/RELEASE_NOTES_NEXT/Circulation/Patron_Email_Addresses_Now_Clickable.adoc [new file with mode: 0644]
docs/RELEASE_NOTES_NEXT/OPAC/copy_location_filter_changes.adoc [new file with mode: 0644]
docs/acquisitions/selection_lists_po.adoc
docs/acquisitions/vandelay_acquisitions_integration.adoc
docs/admin/Best_Hold_Selection_Sort_Order.adoc
docs/admin/MARC_Import_Remove_Fields.adoc
docs/admin/MARC_RAD_MVF_CRA.adoc
docs/admin/Org_Unit_Proximity_Adjustments.adoc
docs/admin/SMS_messaging.adoc
docs/admin/acquisitions_admin.adoc
docs/admin/actiontriggers.adoc
docs/admin/age_hold_protection.adoc
docs/admin/aged_circs.adoc
docs/admin/authentication_proxy.adoc
docs/admin/authorities.adoc
docs/admin/auto_suggest_search.adoc
docs/admin/backups.adoc [new file with mode: 0644]
docs/admin/booking-admin.adoc
docs/admin/circulation_limit_groups.adoc
docs/admin/closed_dates.adoc [new file with mode: 0644]
docs/admin/cn_prefixes_and_suffixes.adoc
docs/admin/copy_locations.adoc [new file with mode: 0644]
docs/admin/ebook_api.adoc
docs/admin/ebook_api_service.adoc [new file with mode: 0644]
docs/admin/hold_driven_recalls.adoc
docs/admin/hold_targeter_service.adoc [new file with mode: 0644]
docs/admin/hours.adoc [new file with mode: 0644]
docs/admin/lsa-address_alert.adoc
docs/admin/lsa-barcode_completion.adoc
docs/admin/lsa-statcat.adoc
docs/admin/lsa-work_log.adoc
docs/admin/popularity_badges_web_client.adoc
docs/admin/qstore_service.adoc [new file with mode: 0644]
docs/admin/recent_staff_searches.adoc
docs/admin/restrict_Z39.50_sources_by_perm_group.adoc
docs/admin/search_settings_web_client.adoc [new file with mode: 0644]
docs/admin/security.adoc [new file with mode: 0644]
docs/admin/staff_client-button_bar_toolbar.adoc
docs/admin/staff_client-recent_searches.adoc
docs/admin/user_activity_type.adoc
docs/admin/web-client-browser-best-practices.adoc [new file with mode: 0644]
docs/admin/web_client-browser-tab-shortcuts.adoc [deleted file]
docs/admin/web_client-login.adoc
docs/admin/workstation_admin.adoc
docs/admin/workstation_admin_receipt_template_editor.adoc
docs/admin_initial_setup/borrowing_items.adoc
docs/admin_initial_setup/describing_your_organization.adoc
docs/admin_initial_setup/designing_your_catalog.adoc
docs/admin_initial_setup/hard_due_dates.adoc
docs/admin_initial_setup/ordering_materials.adoc
docs/cataloging/MARC_batch_edit.adoc
docs/cataloging/cataloging_electronic_resources.adoc
docs/cataloging/copy-buckets_web_client.adoc
docs/cataloging/holdings_templates.adoc [new file with mode: 0644]
docs/cataloging/record_buckets.adoc [new file with mode: 0755]
docs/cataloging/specific_variable_fields.adoc [new file with mode: 0644]
docs/cataloging/z39.50_search_enhancements.adoc
docs/circulation/advanced_holds.adoc
docs/circulation/circulating_items.adoc
docs/circulation/circulating_items_web_client.adoc
docs/circulation/circulation_patron_records.adoc
docs/circulation/circulation_patron_records_web_client.adoc
docs/circulation/user_buckets.adoc [new file with mode: 0644]
docs/development/support_scripts.adoc
docs/installation/edi_setup.adoc
docs/media/booking-create-resourcetype_webclient-1.png [new file with mode: 0644]
docs/media/web_client_workstation_registration.png [new file with mode: 0644]
docs/opac/catalog_browse.adoc
docs/opac/ebook_transactions.adoc [deleted file]
docs/opac/linked_libraries.adoc
docs/opac/my_account.adoc [new file with mode: 0644]
docs/opac/search_url.adoc [new file with mode: 0644]
docs/opac/using_the_public_access_catalog.adoc
docs/reports/reporter_view_output.adoc
docs/root.adoc
docs/root_acquisitions.adoc
docs/root_cataloging.adoc
docs/root_circulation.adoc
docs/root_command_line_admin.adoc
docs/root_opac.adoc
docs/root_serials.adoc
docs/root_staff_client_admin.adoc
docs/serials/B-copy_template.adoc
docs/serials/D-subscription-ASCV.adoc
docs/serials/F-Receiving.adoc
docs/serials/Group_Serials_Issues_in_the_OPAC_2.2.adoc
docs/shared/workstation_settings.adoc [new file with mode: 0644]

diff --git a/Open-ILS/examples/fm_IDL.xml b/Open-ILS/examples/fm_IDL.xml
index 4e4e969..535cee6 100644
@@ -4031,13 +4031,13 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
 
        <class id="csp" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="config::standing_penalty" oils_persist:tablename="config.standing_penalty" reporter:label="Standing Penalty">
                <fields oils_persist:primary="id" oils_persist:sequence="config.standing_penalty_id_seq">
-                       <field name="id" reporter:selector="name" reporter:datatype="id"/>
-                       <field name="name"  reporter:datatype="text"/>
-                       <field name="label"  reporter:datatype="text" oils_persist:i18n="true"/>
-                       <field name="block_list" reporter:datatype="text"/>
-                       <field name="staff_alert" reporter:datatype="bool"/>
-                       <field name="org_depth" reporter:datatype="int"/>
-                       <field name="ignore_proximity" reporter:datatype="int"/>
+                       <field reporter:label="Penalty ID" name="id" reporter:selector="name" reporter:datatype="id"/>
+                       <field reporter:label="Name" name="name"  reporter:datatype="text"/>
+                       <field reporter:label="Label" name="label"  reporter:datatype="text" oils_persist:i18n="true"/>
+                       <field reporter:label="Block List" name="block_list" reporter:datatype="text"/>
+                       <field reporter:label="Staff Alert" name="staff_alert" reporter:datatype="bool"/>
+                       <field reporter:label="Org Depth" name="org_depth" reporter:datatype="int"/>
+                       <field reporter:label="Ignore Proximity" name="ignore_proximity" reporter:datatype="int"/>
                </fields>
                <links/>
         <permacrud xmlns="http://open-ils.org/spec/opensrf/IDL/permacrud/v1">
@@ -7594,7 +7594,7 @@ SELECT  usr,
                        </actions>
                </permacrud>
        </class>
-       <class id="mbp" controller="open-ils.cstore" oils_obj:fieldmapper="money::bnm_payment" oils_persist:tablename="money.bnm_payment_view" reporter:core="true" reporter:label="Payments: Brick-and-mortar">
+       <class id="mbp" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="money::bnm_payment" oils_persist:tablename="money.bnm_payment_view" reporter:core="true" reporter:label="Payments: Brick-and-mortar">
                <fields oils_persist:primary="id" oils_persist:sequence="">
                        <field reporter:label="Amount" name="amount" reporter:datatype="money" />
                        <field reporter:label="Payment ID" name="id" reporter:datatype="id" />
@@ -7625,6 +7625,13 @@ SELECT  usr,
                        <link field="xact" reltype="has_a" key="id" map="" class="mbt"/>
                        <link field="accepting_usr" reltype="has_a" key="id" map="" class="au"/>
                </links>
+               <permacrud xmlns="http://open-ils.org/spec/opensrf/IDL/permacrud/v1">
+                       <actions>
+                               <retrieve permission="VIEW_USER_TRANSACTIONS">
+                                       <context link="xact" jump="usr" field="home_ou"/>
+                               </retrieve>
+                       </actions>
+               </permacrud>
        </class>
        <class id="mndp" controller="open-ils.reporter" oils_obj:fieldmapper="money::non_drawer_payment" oils_persist:tablename="money.non_drawer_payment_view" reporter:core="true" reporter:label="Payments: Non-drawer Staff">
                <fields oils_persist:primary="id" oils_persist:sequence="">
diff --git a/Open-ILS/src/c-apps/oils_auth.c b/Open-ILS/src/c-apps/oils_auth.c
index 4afb36c..b87d1ca 100644
@@ -98,10 +98,13 @@ int osrfAppInitialize() {
                MODULENAME,
                "open-ils.auth.session.retrieve",
                "oilsAuthSessionRetrieve",
-               "Pass in the auth token and this retrieves the user object.  The auth "
-               "timeout is reset when this call is made "
+               "Pass in the auth token and this retrieves the user object.  By "
+               "default, the auth timeout is reset when this call is made.  If "
+               "a second non-zero parameter is passed, the auth timeout info is "
+               "returned to the caller along with the user object.  If a 3rd "
+               "non-zero parameter is passed, the auth timeout will not be reset."
                "Returns the user object (password blanked) for the given login session "
-               "PARAMS( authToken )", 1, 0 );
+               "PARAMS( authToken[, returnTime[, doNotResetSession]] )", 1, 0 );
 
        osrfAppRegisterMethod(
                MODULENAME,
@@ -1206,6 +1209,7 @@ int oilsAuthResetTimeout( osrfMethodContext* ctx ) {
 int oilsAuthSessionRetrieve( osrfMethodContext* ctx ) {
        OSRF_METHOD_VERIFY_CONTEXT(ctx);
     bool returnFull = false;
+    bool noTimeoutReset = false;
 
        const char* authToken = jsonObjectGetString( jsonObjectGetIndex(ctx->params, 0));
 
@@ -1214,6 +1218,14 @@ int oilsAuthSessionRetrieve( osrfMethodContext* ctx ) {
         const char* rt = jsonObjectGetString(jsonObjectGetIndex(ctx->params, 1));
         if(rt && strcmp(rt, "0") != 0) 
             returnFull = true;
+
+        if (ctx->params->size > 2) {
+            // Avoid resetting the auth session timeout.
+            const char* noReset = 
+                jsonObjectGetString(jsonObjectGetIndex(ctx->params, 2));
+            if (noReset && strcmp(noReset, "0") != 0) 
+                noTimeoutReset = true;
+        }
     }
 
        jsonObject* cacheObj = NULL;
@@ -1222,7 +1234,8 @@ int oilsAuthSessionRetrieve( osrfMethodContext* ctx ) {
        if( authToken ){
 
                // Reset the timeout to keep the session alive
-               evt = _oilsAuthResetTimeout(authToken, 0);
+        if (!noTimeoutReset) 
+                   evt = _oilsAuthResetTimeout(authToken, 0);
 
                if( evt && strcmp(evt->event, OILS_EVENT_SUCCESS) ) {
                        osrfAppRespondComplete( ctx, oilsEventToJSON( evt ));    // can't reset timeout
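
For illustration only, a minimal Perl sketch of a caller using the new optional parameters of open-ils.auth.session.retrieve; the session handling and the $authtoken variable are assumptions, not part of this commit:

    use OpenSRF::AppSession;

    # Fetch the user object for an existing login token ($authtoken is assumed
    # to hold a valid token).  A non-zero second argument also returns the auth
    # timeout info; a non-zero third argument leaves the timeout untouched.
    my $ses  = OpenSRF::AppSession->create('open-ils.auth');
    my $resp = $ses->request(
        'open-ils.auth.session.retrieve',
        $authtoken,   # authToken
        1,            # returnTime
        1             # doNotResetSession
    )->gather(1);
    $ses->disconnect;
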
diff --git a/Open-ILS/src/c-apps/oils_sql.c b/Open-ILS/src/c-apps/oils_sql.c
index c33a3af..ac2f054 100644
@@ -3278,258 +3278,323 @@ join : {
        }
 }
 
+  Or, to specify join order:
+
+join : [
+    {mrd:{field:'record', type:'inner'}},
+    {acn:{field:'record', type:'left'}}
+]
+
 */
 
 static char* searchJOIN( const jsonObject* join_hash, const ClassInfo* left_info ) {
 
-       const jsonObject* working_hash;
+       jsonObject* working_hash;
        jsonObject* freeable_hash = NULL;
 
-       if( join_hash->type == JSON_HASH ) {
-               working_hash = join_hash;
-       } else if( join_hash->type == JSON_STRING ) {
-               // turn it into a JSON_HASH by creating a wrapper
-               // around a copy of the original
-               const char* _tmp = jsonObjectGetString( join_hash );
-               freeable_hash = jsonNewObjectType( JSON_HASH );
-               jsonObjectSetKey( freeable_hash, _tmp, NULL );
-               working_hash = freeable_hash;
+       jsonObject* working_array;
+       jsonObject* freeable_array = NULL;
+
+       if( join_hash->type == JSON_ARRAY ) {
+               working_array = (jsonObject*)join_hash;
        } else {
-               osrfLogError(
-                       OSRF_LOG_MARK,
-                       "%s: JOIN failed; expected JSON object type not found",
-                       modulename
-               );
-               return NULL;
+               working_array = jsonNewObjectType( JSON_ARRAY );
+
+               if( join_hash->type == JSON_HASH ) {
+                       working_hash = (jsonObject*)join_hash;
+               } else if( join_hash->type == JSON_STRING ) {
+                   freeable_array = working_array;
+                       // turn it into a JSON_HASH by creating a wrapper
+                       // around a copy of the original
+                       const char* _tmp = jsonObjectGetString( join_hash );
+                       freeable_hash = jsonNewObjectType( JSON_HASH );
+                       jsonObjectSetKey( freeable_hash, _tmp, NULL );
+                       working_hash = freeable_hash;
+               } else {
+                       osrfLogError(
+                               OSRF_LOG_MARK,
+                               "%s: JOIN failed; expected JSON object type not found",
+                               modulename
+                       );
+                       return NULL;
+               }
+
+               jsonObjectPush( working_array, working_hash );
        }
 
        growing_buffer* join_buf = buffer_init( 128 );
        const char* leftclass = left_info->class_name;
 
-       jsonObject* snode = NULL;
-       jsonIterator* search_itr = jsonNewIterator( working_hash );
-
-       while ( (snode = jsonIteratorNext( search_itr )) ) {
-               const char* right_alias = search_itr->key;
-               const char* class =
-                               jsonObjectGetString( jsonObjectGetKeyConst( snode, "class" ) );
-               if( ! class )
-                       class = right_alias;
-
-               const ClassInfo* right_info = add_joined_class( right_alias, class );
-               if( !right_info ) {
-                       osrfLogError(
-                               OSRF_LOG_MARK,
-                               "%s: JOIN failed.  Class \"%s\" not resolved in IDL",
-                               modulename,
-                               search_itr->key
-                       );
-                       jsonIteratorFree( search_itr );
-                       buffer_free( join_buf );
-                       if( freeable_hash )
-                               jsonObjectFree( freeable_hash );
-                       return NULL;
+       unsigned long order_idx = 0;
+       while(( working_hash = jsonObjectGetIndex( working_array, order_idx++ ) )) {
+
+           jsonObject* freeable_subhash = NULL;
+               if( working_hash->type == JSON_STRING ) {
+                       // turn it into a JSON_HASH by creating a wrapper
+                       // around a copy of the original
+                       const char* _inner_tmp = jsonObjectGetString( working_hash );
+                       freeable_subhash = jsonNewObjectType( JSON_HASH );
+                       jsonObjectSetKey( freeable_subhash, _inner_tmp, NULL );
+                       working_hash = freeable_subhash;
                }
-               osrfHash* links    = right_info->links;
-               const char* table  = right_info->source_def;
-
-               const char* fkey  = jsonObjectGetString( jsonObjectGetKeyConst( snode, "fkey" ) );
-               const char* field = jsonObjectGetString( jsonObjectGetKeyConst( snode, "field" ) );
-
-               if( field && !fkey ) {
-                       // Look up the corresponding join column in the IDL.
-                       // The link must be defined in the child table,
-                       // and point to the right parent table.
-                       osrfHash* idl_link = (osrfHash*) osrfHashGet( links, field );
-                       const char* reltype = NULL;
-                       const char* other_class = NULL;
-                       reltype = osrfHashGet( idl_link, "reltype" );
-                       if( reltype && strcmp( reltype, "has_many" ) )
-                               other_class = osrfHashGet( idl_link, "class" );
-                       if( other_class && !strcmp( other_class, leftclass ) )
-                               fkey = osrfHashGet( idl_link, "key" );
-                       if( !fkey ) {
-                               osrfLogError(
-                                       OSRF_LOG_MARK,
-                                       "%s: JOIN failed.  No link defined from %s.%s to %s",
-                                       modulename,
-                                       class,
-                                       field,
-                                       leftclass
-                               );
-                               buffer_free( join_buf );
-                               if( freeable_hash )
-                                       jsonObjectFree( freeable_hash );
-                               jsonIteratorFree( search_itr );
-                               return NULL;
-                       }
 
-               } else if( !field && fkey ) {
-                       // Look up the corresponding join column in the IDL.
-                       // The link must be defined in the child table,
-                       // and point to the right parent table.
-                       osrfHash* left_links = left_info->links;
-                       osrfHash* idl_link = (osrfHash*) osrfHashGet( left_links, fkey );
-                       const char* reltype = NULL;
-                       const char* other_class = NULL;
-                       reltype = osrfHashGet( idl_link, "reltype" );
-                       if( reltype && strcmp( reltype, "has_many" ) )
-                               other_class = osrfHashGet( idl_link, "class" );
-                       if( other_class && !strcmp( other_class, class ) )
-                               field = osrfHashGet( idl_link, "key" );
-                       if( !field ) {
+               jsonObject* snode = NULL;
+               jsonIterator* search_itr = jsonNewIterator( working_hash );
+       
+               while ( (snode = jsonIteratorNext( search_itr )) ) {
+                       const char* right_alias = search_itr->key;
+                       const char* class =
+                                       jsonObjectGetString( jsonObjectGetKeyConst( snode, "class" ) );
+                       if( ! class )
+                               class = right_alias;
+       
+                       const ClassInfo* right_info = add_joined_class( right_alias, class );
+                       if( !right_info ) {
                                osrfLogError(
                                        OSRF_LOG_MARK,
-                                       "%s: JOIN failed.  No link defined from %s.%s to %s",
+                                       "%s: JOIN failed.  Class \"%s\" not resolved in IDL",
                                        modulename,
-                                       leftclass,
-                                       fkey,
-                                       class
+                                       search_itr->key
                                );
+                               jsonIteratorFree( search_itr );
                                buffer_free( join_buf );
+                               if( freeable_subhash )
+                                       jsonObjectFree( freeable_subhash );
                                if( freeable_hash )
                                        jsonObjectFree( freeable_hash );
-                               jsonIteratorFree( search_itr );
+                               if( freeable_array )
+                                       jsonObjectFree( freeable_array );
                                return NULL;
                        }
-
-               } else if( !field && !fkey ) {
-                       osrfHash* left_links = left_info->links;
-
-                       // For each link defined for the left class:
-                       // see if the link references the joined class
-                       osrfHashIterator* itr = osrfNewHashIterator( left_links );
-                       osrfHash* curr_link = NULL;
-                       while( (curr_link = osrfHashIteratorNext( itr ) ) ) {
-                               const char* other_class = osrfHashGet( curr_link, "class" );
-                               if( other_class && !strcmp( other_class, class ) ) {
-
-                                       // In the IDL, the parent class doesn't always know then names of the child
-                                       // columns that are pointing to it, so don't use that end of the link
-                                       const char* reltype = osrfHashGet( curr_link, "reltype" );
-                                       if( reltype && strcmp( reltype, "has_many" ) ) {
-                                               // Found a link between the classes
-                                               fkey = osrfHashIteratorKey( itr );
-                                               field = osrfHashGet( curr_link, "key" );
-                                               break;
-                                       }
+                       osrfHash* links = right_info->links;
+                       const char* table  = right_info->source_def;
+       
+                       const char* fkey  = jsonObjectGetString( jsonObjectGetKeyConst( snode, "fkey" ) );
+                       const char* field = jsonObjectGetString( jsonObjectGetKeyConst( snode, "field" ) );
+       
+                       if( field && !fkey ) {
+                               // Look up the corresponding join column in the IDL.
+                               // The link must be defined in the child table,
+                               // and point to the right parent table.
+                               osrfHash* idl_link = (osrfHash*) osrfHashGet( links, field );
+                               const char* reltype = NULL;
+                               const char* other_class = NULL;
+                               reltype = osrfHashGet( idl_link, "reltype" );
+                               if( reltype && strcmp( reltype, "has_many" ) )
+                                       other_class = osrfHashGet( idl_link, "class" );
+                               if( other_class && !strcmp( other_class, leftclass ) )
+                                       fkey = osrfHashGet( idl_link, "key" );
+                               if( !fkey ) {
+                                       osrfLogError(
+                                               OSRF_LOG_MARK,
+                                               "%s: JOIN failed.  No link defined from %s.%s to %s",
+                                               modulename,
+                                               class,
+                                               field,
+                                               leftclass
+                                       );
+                                       buffer_free( join_buf );
+                                       if( freeable_subhash )
+                                               jsonObjectFree( freeable_subhash );
+                                       if( freeable_hash )
+                                               jsonObjectFree( freeable_hash );
+                                       if( freeable_array )
+                                               jsonObjectFree( freeable_array );
+                                       jsonIteratorFree( search_itr );
+                                       return NULL;
                                }
-                       }
-                       osrfHashIteratorFree( itr );
-
-                       if( !field || !fkey ) {
-                               // Do another such search, with the classes reversed
-
-                               // For each link defined for the joined class:
-                               // see if the link references the left class
-                               osrfHashIterator* itr = osrfNewHashIterator( links );
+       
+                       } else if( !field && fkey ) {
+                               // Look up the corresponding join column in the IDL.
+                               // The link must be defined in the child table,
+                               // and point to the right parent table.
+                               osrfHash* left_links = left_info->links;
+                               osrfHash* idl_link = (osrfHash*) osrfHashGet( left_links, fkey );
+                               const char* reltype = NULL;
+                               const char* other_class = NULL;
+                               reltype = osrfHashGet( idl_link, "reltype" );
+                               if( reltype && strcmp( reltype, "has_many" ) )
+                                       other_class = osrfHashGet( idl_link, "class" );
+                               if( other_class && !strcmp( other_class, class ) )
+                                       field = osrfHashGet( idl_link, "key" );
+                               if( !field ) {
+                                       osrfLogError(
+                                               OSRF_LOG_MARK,
+                                               "%s: JOIN failed.  No link defined from %s.%s to %s",
+                                               modulename,
+                                               leftclass,
+                                               fkey,
+                                               class
+                                       );
+                                       buffer_free( join_buf );
+                                       if( freeable_subhash )
+                                               jsonObjectFree( freeable_subhash );
+                                       if( freeable_hash )
+                                               jsonObjectFree( freeable_hash );
+                                       if( freeable_array )
+                                               jsonObjectFree( freeable_array );
+                                       jsonIteratorFree( search_itr );
+                                       return NULL;
+                               }
+       
+                       } else if( !field && !fkey ) {
+                               osrfHash* left_links = left_info->links;
+       
+                               // For each link defined for the left class:
+                               // see if the link references the joined class
+                               osrfHashIterator* itr = osrfNewHashIterator( left_links );
                                osrfHash* curr_link = NULL;
                                while( (curr_link = osrfHashIteratorNext( itr ) ) ) {
                                        const char* other_class = osrfHashGet( curr_link, "class" );
-                                       if( other_class && !strcmp( other_class, leftclass ) ) {
-
-                                               // In the IDL, the parent class doesn't know then names of the child
+                                       if( other_class && !strcmp( other_class, class ) ) {
+       
+                                               // In the IDL, the parent class doesn't always know then names of the child
                                                // columns that are pointing to it, so don't use that end of the link
                                                const char* reltype = osrfHashGet( curr_link, "reltype" );
                                                if( reltype && strcmp( reltype, "has_many" ) ) {
                                                        // Found a link between the classes
-                                                       field = osrfHashIteratorKey( itr );
-                                                       fkey = osrfHashGet( curr_link, "key" );
+                                                       fkey = osrfHashIteratorKey( itr );
+                                                       field = osrfHashGet( curr_link, "key" );
                                                        break;
                                                }
                                        }
                                }
                                osrfHashIteratorFree( itr );
+       
+                               if( !field || !fkey ) {
+                                       // Do another such search, with the classes reversed
+       
+                                       // For each link defined for the joined class:
+                                       // see if the link references the left class
+                                       osrfHashIterator* itr = osrfNewHashIterator( links );
+                                       osrfHash* curr_link = NULL;
+                                       while( (curr_link = osrfHashIteratorNext( itr ) ) ) {
+                                               const char* other_class = osrfHashGet( curr_link, "class" );
+                                               if( other_class && !strcmp( other_class, leftclass ) ) {
+       
+                                                       // In the IDL, the parent class doesn't know then names of the child
+                                                       // columns that are pointing to it, so don't use that end of the link
+                                                       const char* reltype = osrfHashGet( curr_link, "reltype" );
+                                                       if( reltype && strcmp( reltype, "has_many" ) ) {
+                                                               // Found a link between the classes
+                                                               field = osrfHashIteratorKey( itr );
+                                                               fkey = osrfHashGet( curr_link, "key" );
+                                                               break;
+                                                       }
+                                               }
+                                       }
+                                       osrfHashIteratorFree( itr );
+                               }
+       
+                               if( !field || !fkey ) {
+                                       osrfLogError(
+                                               OSRF_LOG_MARK,
+                                               "%s: JOIN failed.  No link defined between %s and %s",
+                                               modulename,
+                                               leftclass,
+                                               class
+                                       );
+                                       buffer_free( join_buf );
+                                       if( freeable_subhash )
+                                               jsonObjectFree( freeable_subhash );
+                                       if( freeable_hash )
+                                               jsonObjectFree( freeable_hash );
+                                       if( freeable_array )
+                                               jsonObjectFree( freeable_array );
+                                       jsonIteratorFree( search_itr );
+                                       return NULL;
+                               }
                        }
-
-                       if( !field || !fkey ) {
-                               osrfLogError(
-                                       OSRF_LOG_MARK,
-                                       "%s: JOIN failed.  No link defined between %s and %s",
-                                       modulename,
-                                       leftclass,
-                                       class
-                               );
-                               buffer_free( join_buf );
-                               if( freeable_hash )
-                                       jsonObjectFree( freeable_hash );
-                               jsonIteratorFree( search_itr );
-                               return NULL;
-                       }
-               }
-
-               const char* type = jsonObjectGetString( jsonObjectGetKeyConst( snode, "type" ) );
-               if( type ) {
-                       if( !strcasecmp( type,"left" )) {
-                               buffer_add( join_buf, " LEFT JOIN" );
-                       } else if( !strcasecmp( type,"right" )) {
-                               buffer_add( join_buf, " RIGHT JOIN" );
-                       } else if( !strcasecmp( type,"full" )) {
-                               buffer_add( join_buf, " FULL JOIN" );
+       
+                       const char* type = jsonObjectGetString( jsonObjectGetKeyConst( snode, "type" ) );
+                       if( type ) {
+                               if( !strcasecmp( type,"left" )) {
+                                       buffer_add( join_buf, " LEFT JOIN" );
+                               } else if( !strcasecmp( type,"right" )) {
+                                       buffer_add( join_buf, " RIGHT JOIN" );
+                               } else if( !strcasecmp( type,"full" )) {
+                                       buffer_add( join_buf, " FULL JOIN" );
+                               } else {
+                                       buffer_add( join_buf, " INNER JOIN" );
+                               }
                        } else {
                                buffer_add( join_buf, " INNER JOIN" );
                        }
-               } else {
-                       buffer_add( join_buf, " INNER JOIN" );
-               }
-
-               buffer_fadd( join_buf, " %s AS \"%s\" ON ( \"%s\".%s = \"%s\".%s",
-                                       table, right_alias, right_alias, field, left_info->alias, fkey );
-
-               // Add any other join conditions as specified by "filter"
-               const jsonObject* filter = jsonObjectGetKeyConst( snode, "filter" );
-               if( filter ) {
-                       const char* filter_op = jsonObjectGetString(
-                               jsonObjectGetKeyConst( snode, "filter_op" ) );
-                       if( filter_op && !strcasecmp( "or",filter_op )) {
-                               buffer_add( join_buf, " OR " );
-                       } else {
-                               buffer_add( join_buf, " AND " );
+       
+                       buffer_fadd( join_buf, " %s AS \"%s\" ON ( \"%s\".%s = \"%s\".%s",
+                                               table, right_alias, right_alias, field, left_info->alias, fkey );
+       
+                       // Add any other join conditions as specified by "filter"
+                       const jsonObject* filter = jsonObjectGetKeyConst( snode, "filter" );
+                       if( filter ) {
+                               const char* filter_op = jsonObjectGetString(
+                                       jsonObjectGetKeyConst( snode, "filter_op" ) );
+                               if( filter_op && !strcasecmp( "or",filter_op )) {
+                                       buffer_add( join_buf, " OR " );
+                               } else {
+                                       buffer_add( join_buf, " AND " );
+                               }
+       
+                               char* jpred = searchWHERE( filter, right_info, AND_OP_JOIN, NULL );
+                               if( jpred ) {
+                                       OSRF_BUFFER_ADD_CHAR( join_buf, ' ' );
+                                       OSRF_BUFFER_ADD( join_buf, jpred );
+                                       free( jpred );
+                               } else {
+                                       osrfLogError(
+                                               OSRF_LOG_MARK,
+                                               "%s: JOIN failed.  Invalid conditional expression.",
+                                               modulename
+                                       );
+                                       jsonIteratorFree( search_itr );
+                                       buffer_free( join_buf );
+                                       if( freeable_subhash )
+                                               jsonObjectFree( freeable_subhash );
+                                       if( freeable_hash )
+                                               jsonObjectFree( freeable_hash );
+                                       if( freeable_array )
+                                               jsonObjectFree( freeable_array );
+                                       return NULL;
+                               }
                        }
-
-                       char* jpred = searchWHERE( filter, right_info, AND_OP_JOIN, NULL );
-                       if( jpred ) {
-                               OSRF_BUFFER_ADD_CHAR( join_buf, ' ' );
-                               OSRF_BUFFER_ADD( join_buf, jpred );
-                               free( jpred );
-                       } else {
-                               osrfLogError(
-                                       OSRF_LOG_MARK,
-                                       "%s: JOIN failed.  Invalid conditional expression.",
-                                       modulename
-                               );
-                               jsonIteratorFree( search_itr );
-                               buffer_free( join_buf );
-                               if( freeable_hash )
-                                       jsonObjectFree( freeable_hash );
-                               return NULL;
+       
+                       buffer_add( join_buf, " ) " );
+       
+                       // Recursively add a nested join, if one is present
+                       const jsonObject* join_filter = jsonObjectGetKeyConst( snode, "join" );
+                       if( join_filter ) {
+                               char* jpred = searchJOIN( join_filter, right_info );
+                               if( jpred ) {
+                                       OSRF_BUFFER_ADD_CHAR( join_buf, ' ' );
+                                       OSRF_BUFFER_ADD( join_buf, jpred );
+                                       free( jpred );
+                               } else {
+                                       osrfLogError( OSRF_LOG_MARK, "%s: Invalid nested join.", modulename );
+                                       jsonIteratorFree( search_itr );
+                                       buffer_free( join_buf );
+                                       if( freeable_subhash )
+                                               jsonObjectFree( freeable_subhash );
+                                       if( freeable_hash )
+                                               jsonObjectFree( freeable_hash );
+                                       if( freeable_array )
+                                               jsonObjectFree( freeable_array );
+                                       return NULL;
+                               }
                        }
                }
 
-               buffer_add( join_buf, " ) " );
+               if( freeable_subhash )
+                       jsonObjectFree( freeable_subhash );
 
-               // Recursively add a nested join, if one is present
-               const jsonObject* join_filter = jsonObjectGetKeyConst( snode, "join" );
-               if( join_filter ) {
-                       char* jpred = searchJOIN( join_filter, right_info );
-                       if( jpred ) {
-                               OSRF_BUFFER_ADD_CHAR( join_buf, ' ' );
-                               OSRF_BUFFER_ADD( join_buf, jpred );
-                               free( jpred );
-                       } else {
-                               osrfLogError( OSRF_LOG_MARK, "%s: Invalid nested join.", modulename );
-                               jsonIteratorFree( search_itr );
-                               buffer_free( join_buf );
-                               if( freeable_hash )
-                                       jsonObjectFree( freeable_hash );
-                               return NULL;
-                       }
-               }
+               jsonIteratorFree( search_itr );
        }
 
        if( freeable_hash )
                jsonObjectFree( freeable_hash );
-       jsonIteratorFree( search_itr );
+
+       if( freeable_array )
+               jsonObjectFree( freeable_array );
+
 
        return buffer_release( join_buf );
 }
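
To make the join-order form documented above concrete, here is a hedged Perl sketch of a json_query "from" clause that passes an ordered array of single-key hashes to searchJOIN; the classes and filters are examples only, not taken from this commit:

    # Join acn first, then left-join any open circulation, in that order.
    my $query = {
        select => { acp => ['id', 'barcode'] },
        from   => {
            acp => [
                { acn  => { filter => { deleted => 'f' } } },
                { circ => { type   => 'left',
                            filter => { checkin_time => undef } } },
            ]
        },
        where  => { '+acp' => { deleted => 'f' } },
    };
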
@@ -5900,7 +5965,7 @@ static jsonObject* doFieldmapperSearch( osrfMethodContext* ctx, osrfHash* class_
                                        "osrfMethodException", ctx->request, "Error setting timezone" );
                                if( !oilsIsDBConnected( writehandle )) {
                                        osrfAppSessionPanic( ctx->session );
-                                       return -1;
+                                       return NULL;
                                }
                        } else {
                                dbi_result_free( tz_res );
@@ -5913,7 +5978,7 @@ static jsonObject* doFieldmapperSearch( osrfMethodContext* ctx, osrfHash* class_
                                osrfLogError( OSRF_LOG_MARK, "%s: Error resetting timezone", modulename);
                                if( !oilsIsDBConnected( writehandle )) {
                                        osrfAppSessionPanic( ctx->session );
-                                       return -1;
+                                       return NULL;
                                }
                        } else {
                                dbi_result_free( res );
@@ -6788,7 +6853,6 @@ static jsonObject* oilsMakeJSONFromResult( dbi_result result ) {
        char dt_string[ 256 ];
        struct tm gmdt;
 
-       int fmIndex;
        int columnIndex = 1;
        int attr;
        unsigned short type;
@@ -6799,8 +6863,6 @@ static jsonObject* oilsMakeJSONFromResult( dbi_result result ) {
 
                osrfLogInternal( OSRF_LOG_MARK, "Looking for column named [%s]...", (char*) columnName );
 
-               fmIndex = -1; // reset the position
-
                /* determine the field type and storage attributes */
                type = dbi_result_get_field_type_idx( result, columnIndex );
                attr = dbi_result_get_field_attribs_idx( result, columnIndex );
diff --git a/Open-ILS/src/perlmods/lib/OpenILS/Application/AppUtils.pm b/Open-ILS/src/perlmods/lib/OpenILS/Application/AppUtils.pm
index a8661cc..4c40d65 100644
@@ -2096,18 +2096,8 @@ sub basic_opac_copy_query {
         },
 
         from => {
-            acp => {
-                ($iss_id ? (
-                    sitem => {
-                        fkey => 'id',
-                        field => 'unit',
-                        filter => {issuance => $iss_id},
-                        join => {
-                            sstr => { }
-                        }
-                    }
-                ) : ()),
-                acn => {
+            acp => [
+                {acn => { # 0
                     join => {
                         acnp => { fkey => 'prefix' },
                         acns => { fkey => 'suffix' }
@@ -2116,28 +2106,38 @@ sub basic_opac_copy_query {
                         {deleted => 'f'},
                         ($rec_id ? {record => $rec_id} : ())
                     ],
-                },
-                circ => { # If the copy is circulating, retrieve the open circ
+                }},
+                'aou', # 1
+                {circ => { # 2 If the copy is circulating, retrieve the open circ
                     type => 'left',
                     filter => {checkin_time => undef}
-                },
-                acpl => {
+                }},
+                {acpl => { # 3
                     filter => {
                         deleted => 'f',
                         ($staff ? () : ( opac_visible => 't' )),
                     },
-                },
-                ccs => {
+                }},
+                {ccs => { # 4
                     ($staff ? () : (filter => { opac_visible => 't' }))
-                },
-                aou => {},
-                acpm => {
+                }},
+                {acpm => { # 5
                     type => 'left',
                     join => {
                         bmp => { type => 'left', filter => { deleted => 'f' } }
                     }
-                }
-            }
+                }},
+                ($iss_id ? { # 6 
+                    sitem => {
+                        fkey => 'id',
+                        field => 'unit',
+                        filter => {issuance => $iss_id},
+                        join => {
+                            sstr => { }
+                        }
+                    }
+                } : ())
+            ]
         },
 
         where => {
diff --git a/Open-ILS/src/perlmods/lib/OpenILS/Application/Circ.pm b/Open-ILS/src/perlmods/lib/OpenILS/Application/Circ.pm
index 280ecdd..3a59bf9 100644
@@ -1269,10 +1269,15 @@ __PACKAGE__->register_method(
 
 sub mark_item {
     my( $self, $conn, $auth, $copy_id, $args ) = @_;
-    my $e = new_editor(authtoken=>$auth, xact =>1);
-    return $e->die_event unless $e->checkauth;
     $args ||= {};
 
+    # Items must be checked in before any attempt is made to mark damaged
+    my $evt = try_checkin($auth, $copy_id) if
+        ($self->api_name=~ /damaged/ && $args->{handle_checkin});
+    return $evt if $evt;
+
+    my $e = new_editor(authtoken=>$auth, xact =>1);
+    return $e->die_event unless $e->checkauth;
     my $copy = $e->retrieve_asset_copy([
         $copy_id,
         {flesh => 1, flesh_fields => {'acp' => ['call_number']}}])
@@ -1342,6 +1347,31 @@ sub mark_item {
     return 1;
 }
 
+sub try_checkin {
+    my($auth, $copy_id) = @_;
+
+    my $checkin = $U->simplereq(
+        'open-ils.circ',
+        'open-ils.circ.checkin.override',
+        $auth, {
+            copy_id => $copy_id,
+            noop => 1
+        }
+    );
+    if(ref $checkin ne 'ARRAY') { $checkin = [$checkin]; }
+
+    my $evt_code = $checkin->[0]->{textcode};
+    $logger->info("try_checkin() received event: $evt_code");
+
+    if($evt_code eq 'SUCCESS' || $evt_code eq 'NO_CHANGE') {
+        $logger->info('try_checkin() successful checkin');
+        return undef;
+    } else {
+        $logger->warn('try_checkin() un-successful checkin');
+        return $checkin;
+    }
+}
+
 sub handle_mark_damaged {
     my($e, $copy, $owning_lib, $args) = @_;
 
@@ -1422,8 +1452,6 @@ sub handle_mark_damaged {
         my $evt2 = OpenILS::Utils::Penalty->calculate_penalties($e, $circ->usr->id, $e->requestor->ws_ou);
         return $evt2 if $evt2;
 
-        return undef;
-
     } else {
         return OpenILS::Event->new('DAMAGE_CHARGE', 
             payload => {
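
As a usage sketch (not part of the patch), a staff client could ask for the checkin-before-damaged behavior by passing the new handle_checkin flag; the open-ils.circ.mark_item_damaged API name and the $authtoken/$copy_id variables are assumptions here:

    use OpenSRF::AppSession;

    # Mark a copy damaged, checking it in first if it is still circulating.
    my $circ = OpenSRF::AppSession->create('open-ils.circ');
    my $result = $circ->request(
        'open-ils.circ.mark_item_damaged',
        $authtoken,              # staff login token
        $copy_id,                # asset.copy ID
        { handle_checkin => 1 }  # attempt checkin before marking damaged
    )->gather(1);
    $circ->disconnect;
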
diff --git a/Open-ILS/src/perlmods/lib/OpenILS/Application/Search/Biblio.pm b/Open-ILS/src/perlmods/lib/OpenILS/Application/Search/Biblio.pm
index 4a0336d..06ec97d 100644
@@ -1364,11 +1364,13 @@ sub retrieve_cached_facets {
 
     eval {
         local $SIG{ALRM} = sub {die};
-        alarm(4); # we'll sleep for as much as 4s
+        alarm(10); # we'll sleep for as much as 10s
         do {
             die if $cache->get_cache($key . '_COMPLETE');
         } while (sleep(0.05));
+        alarm(0);
     };
+    alarm(0);
 
     my $blob = $cache->get_cache($key) || {};
 
index f643d5f..1fb0c9f 100644 (file)
@@ -1014,7 +1014,10 @@ sub toSQL {
 
         $final_c_attr_test = 'EXISTS (SELECT 1 FROM asset.copy_vis_attr_cache WHERE record = m.source AND vis_attr_vector @@ c_attr.vis_test)';
         if (!$pc_vis_test) { # staff search
-            $final_c_attr_test = '(' . $final_c_attr_test . ' OR NOT EXISTS (SELECT 1 FROM asset.copy_vis_attr_cache WHERE record = m.source))';
+            $final_c_attr_test = '(' . $final_c_attr_test . " OR (" .
+                    "NOT EXISTS (SELECT 1 FROM asset.copy_vis_attr_cache WHERE record = m.source) " .
+                    "AND (bre.vis_attr_vector IS NULL OR NOT ( int4range(0,268435455,'[]') @> ANY(bre.vis_attr_vector) ))".
+                "))";
         }
     }
  
@@ -1051,9 +1054,6 @@ sub toSQL {
 
         # These are magic numbers... see: search.calculate_visibility_attribute() UDF
         $final_b_attr_test = '(b_attr.vis_test IS NULL OR bre.vis_attr_vector @@ b_attr.vis_test)';
-        if (!$pb_vis_test) { # staff search
-            $final_b_attr_test .= " OR NOT ( int4range(0,268435455,'[]') @> ANY(bre.vis_attr_vector) )";
-        }
     }
 
     if ($final_c_attr_test or $final_b_attr_test) { # something...
index a8b2161..c8c3ea7 100644 (file)
@@ -95,7 +95,7 @@ sub ordered_records_from_metarecord { # XXX Replace with QP-based search-within-
     my $org = shift;
     my $depth = shift;
 
-    my $copies_visible = 'LEFT JOIN asset.copy_attr_vis_cache vc ON (br.id = vc.record '.
+    my $copies_visible = 'LEFT JOIN asset.copy_vis_attr_cache vc ON (br.id = vc.record '.
                          'AND vc.vis_attr_vector @@ (SELECT c_attrs::query_int FROM asset.patron_default_visibility_mask() LIMIT 1))';
     $copies_visible = '' if ($self->api_name =~ /staff/o);
 
index a7f56a2..2e48ce3 100644 (file)
@@ -294,7 +294,7 @@ sub mk_copy_query {
 
     if($org != $self->ctx->{aou_tree}->()->id) { 
         # no need to add the org join filter if we're not actually filtering
-        $query->{from}->{acp}->{aou} = {
+        $query->{from}->{acp}->[1] = { aou => {
             fkey => 'circ_lib',
             field => 'id',
             filter => {
@@ -311,7 +311,7 @@ sub mk_copy_query {
                     }
                 }
             }
-        };
+        }};
     };
 
     # Unsure if we want these in the shared function, leaving here for now
index 6610d59..d821d84 100644 (file)
@@ -92,7 +92,7 @@ CREATE TRIGGER no_overlapping_deps
     BEFORE INSERT OR UPDATE ON config.db_patch_dependencies
     FOR EACH ROW EXECUTE PROCEDURE evergreen.array_overlap_check ('deprecates');
 
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1077', :eg_version); -- csharp/gmcharlt
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1082', :eg_version); -- jboyer/gmcharlt
 
 CREATE TABLE config.bib_source (
        id              SERIAL  PRIMARY KEY,
@@ -215,7 +215,7 @@ CREATE TABLE config.metabib_field (
        authority_xpath TEXT,
        joiner      TEXT,
        restrict        BOOL    DEFAULT FALSE NOT NULL,
-    display_field BOOL NOT NULL DEFAULT TRUE
+    display_field BOOL NOT NULL DEFAULT FALSE
 );
 COMMENT ON TABLE config.metabib_field IS $$
 XPath used for record indexing ingest
index 53ddea5..13620a4 100644 (file)
@@ -64,7 +64,7 @@ CREATE INDEX biblio_record_entry_fp_idx ON biblio.record_entry ( fingerprint );
 CREATE UNIQUE INDEX biblio_record_unique_tcn ON biblio.record_entry (tcn_value) WHERE deleted = FALSE OR deleted IS FALSE;
 CREATE TRIGGER a_marcxml_is_well_formed BEFORE INSERT OR UPDATE ON biblio.record_entry FOR EACH ROW EXECUTE PROCEDURE biblio.check_marcxml_well_formed();
 CREATE TRIGGER b_maintain_901 BEFORE INSERT OR UPDATE ON biblio.record_entry FOR EACH ROW EXECUTE PROCEDURE evergreen.maintain_901();
-CREATE TRIGGER c_maintain_control_numbers BEFORE INSERT OR UPDATE ON biblio.record_entry FOR EACH ROW EXECUTE PROCEDURE maintain_control_numbers();
+CREATE TRIGGER c_maintain_control_numbers BEFORE INSERT OR UPDATE ON biblio.record_entry FOR EACH ROW EXECUTE PROCEDURE evergreen.maintain_control_numbers();
 
 CREATE TABLE biblio.record_note (
        id              BIGSERIAL       PRIMARY KEY,
index 4387417..6e1e034 100644 (file)
@@ -222,7 +222,7 @@ CREATE INDEX authority_record_entry_edit_date_idx ON authority.record_entry ( ed
 CREATE INDEX authority_record_deleted_idx ON authority.record_entry(deleted) WHERE deleted IS FALSE OR deleted = false;
 CREATE TRIGGER a_marcxml_is_well_formed BEFORE INSERT OR UPDATE ON authority.record_entry FOR EACH ROW EXECUTE PROCEDURE biblio.check_marcxml_well_formed();
 CREATE TRIGGER b_maintain_901 BEFORE INSERT OR UPDATE ON authority.record_entry FOR EACH ROW EXECUTE PROCEDURE evergreen.maintain_901();
-CREATE TRIGGER c_maintain_control_numbers BEFORE INSERT OR UPDATE ON authority.record_entry FOR EACH ROW EXECUTE PROCEDURE maintain_control_numbers();
+CREATE TRIGGER c_maintain_control_numbers BEFORE INSERT OR UPDATE ON authority.record_entry FOR EACH ROW EXECUTE PROCEDURE evergreen.maintain_control_numbers();
 
 CREATE TABLE authority.authority_linking (
     id      BIGSERIAL PRIMARY KEY,
@@ -295,7 +295,7 @@ BEGIN
     IF thes_code IS NULL THEN
         thes_code := '|';
     ELSIF thes_code = 'z' THEN
-        thes_code := COALESCE( oils_xpath_string('//*[@tag="040"]/*[@code="f"][1]', marcxml), '' );
+        thes_code := COALESCE( oils_xpath_string('//*[@tag="040"]/*[@code="f"][1]', marcxml), 'z' );
     ELSE
         SELECT code INTO thes_code FROM authority.thesaurus WHERE short_code = thes_code;
         IF NOT FOUND THEN
@@ -417,6 +417,7 @@ CREATE OR REPLACE FUNCTION authority.simple_heading_set( marcxml TEXT ) RETURNS
 DECLARE
     res             authority.simple_heading%ROWTYPE;
     acsaf           authority.control_set_authority_field%ROWTYPE;
+    heading_row     authority.heading%ROWTYPE;
     tag_used        TEXT;
     nfi_used        TEXT;
     sf              TEXT;
@@ -443,50 +444,58 @@ BEGIN
     res.thesaurus := authority.extract_thesaurus(marcxml);
 
     FOR acsaf IN SELECT * FROM authority.control_set_authority_field WHERE control_set = cset LOOP
-
         res.atag := acsaf.id;
-        tag_used := acsaf.tag;
-        nfi_used := acsaf.nfi;
-        joiner_text := COALESCE(acsaf.joiner, ' ');
-
-        FOR tmp_xml IN SELECT UNNEST(XPATH('//*[@tag="'||tag_used||'"]', marcxml::XML)::TEXT[]) LOOP
-
-            heading_text := COALESCE(
-                oils_xpath_string('./*[contains("'||acsaf.display_sf_list||'",@code)]', tmp_xml, joiner_text),
-                ''
-            );
-
-            IF nfi_used IS NOT NULL THEN
-
-                sort_text := SUBSTRING(
-                    heading_text FROM
-                    COALESCE(
-                        NULLIF(
-                            REGEXP_REPLACE(
-                                oils_xpath_string('./@ind'||nfi_used, tmp_xml::TEXT),
-                                $$\D+$$,
-                                '',
-                                'g'
-                            ),
-                            ''
-                        )::INT,
-                        0
-                    ) + 1
-                );
 
-            ELSE
-                sort_text := heading_text;
-            END IF;
-
-            IF heading_text IS NOT NULL AND heading_text <> '' THEN
-                res.value := heading_text;
-                res.sort_value := public.naco_normalize(sort_text);
+        IF acsaf.heading_field IS NULL THEN
+            tag_used := acsaf.tag;
+            nfi_used := acsaf.nfi;
+            joiner_text := COALESCE(acsaf.joiner, ' ');
+    
+            FOR tmp_xml IN SELECT UNNEST(XPATH('//*[@tag="'||tag_used||'"]', marcxml::XML)::TEXT[]) LOOP
+    
+                heading_text := COALESCE(
+                    oils_xpath_string('./*[contains("'||acsaf.display_sf_list||'",@code)]', tmp_xml, joiner_text),
+                    ''
+                );
+    
+                IF nfi_used IS NOT NULL THEN
+    
+                    sort_text := SUBSTRING(
+                        heading_text FROM
+                        COALESCE(
+                            NULLIF(
+                                REGEXP_REPLACE(
+                                    oils_xpath_string('./@ind'||nfi_used, tmp_xml::TEXT),
+                                    $$\D+$$,
+                                    '',
+                                    'g'
+                                ),
+                                ''
+                            )::INT,
+                            0
+                        ) + 1
+                    );
+    
+                ELSE
+                    sort_text := heading_text;
+                END IF;
+    
+                IF heading_text IS NOT NULL AND heading_text <> '' THEN
+                    res.value := heading_text;
+                    res.sort_value := public.naco_normalize(sort_text);
+                    res.index_vector = to_tsvector('keyword'::regconfig, res.sort_value);
+                    RETURN NEXT res;
+                END IF;
+    
+            END LOOP;
+        ELSE
+            FOR heading_row IN SELECT * FROM authority.extract_headings(marcxml, ARRAY[acsaf.heading_field]) LOOP
+                res.value := heading_row.heading;
+                res.sort_value := heading_row.normalized_heading;
                 res.index_vector = to_tsvector('keyword'::regconfig, res.sort_value);
                 RETURN NEXT res;
-            END IF;
-
-        END LOOP;
-
+            END LOOP;
+        END IF;
     END LOOP;
 
     RETURN;
index 0faba5f..f9948cb 100644 (file)
@@ -977,8 +977,7 @@ CREATE TRIGGER asset_copy_tag_fti_trigger
 
 CREATE TABLE asset.copy_tag_copy_map (
     id              BIGSERIAL PRIMARY KEY,
-    copy            BIGINT REFERENCES asset.copy (id)
-                    ON UPDATE CASCADE ON DELETE CASCADE,
+    copy            BIGINT,
     tag             INTEGER REFERENCES asset.copy_tag (id)
                     ON UPDATE CASCADE ON DELETE CASCADE
 );
index b3a3056..594dfaf 100644 (file)
@@ -75,7 +75,7 @@ END;
 $f$ LANGUAGE PLPGSQL VOLATILE COST 50;
 
 CREATE CONSTRAINT TRIGGER inherit_copy_bucket_item_target_copy_fkey
-        AFTER UPDATE OR INSERT OR DELETE ON container.copy_bucket_item
+        AFTER UPDATE OR INSERT ON container.copy_bucket_item
         DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.container_copy_bucket_item_target_copy_inh_fkey();
 
 
index 8211b23..7d06aba 100644 (file)
@@ -826,7 +826,7 @@ CREATE TABLE action.fieldset_group (
 CREATE TABLE action.fieldset (
     id              SERIAL          PRIMARY KEY,
     fieldset_group  INT             REFERENCES action.fieldset_group (id)
-                                    DEFERRABLE INITIALLY DEFERRED,
+                                    ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
     owner           INT             NOT NULL REFERENCES actor.usr (id)
                                     DEFERRABLE INITIALLY DEFERRED,
        owning_lib      INT             NOT NULL REFERENCES actor.org_unit (id)
index 8c65f6f..eac0097 100644 (file)
@@ -25,7 +25,7 @@ CREATE INDEX serial_record_entry_creator_idx ON serial.record_entry ( creator );
 CREATE INDEX serial_record_entry_editor_idx ON serial.record_entry ( editor );
 CREATE INDEX serial_record_entry_owning_lib_idx ON serial.record_entry ( owning_lib, deleted );
 CREATE TRIGGER b_maintain_901 BEFORE INSERT OR UPDATE ON serial.record_entry FOR EACH ROW EXECUTE PROCEDURE evergreen.maintain_901();
-CREATE TRIGGER c_maintain_control_numbers BEFORE INSERT OR UPDATE ON serial.record_entry FOR EACH ROW EXECUTE PROCEDURE maintain_control_numbers();
+CREATE TRIGGER c_maintain_control_numbers BEFORE INSERT OR UPDATE ON serial.record_entry FOR EACH ROW EXECUTE PROCEDURE evergreen.maintain_control_numbers();
 
 CREATE RULE protect_mfhd_delete AS ON DELETE TO serial.record_entry DO INSTEAD UPDATE serial.record_entry SET deleted = true WHERE old.id = serial.record_entry.id;
 
index 3349a45..8dabef9 100644 (file)
@@ -593,7 +593,7 @@ BEGIN
             RETURN NEW;
         ELSIF TG_OP = 'DELETE' THEN
             DELETE FROM asset.copy_vis_attr_cache
-              WHERE record = NEW.peer_record AND target_copy = NEW.target_copy;
+              WHERE record = OLD.peer_record AND target_copy = OLD.target_copy;
 
             RETURN OLD;
         END IF;
index 1ee6ead..5835ee3 100644 (file)
@@ -124,7 +124,7 @@ END;
 $f$ LANGUAGE PLPGSQL VOLATILE COST 50;
 
 CREATE CONSTRAINT TRIGGER inherit_import_item_imported_as_fkey
-        AFTER UPDATE OR INSERT OR DELETE ON vandelay.import_item
+        AFTER UPDATE OR INSERT ON vandelay.import_item
         DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.vandelay_import_item_imported_as_inh_fkey();
 
 ALTER TABLE vandelay.bib_queue ADD CONSTRAINT match_bucket_fkey FOREIGN KEY (match_bucket) REFERENCES container.biblio_record_entry_bucket(id) ON DELETE SET NULL DEFERRABLE INITIALLY DEFERRED;
@@ -142,9 +142,25 @@ END;
 $f$ LANGUAGE PLPGSQL VOLATILE COST 50;
 
 CREATE CONSTRAINT TRIGGER inherit_asset_copy_note_copy_fkey
-        AFTER UPDATE OR INSERT OR DELETE ON asset.copy_note
+        AFTER UPDATE OR INSERT ON asset.copy_note
         DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.asset_copy_note_owning_copy_inh_fkey();
 
+CREATE OR REPLACE FUNCTION evergreen.asset_copy_tag_copy_map_copy_inh_fkey() RETURNS TRIGGER AS $f$
+BEGIN
+        PERFORM 1 FROM asset.copy WHERE id = NEW.copy;
+        IF NOT FOUND THEN
+                RAISE foreign_key_violation USING MESSAGE = FORMAT(
+                        $$Referenced asset.copy id not found, copy:%s$$, NEW.copy
+                );
+        END IF;
+        RETURN NEW;
+END;
+$f$ LANGUAGE PLPGSQL VOLATILE COST 50;
+
+CREATE CONSTRAINT TRIGGER inherit_asset_copy_tag_copy_map_copy_fkey
+        AFTER UPDATE OR INSERT ON asset.copy_tag_copy_map
+        DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.asset_copy_tag_copy_map_copy_inh_fkey();
+
 ALTER TABLE asset.copy_note ADD CONSTRAINT asset_copy_note_creator_fkey FOREIGN KEY (creator) REFERENCES actor.usr (id) ON DELETE SET NULL DEFERRABLE INITIALLY DEFERRED;
 
 ALTER TABLE asset.call_number ADD CONSTRAINT asset_call_number_owning_lib_fkey FOREIGN KEY (owning_lib) REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED;
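
Note: the schema change earlier in this diff drops the declarative REFERENCES asset.copy (id) clause from asset.copy_tag_copy_map.copy; this new constraint trigger re-creates the check using the same "inherited fkey" pattern as the neighboring copy_bucket_item, import_item, and copy_note triggers, so references to copies stored in tables that inherit from asset.copy can still validate (a declarative foreign key cannot see rows in child tables). A minimal sketch of the effect, with hypothetical ids:

    -- Assumes a copy_tag with id 1 exists and that no asset.copy row has id
    -- 999999999.  The constraint trigger fires at the end of the statement and
    -- should raise foreign_key_violation with the message
    -- "Referenced asset.copy id not found, copy:999999999".
    INSERT INTO asset.copy_tag_copy_map (copy, tag) VALUES (999999999, 1);
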
index a42bf69..dc72926 100644 (file)
@@ -1061,6 +1061,8 @@ BEGIN
                SELECT  INTO target_cn *
                  FROM  asset.call_number
                  WHERE label = source_cn.label
+            AND prefix = source_cn.prefix
+            AND suffix = source_cn.suffix
                        AND owning_lib = source_cn.owning_lib
                        AND record = target_record
                        AND NOT deleted;
@@ -1082,6 +1084,8 @@ BEGIN
                
                                moved_objects := moved_objects + 1;
                        END LOOP;
+        
+            UPDATE asset.call_number SET deleted = TRUE WHERE id = source_cn.id;
 
                -- ... if not ...
                ELSE
index 2b3540c..4b8df82 100755 (executable)
@@ -15,7 +15,7 @@ export PGHOST PGPORT PGDATABASE PGUSER PGPASSWORD
 # ---------------------------------------------------------------------------
 # Lookup the database version from the PostgreSQL server.
 # ---------------------------------------------------------------------------
-DB_VERSION=`psql -qtc 'show server_version;' | xargs | cut -c1,3`
+DB_VERSION=`psql -qtc 'show server_version;' | xargs | cut -d. -f 1,2 | tr -d '.' | cut -c1,2`
 if [ -z "$DB_VERSION" ] || [ `echo $DB_VERSION | grep -c '[^0-9]'` != 0 ]; then
   cat <<EOM
 ********************************************************************************
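
Note: the old "cut -c1,3" took characters 1 and 3 of the reported server version, which works for two-part 9.x numbers ("9.6.5" yields "96") but produces "1." for PostgreSQL 10 and later ("10.1"), tripping the non-numeric check below. The replacement keeps the first two dot-separated components, strips the dot, and takes the first two characters, so "9.6.5" still yields "96" while "10.1" yields "10".
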
diff --git a/Open-ILS/src/sql/Pg/t/lp1145213_test_func_asset.merge_record_assets.pg b/Open-ILS/src/sql/Pg/t/lp1145213_test_func_asset.merge_record_assets.pg
new file mode 100644 (file)
index 0000000..c6adcb3
--- /dev/null
@@ -0,0 +1,94 @@
+BEGIN;
+
+SELECT plan(5);
+
+----------------------------------
+--
+-- Setup Test environment and data
+--
+----------------------------------
+
+-- create mock bib records to be merged:
+-- Data:
+-- bib 60000 (new lead / merge target), org 4 acn 'F Cline' copy 1
+-- bib 60001 (merge source),            org 5 acn 'F Cline' copy 2, org 6 acn 'JF Cline' copy 3, org 4 acn 'F Cline' copy 4
+--
+-- copy 4's acn duplicates the 'F Cline' acn already on bib 60000 and should collapse into it
+
+-- create bib 60,000
+INSERT into biblio.record_entry (id, marc, last_xact_id)
+  VALUES (60000, 
+  $$
+  <record xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd" xmlns="http://www.loc.gov/MARC21/slim"><leader>00934njm a2200241 a 4500</leader><controlfield tag="001">03-0003745</controlfield><controlfield tag="005">19991118131708.0</controlfield><controlfield tag="008">971016n nyuuuu eng </controlfield><datafield tag="050" ind1=" " ind2=" "><subfield code="a">4539</subfield></datafield><datafield tag="100" ind1="1" ind2=" "><subfield code="a">Bart&#xF3;k, B&#xE9;la,</subfield><subfield code="d">1881-1945.</subfield></datafield><datafield tag="240" ind1=" " ind2=" "><subfield code="t">Concertos,</subfield><subfield code="m">piano,</subfield><subfield code="n">no. 1,</subfield><subfield code="n">Sz. 83</subfield><subfield code="f">(1926) </subfield><subfield code="a">Concertos,</subfield><subfield code="m">piano,</subfield><subfield code="n">no. 1,</subfield><subfield code="n">Sz. 83,</subfield><subfield code="n">(1926)</subfield></datafield><datafield tag="245" ind1=" " ind2=" "><subfield code="a">Piano concerto no. 1 (1926) ; Rhapsody, op. 1 (1904)</subfield></datafield><datafield tag="260" ind1=" " ind2=" "><subfield code="a">New York, NY :</subfield><subfield code="b">Vox</subfield></datafield><datafield tag="300" ind1=" " ind2=" "><subfield code="a">1 sound disc :</subfield><subfield code="b">33 1/3 rpm, stereo.</subfield></datafield><datafield tag="349" ind1=" " ind2=" "><subfield code="a">PHONO RECORD</subfield></datafield><datafield tag="511" ind1=" " ind2=" "><subfield code="a">Gy&#xF6;rgy S&#xE1;ndor, piano ; Sudwest[rund]funkorchester, Baden-Baden ; Rolf Reinhardt, conductor.</subfield></datafield><datafield tag="700" ind1="1" ind2=" "><subfield code="a">S&#xE1;ndor, Gy&#xF6;rgy,</subfield><subfield code="d">1912-</subfield></datafield><datafield tag="700" ind1="1" ind2=" "><subfield code="a">Reinhardt, Rolf</subfield></datafield><datafield tag="710" ind1=" " ind2=" "><subfield code="a">Sudwestrundfunkorchester (Baden-Baden, Germany)</subfield></datafield><datafield tag="730" ind1=" " ind2=" "><subfield code="a">Rhapsodies,</subfield><subfield code="m">piano, orchestra,</subfield><subfield code="n">op. 1,</subfield><subfield code="n">Sz. 27,</subfield><subfield code="n">(1904)</subfield></datafield><datafield tag="901" ind1=" " ind2=" "><subfield code="a">a339398</subfield><subfield code="b">Sirsi_Auto</subfield><subfield code="c">339398</subfield></datafield></record>
+  $$,
+  'PGTAP'
+  );
+
+
+-- create bib 60,001
+INSERT into biblio.record_entry (id, marc, last_xact_id)
+  VALUES (60001,
+  $$
+  <record xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd" xmlns="http://www.loc.gov/MARC21/slim"><leader>00863njm a2200253 a 4500</leader><controlfield tag="001">03-0004689</controlfield><controlfield tag="005">19991127191346.0</controlfield><controlfield tag="008">971027r19631952nyuuuu eng </controlfield><datafield tag="050" ind1=" " ind2=" "><subfield code="a">4578</subfield></datafield><datafield tag="100" ind1="1" ind2=" "><subfield code="a">Telemann, Georg Philipp,</subfield><subfield code="d">1681-1767</subfield></datafield><datafield tag="245" ind1=" " ind2=" "><subfield code="a">Viola concerto in G major</subfield></datafield><datafield tag="260" ind1=" " ind2=" "><subfield code="a">New York, NY :</subfield><subfield code="b">Vox,</subfield><subfield code="c">1963</subfield></datafield><datafield tag="300" ind1=" " ind2=" "><subfield code="a">1 sound disc :</subfield><subfield code="b">33 1/3 rpm, mono.</subfield></datafield><datafield tag="349" ind1=" " ind2=" "><subfield code="a">PHONO RECORD</subfield></datafield><datafield tag="505" ind1=" " ind2=" "><subfield code="a">Viola concerto / Telemann -- Viola concerto in D major / Stamitz.</subfield></datafield><datafield tag="511" ind1=" " ind2=" "><subfield code="a">Heinz Wigand, viola ; Pro Musica Orchestra, Stuttgart ; Rolf Reinhardt, conductor.</subfield></datafield><datafield tag="650" ind1=" " ind2="0"><subfield code="a">Concertos (Viola)</subfield></datafield><datafield tag="700" ind1="1" ind2=" "><subfield code="a">Stamitz, Carl,</subfield><subfield code="d">1745-1801</subfield></datafield><datafield tag="700" ind1="1" ind2=" "><subfield code="a">Reinhardt, Rolf</subfield></datafield><datafield tag="700" ind1=" " ind2=" "><subfield code="a">Wigand, Heinz</subfield></datafield><datafield tag="710" ind1="2" ind2=" "><subfield code="a">Pro Musica Orchestra (Stuttgart)</subfield></datafield><datafield tag="901" ind1=" " ind2=" "><subfield code="a">a340312</subfield><subfield code="b">Sirsi_Auto</subfield><subfield code="c">340312</subfield></datafield></record>
+  $$,
+  'PGTAP'
+  );
+
+
+INSERT into asset.call_number(id, record, creator, editor, owning_lib, label, label_class, prefix)
+    VALUES (999999, 60000, 1, 1, 4, 'Cline', 1, 9986),
+    (1000000,60001, 1, 1, 5, 'Cline', 1, 9987),
+    (1000001,60001, 1, 1, 6, 'Cline', 1, 9988),
+    (1000002,60001, 1, 1, 4, 'Cline', 1, 9986);
+
+INSERT into asset.call_number_prefix(id, owning_lib, label) VALUES
+    (9986, 4, 'F'),
+    (9987, 5, 'F'),
+    (9988, 6, 'JF');
+
+-- each copy's circ_lib matches its call number's owning_lib
+INSERT INTO asset.copy(id, circ_lib, creator, call_number, editor, copy_number, loan_duration, fine_level, barcode) VALUES
+    (905555, 4, 1, 999999, 1, 1, 1, 1, '1copycopycopy'),
+    (906666, 5, 1, 1000000, 1, 1, 1, 1, '2copycopycopy'),
+    (907777, 6, 1, 1000001, 1, 1, 1, 1, '3copycopycopy'),
+    (908888, 4, 1, 1000002, 1, 1, 1, 1, '4copycopycopy');
+
+-----------------------------------
+-- Test asset.merge_record_assets() 
+-----------------------------------
+
+-- do merge
+SELECT is(asset.merge_record_assets(60000, 60001), 4, 'Record assets merged!');
+
+-- check if copy 4's acn was updated
+SELECT is(
+  (SELECT call_number from asset.copy where id=908888)::BIGINT,
+  999999::BIGINT,
+  'LP 1145213 asset.merge_record_assets() messing up call numbers. copy 4 should have acn -> 999999'
+);
+
+-- acn #1,000,002 should be deleted
+SELECT is(
+  (SELECT deleted FROM asset.call_number WHERE id=1000002)::BOOLEAN,
+  TRUE,
+  'LP 1145213 asset.merge_record_assets() should have deleted acn #1000002'
+);
+
+-- no non-deleted acn should still point to the merged-away source bib record #60,001
+SELECT is(
+  (SELECT count(*) FROM asset.call_number WHERE record=60001 AND not deleted=true)::INT,
+  0::INT,
+  'LP 1145213 asset.merge_record_assets() all call_numbers should point to bib record #60,000 '
+);
+
+-- test copies to make sure none point to stale acn
+SELECT is(
+  (SELECT count(*) from asset.copy where call_number=1000002)::INT,
+  0::INT,
+  'LP 1145213 asset.merge_record_assets() all copies should point to acn #999999 which is for bib record #60,000 '
+);
+
+SELECT * FROM finish();
+
+ROLLBACK;
+
diff --git a/Open-ILS/src/sql/Pg/upgrade/1078.data.add_possibly_missing_billing_types.sql b/Open-ILS/src/sql/Pg/upgrade/1078.data.add_possibly_missing_billing_types.sql
new file mode 100644 (file)
index 0000000..d5c4705
--- /dev/null
@@ -0,0 +1,22 @@
+BEGIN;
+
+SELECT evergreen.upgrade_deps_block_check('1078', :eg_version); -- csharp/bshum/gmcharlt
+
+-- The following billing types would not have been automatically added
+-- in upgrade scripts between versions 1.2 and 1.4 (early 2009).  We
+-- add them here.  It's okay if they fail, so this should probably be 
+-- run outside a transaction if added to the version-upgrade scripts.
+
+INSERT INTO config.billing_type (id, name, owner)
+    SELECT 7, 'Damaged Item', 1
+    WHERE NOT EXISTS (SELECT 1 FROM config.billing_type WHERE name = 'Damaged Item');
+
+INSERT INTO config.billing_type (id, name, owner)
+    SELECT 8, 'Damaged Item Processing Fee', 1
+    WHERE NOT EXISTS (SELECT 1 FROM config.billing_type WHERE name = 'Damaged Item Processing Fee');
+
+INSERT INTO config.billing_type (id, name, owner)
+    SELECT 9, 'Notification Fee', 1
+    WHERE NOT EXISTS (SELECT 1 FROM config.billing_type WHERE name = 'Notification Fee');
+
+COMMIT;
diff --git a/Open-ILS/src/sql/Pg/upgrade/1079.schema.fix_asset_merge.sql b/Open-ILS/src/sql/Pg/upgrade/1079.schema.fix_asset_merge.sql
new file mode 100644 (file)
index 0000000..0f82c96
--- /dev/null
@@ -0,0 +1,263 @@
+BEGIN;
+
+SELECT evergreen.upgrade_deps_block_check('1079', :eg_version); -- rhamby/cesardv/gmcharlt
+
+CREATE OR REPLACE FUNCTION asset.merge_record_assets( target_record BIGINT, source_record BIGINT ) RETURNS INT AS $func$
+DECLARE
+    moved_objects INT := 0;
+    source_cn     asset.call_number%ROWTYPE;
+    target_cn     asset.call_number%ROWTYPE;
+    metarec       metabib.metarecord%ROWTYPE;
+    hold          action.hold_request%ROWTYPE;
+    ser_rec       serial.record_entry%ROWTYPE;
+    ser_sub       serial.subscription%ROWTYPE;
+    acq_lineitem  acq.lineitem%ROWTYPE;
+    acq_request   acq.user_request%ROWTYPE;
+    booking       booking.resource_type%ROWTYPE;
+    source_part   biblio.monograph_part%ROWTYPE;
+    target_part   biblio.monograph_part%ROWTYPE;
+    multi_home    biblio.peer_bib_copy_map%ROWTYPE;
+    uri_count     INT := 0;
+    counter       INT := 0;
+    uri_datafield TEXT;
+    uri_text      TEXT := '';
+BEGIN
+
+    -- move any 856 entries on records that have at least one MARC-mapped URI entry
+    SELECT  INTO uri_count COUNT(*)
+      FROM  asset.uri_call_number_map m
+            JOIN asset.call_number cn ON (m.call_number = cn.id)
+      WHERE cn.record = source_record;
+
+    IF uri_count > 0 THEN
+        
+        -- This returns more nodes than you might expect:
+        -- 7 instead of 1 for an 856 with $u $y $9
+        SELECT  COUNT(*) INTO counter
+          FROM  oils_xpath_table(
+                    'id',
+                    'marc',
+                    'biblio.record_entry',
+                    '//*[@tag="856"]',
+                    'id=' || source_record
+                ) as t(i int,c text);
+    
+        FOR i IN 1 .. counter LOOP
+            SELECT  '<datafield xmlns="http://www.loc.gov/MARC21/slim"' || 
+                       ' tag="856"' ||
+                       ' ind1="' || FIRST(ind1) || '"'  ||
+                       ' ind2="' || FIRST(ind2) || '">' ||
+                        STRING_AGG(
+                            '<subfield code="' || subfield || '">' ||
+                            regexp_replace(
+                                regexp_replace(
+                                    regexp_replace(data,'&','&amp;','g'),
+                                    '>', '&gt;', 'g'
+                                ),
+                                '<', '&lt;', 'g'
+                            ) || '</subfield>', ''
+                        ) || '</datafield>' INTO uri_datafield
+              FROM  oils_xpath_table(
+                        'id',
+                        'marc',
+                        'biblio.record_entry',
+                        '//*[@tag="856"][position()=' || i || ']/@ind1|' ||
+                        '//*[@tag="856"][position()=' || i || ']/@ind2|' ||
+                        '//*[@tag="856"][position()=' || i || ']/*/@code|' ||
+                        '//*[@tag="856"][position()=' || i || ']/*[@code]',
+                        'id=' || source_record
+                    ) as t(id int,ind1 text, ind2 text,subfield text,data text);
+
+            -- As most of the results will be NULL, protect against NULLifying
+            -- the valid content that we do generate
+            uri_text := uri_text || COALESCE(uri_datafield, '');
+        END LOOP;
+
+        IF uri_text <> '' THEN
+            UPDATE  biblio.record_entry
+              SET   marc = regexp_replace(marc,'(</[^>]*record>)', uri_text || E'\\1')
+              WHERE id = target_record;
+        END IF;
+
+    END IF;
+
+       -- Find and move metarecords to the target record
+       SELECT  INTO metarec *
+         FROM  metabib.metarecord
+         WHERE master_record = source_record;
+
+       IF FOUND THEN
+               UPDATE  metabib.metarecord
+                 SET   master_record = target_record,
+                       mods = NULL
+                 WHERE id = metarec.id;
+
+               moved_objects := moved_objects + 1;
+       END IF;
+
+       -- Find call numbers attached to the source ...
+       FOR source_cn IN SELECT * FROM asset.call_number WHERE record = source_record LOOP
+
+               SELECT  INTO target_cn *
+                 FROM  asset.call_number
+                 WHERE label = source_cn.label
+            AND prefix = source_cn.prefix
+            AND suffix = source_cn.suffix
+                       AND owning_lib = source_cn.owning_lib
+                       AND record = target_record
+                       AND NOT deleted;
+
+               -- ... and if there's a conflicting one on the target ...
+               IF FOUND THEN
+
+                       -- ... move the copies to that, and ...
+                       UPDATE  asset.copy
+                         SET   call_number = target_cn.id
+                         WHERE call_number = source_cn.id;
+
+                       -- ... move V holds to the move-target call number
+                       FOR hold IN SELECT * FROM action.hold_request WHERE target = source_cn.id AND hold_type = 'V' LOOP
+               
+                               UPDATE  action.hold_request
+                                 SET   target = target_cn.id
+                                 WHERE id = hold.id;
+               
+                               moved_objects := moved_objects + 1;
+                       END LOOP;
+        
+            UPDATE asset.call_number SET deleted = TRUE WHERE id = source_cn.id;
+
+               -- ... if not ...
+               ELSE
+                       -- ... just move the call number to the target record
+                       UPDATE  asset.call_number
+                         SET   record = target_record
+                         WHERE id = source_cn.id;
+               END IF;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find T holds targeting the source record ...
+       FOR hold IN SELECT * FROM action.hold_request WHERE target = source_record AND hold_type = 'T' LOOP
+
+               -- ... and move them to the target record
+               UPDATE  action.hold_request
+                 SET   target = target_record
+                 WHERE id = hold.id;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find serial records targeting the source record ...
+       FOR ser_rec IN SELECT * FROM serial.record_entry WHERE record = source_record LOOP
+               -- ... and move them to the target record
+               UPDATE  serial.record_entry
+                 SET   record = target_record
+                 WHERE id = ser_rec.id;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find serial subscriptions targeting the source record ...
+       FOR ser_sub IN SELECT * FROM serial.subscription WHERE record_entry = source_record LOOP
+               -- ... and move them to the target record
+               UPDATE  serial.subscription
+                 SET   record_entry = target_record
+                 WHERE id = ser_sub.id;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find booking resource types targeting the source record ...
+       FOR booking IN SELECT * FROM booking.resource_type WHERE record = source_record LOOP
+               -- ... and move them to the target record
+               UPDATE  booking.resource_type
+                 SET   record = target_record
+                 WHERE id = booking.id;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find acq lineitems targeting the source record ...
+       FOR acq_lineitem IN SELECT * FROM acq.lineitem WHERE eg_bib_id = source_record LOOP
+               -- ... and move them to the target record
+               UPDATE  acq.lineitem
+                 SET   eg_bib_id = target_record
+                 WHERE id = acq_lineitem.id;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find acq user purchase requests targeting the source record ...
+       FOR acq_request IN SELECT * FROM acq.user_request WHERE eg_bib = source_record LOOP
+               -- ... and move them to the target record
+               UPDATE  acq.user_request
+                 SET   eg_bib = target_record
+                 WHERE id = acq_request.id;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find parts attached to the source ...
+       FOR source_part IN SELECT * FROM biblio.monograph_part WHERE record = source_record LOOP
+
+               SELECT  INTO target_part *
+                 FROM  biblio.monograph_part
+                 WHERE label = source_part.label
+                       AND record = target_record;
+
+               -- ... and if there's a conflicting one on the target ...
+               IF FOUND THEN
+
+                       -- ... move the copy-part maps to that, and ...
+                       UPDATE  asset.copy_part_map
+                         SET   part = target_part.id
+                         WHERE part = source_part.id;
+
+                       -- ... move P holds to the move-target part
+                       FOR hold IN SELECT * FROM action.hold_request WHERE target = source_part.id AND hold_type = 'P' LOOP
+               
+                               UPDATE  action.hold_request
+                                 SET   target = target_part.id
+                                 WHERE id = hold.id;
+               
+                               moved_objects := moved_objects + 1;
+                       END LOOP;
+
+               -- ... if not ...
+               ELSE
+                       -- ... just move the part to the target record
+                       UPDATE  biblio.monograph_part
+                         SET   record = target_record
+                         WHERE id = source_part.id;
+               END IF;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- Find multi_home items attached to the source ...
+       FOR multi_home IN SELECT * FROM biblio.peer_bib_copy_map WHERE peer_record = source_record LOOP
+               -- ... and move them to the target record
+               UPDATE  biblio.peer_bib_copy_map
+                 SET   peer_record = target_record
+                 WHERE id = multi_home.id;
+
+               moved_objects := moved_objects + 1;
+       END LOOP;
+
+       -- And delete mappings where the item's home bib was merged with the peer bib
+       DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = (
+               SELECT (SELECT record FROM asset.call_number WHERE id = call_number)
+               FROM asset.copy WHERE id = target_copy
+       );
+
+    -- Finally, "delete" the source record
+    DELETE FROM biblio.record_entry WHERE id = source_record;
+
+       -- That's all, folks!
+       RETURN moved_objects;
+END;
+$func$ LANGUAGE plpgsql;
+
+COMMIT;
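
Note: the functional changes in this script appear to be the two added "AND prefix/suffix" conditions in the call-number matching query and the UPDATE that soft-deletes the emptied source call number; the rest restates the existing asset.merge_record_assets() body. A rough illustration, reusing the ids from the pgTAP test added above (t/lp1145213_test_func_asset.merge_record_assets.pg):

    -- Merge source bib 60001 into lead bib 60000.
    SELECT asset.merge_record_assets(60000, 60001);
    -- Call number 1000002 (lib 4, prefix 'F', label 'Cline') matches call
    -- number 999999 on the lead record, so its copy is moved there and
    -- 1000002 is marked deleted; call numbers differing in prefix, suffix,
    -- or owning_lib are simply re-pointed at the lead record instead.
    SELECT id, record, deleted FROM asset.call_number WHERE id IN (999999, 1000002);
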
diff --git a/Open-ILS/src/sql/Pg/upgrade/1080.schema.lp1724246_cache_copy_visibility.sql b/Open-ILS/src/sql/Pg/upgrade/1080.schema.lp1724246_cache_copy_visibility.sql
new file mode 100644 (file)
index 0000000..0a57fb2
--- /dev/null
@@ -0,0 +1,148 @@
+BEGIN;
+
+SELECT evergreen.upgrade_deps_block_check('1080', :eg_version); -- miker/jboyer/gmcharlt
+
+CREATE OR REPLACE FUNCTION asset.cache_copy_visibility () RETURNS TRIGGER as $func$
+DECLARE
+    ocn     asset.call_number%ROWTYPE;
+    ncn     asset.call_number%ROWTYPE;
+    cid     BIGINT;
+BEGIN
+
+    IF TG_TABLE_NAME = 'peer_bib_copy_map' THEN -- Only needs ON INSERT OR DELETE, so handle separately
+        IF TG_OP = 'INSERT' THEN
+            INSERT INTO asset.copy_vis_attr_cache (record, target_copy, vis_attr_vector) VALUES (
+                NEW.peer_record,
+                NEW.target_copy,
+                asset.calculate_copy_visibility_attribute_set(NEW.target_copy)
+            );
+
+            RETURN NEW;
+        ELSIF TG_OP = 'DELETE' THEN
+            DELETE FROM asset.copy_vis_attr_cache
+              WHERE record = OLD.peer_record AND target_copy = OLD.target_copy;
+
+            RETURN OLD;
+        END IF;
+    END IF;
+
+    IF TG_OP = 'INSERT' THEN -- Handles ON INSERT. ON UPDATE is below.
+        IF TG_TABLE_NAME IN ('copy', 'unit') THEN
+            SELECT * INTO ncn FROM asset.call_number cn WHERE id = NEW.call_number;
+            INSERT INTO asset.copy_vis_attr_cache (record, target_copy, vis_attr_vector) VALUES (
+                ncn.record,
+                NEW.id,
+                asset.calculate_copy_visibility_attribute_set(NEW.id)
+            );
+        ELSIF TG_TABLE_NAME = 'record_entry' THEN
+            NEW.vis_attr_vector := biblio.calculate_bib_visibility_attribute_set(NEW.id);
+        END IF;
+
+        RETURN NEW;
+    END IF;
+
+    -- handle items first, since with circulation activity
+    -- their statuses change frequently
+    IF TG_TABLE_NAME IN ('copy', 'unit') THEN -- This handles ON UPDATE OR DELETE. ON INSERT above
+
+        IF TG_OP = 'DELETE' THEN -- Shouldn't get here, normally
+            DELETE FROM asset.copy_vis_attr_cache WHERE target_copy = OLD.id;
+            RETURN OLD;
+        END IF;
+
+        SELECT * INTO ncn FROM asset.call_number cn WHERE id = NEW.call_number;
+
+        IF OLD.deleted <> NEW.deleted THEN
+            IF NEW.deleted THEN
+                DELETE FROM asset.copy_vis_attr_cache WHERE target_copy = OLD.id;
+            ELSE
+                INSERT INTO asset.copy_vis_attr_cache (record, target_copy, vis_attr_vector) VALUES (
+                    ncn.record,
+                    NEW.id,
+                    asset.calculate_copy_visibility_attribute_set(NEW.id)
+                );
+            END IF;
+
+            RETURN NEW;
+        ELSIF OLD.call_number  <> NEW.call_number THEN
+            SELECT * INTO ocn FROM asset.call_number cn WHERE id = OLD.call_number;
+
+            IF ncn.record <> ocn.record THEN
+                UPDATE  biblio.record_entry
+                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(ncn.record)
+                  WHERE id = ocn.record;
+
+                -- We have to use a record-specific WHERE clause
+                -- to avoid modifying the entries for peer-bib copies.
+                UPDATE  asset.copy_vis_attr_cache
+                  SET   target_copy = NEW.id,
+                        record = ncn.record
+                  WHERE target_copy = OLD.id
+                        AND record = ocn.record;
+            END IF;
+        END IF;
+
+        IF OLD.location     <> NEW.location OR
+           OLD.status       <> NEW.status OR
+           OLD.opac_visible <> NEW.opac_visible OR
+           OLD.circ_lib     <> NEW.circ_lib
+        THEN
+            -- Any of these could change visibility, but
+            -- we'll save some queries and not try to calculate
+            -- the change directly.  We want to update peer-bib
+            -- entries in this case, unlike above.
+            UPDATE  asset.copy_vis_attr_cache
+              SET   target_copy = NEW.id,
+                    vis_attr_vector = asset.calculate_copy_visibility_attribute_set(NEW.id)
+              WHERE target_copy = OLD.id;
+
+        END IF;
+
+    ELSIF TG_TABLE_NAME = 'call_number' THEN -- Only ON UPDATE. Copy handler will deal with ON INSERT OR DELETE.
+
+        IF OLD.record <> NEW.record THEN
+            IF NEW.label = '##URI##' THEN
+                UPDATE  biblio.record_entry
+                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(OLD.record)
+                  WHERE id = OLD.record;
+
+                UPDATE  biblio.record_entry
+                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(NEW.record)
+                  WHERE id = NEW.record;
+            END IF;
+
+            UPDATE  asset.copy_vis_attr_cache
+              SET   record = NEW.record,
+                    vis_attr_vector = asset.calculate_copy_visibility_attribute_set(target_copy)
+              WHERE target_copy IN (SELECT id FROM asset.copy WHERE call_number = NEW.id)
+                    AND record = OLD.record;
+
+        ELSIF OLD.owning_lib <> NEW.owning_lib THEN
+            UPDATE  asset.copy_vis_attr_cache
+              SET   vis_attr_vector = asset.calculate_copy_visibility_attribute_set(target_copy)
+              WHERE target_copy IN (SELECT id FROM asset.copy WHERE call_number = NEW.id)
+                    AND record = NEW.record;
+
+            IF NEW.label = '##URI##' THEN
+                UPDATE  biblio.record_entry
+                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(OLD.record)
+                  WHERE id = OLD.record;
+            END IF;
+        END IF;
+
+    ELSIF TG_TABLE_NAME = 'record_entry' THEN -- Only handles ON UPDATE OR DELETE
+
+        IF TG_OP = 'DELETE' THEN -- Shouldn't get here, normally
+            DELETE FROM asset.copy_vis_attr_cache WHERE record = OLD.id;
+            RETURN OLD;
+        ELSIF OLD.source <> NEW.source THEN
+            NEW.vis_attr_vector := biblio.calculate_bib_visibility_attribute_set(NEW.id);
+        END IF;
+
+    END IF;
+
+    RETURN NEW;
+END;
+$func$ LANGUAGE PLPGSQL;
+
+COMMIT;
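
Note: this is a full CREATE OR REPLACE of the visibility-cache trigger function; judging from the earlier hunk in this diff, the visible fix is using OLD.peer_record/OLD.target_copy (rather than NEW) when cleaning up cache rows on peer_bib_copy_map DELETE. As the function's own comment says, the copy UPDATE branch is the busy path during circulation: a change to location, status, opac_visible, or circ_lib just recomputes that copy's cached vector(s). A minimal sketch, with a hypothetical copy id:

    -- Assumes copy 12345 exists and is not deleted.
    UPDATE asset.copy SET opac_visible = FALSE WHERE id = 12345;
    -- The trigger should refresh the cached visibility vector(s) for the copy:
    SELECT record, vis_attr_vector
      FROM asset.copy_vis_attr_cache
      WHERE target_copy = 12345;
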
diff --git a/Open-ILS/src/sql/Pg/upgrade/1081.schema.lp1724223-re-inherited.sql b/Open-ILS/src/sql/Pg/upgrade/1081.schema.lp1724223-re-inherited.sql
new file mode 100644 (file)
index 0000000..f2a5fe8
--- /dev/null
@@ -0,0 +1,24 @@
+BEGIN;
+
+SELECT evergreen.upgrade_deps_block_check('1081', :eg_version); -- jboyer/gmcharlt
+
+DROP TRIGGER IF EXISTS inherit_copy_bucket_item_target_copy_fkey ON container.copy_bucket_item;
+DROP TRIGGER IF EXISTS inherit_import_item_imported_as_fkey ON vandelay.import_item;
+DROP TRIGGER IF EXISTS inherit_asset_copy_note_copy_fkey ON asset.copy_note;
+DROP TRIGGER IF EXISTS inherit_asset_copy_tag_copy_map_copy_fkey ON asset.copy_tag_copy_map;
+
+CREATE CONSTRAINT TRIGGER inherit_copy_bucket_item_target_copy_fkey
+  AFTER UPDATE OR INSERT ON container.copy_bucket_item
+  DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.container_copy_bucket_item_target_copy_inh_fkey();
+CREATE CONSTRAINT TRIGGER inherit_import_item_imported_as_fkey
+  AFTER UPDATE OR INSERT ON vandelay.import_item
+  DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.vandelay_import_item_imported_as_inh_fkey();
+CREATE CONSTRAINT TRIGGER inherit_asset_copy_note_copy_fkey
+  AFTER UPDATE OR INSERT ON asset.copy_note
+  DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.asset_copy_note_owning_copy_inh_fkey();
+CREATE CONSTRAINT TRIGGER inherit_asset_copy_tag_copy_map_copy_fkey
+  AFTER UPDATE OR INSERT ON asset.copy_tag_copy_map
+  DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.asset_copy_tag_copy_map_copy_inh_fkey();
+
+COMMIT;
+
diff --git a/Open-ILS/src/sql/Pg/upgrade/1082.data.lp1731281-acvac-cleanup.sql b/Open-ILS/src/sql/Pg/upgrade/1082.data.lp1731281-acvac-cleanup.sql
new file mode 100644 (file)
index 0000000..67156d9
--- /dev/null
@@ -0,0 +1,7 @@
+BEGIN;
+
+SELECT evergreen.upgrade_deps_block_check('1082', :eg_version); -- jboyer/gmcharlt
+
+DELETE FROM asset.copy_vis_attr_cache WHERE target_copy IN (SELECT id FROM asset.copy WHERE deleted);
+
+COMMIT;
diff --git a/Open-ILS/src/sql/Pg/version-upgrade/2.12.5-3.0-beta1-upgrade-db.sql b/Open-ILS/src/sql/Pg/version-upgrade/2.12.5-3.0-beta1-upgrade-db.sql
deleted file mode 100644 (file)
index e8e585b..0000000
+++ /dev/null
@@ -1,6956 +0,0 @@
---Upgrade Script for 2.12.5 to 3.0-beta1
-\set eg_version '''3.0-beta1'''
-
--- verify that we're running a recent enough version of Pg
-\set ON_ERROR_STOP on
-BEGIN;
-
-DO $$
-   DECLARE ver INTEGER;
-   BEGIN
-      SELECT current_setting('server_version_num') INTO ver;
-      IF (ver < 90400) THEN
-         RAISE EXCEPTION 'Not running a new enough version of PostgreSQL. Minimum required is 9.4; you have %', ver;
-      END IF;
-   END;
-$$;
-
-COMMIT;
-\set ON_ERROR_STOP off
-
-BEGIN;
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('3.0-beta1', :eg_version);
-
-SELECT evergreen.upgrade_deps_block_check('1032', :eg_version); -- Bmagic/csharp/gmcharlt
-
-CREATE OR REPLACE VIEW action.all_circulation_combined_types AS 
- SELECT acirc.id AS id,
-    acirc.xact_start,
-    acirc.circ_lib,
-    acirc.circ_staff,
-    acirc.create_time,
-    ac_acirc.circ_modifier AS item_type,
-    'regular_circ'::text AS circ_type
-   FROM action.circulation acirc,
-    asset.copy ac_acirc
-  WHERE acirc.target_copy = ac_acirc.id
-UNION ALL
- SELECT ancc.id::BIGINT AS id,
-    ancc.circ_time AS xact_start,
-    ancc.circ_lib,
-    ancc.staff AS circ_staff,
-    ancc.circ_time AS create_time,
-    cnct_ancc.name AS item_type,
-    'non-cat_circ'::text AS circ_type
-   FROM action.non_cataloged_circulation ancc,
-    config.non_cataloged_type cnct_ancc
-  WHERE ancc.item_type = cnct_ancc.id
-UNION ALL
- SELECT aihu.id::BIGINT AS id,
-    aihu.use_time AS xact_start,
-    aihu.org_unit AS circ_lib,
-    aihu.staff AS circ_staff,
-    aihu.use_time AS create_time,
-    ac_aihu.circ_modifier AS item_type,
-    'in-house_use'::text AS circ_type
-   FROM action.in_house_use aihu,
-    asset.copy ac_aihu
-  WHERE aihu.item = ac_aihu.id
-UNION ALL
- SELECT ancihu.id::BIGINT AS id,
-    ancihu.use_time AS xact_start,
-    ancihu.org_unit AS circ_lib,
-    ancihu.staff AS circ_staff,
-    ancihu.use_time AS create_time,
-    cnct_ancihu.name AS item_type,
-    'non-cat_circ'::text AS circ_type
-   FROM action.non_cat_in_house_use ancihu,
-    config.non_cataloged_type cnct_ancihu
-  WHERE ancihu.item_type = cnct_ancihu.id
-UNION ALL
- SELECT aacirc.id AS id,
-    aacirc.xact_start,
-    aacirc.circ_lib,
-    aacirc.circ_staff,
-    aacirc.create_time,
-    ac_aacirc.circ_modifier AS item_type,
-    'aged_circ'::text AS circ_type
-   FROM action.aged_circulation aacirc,
-    asset.copy ac_aacirc
-  WHERE aacirc.target_copy = ac_aacirc.id;
-
-
-SELECT evergreen.upgrade_deps_block_check('1034', :eg_version);
-
-ALTER TABLE config.hold_matrix_matchpoint
-    ADD COLUMN description TEXT;
-
-ALTER TABLE config.circ_matrix_matchpoint
-    ADD COLUMN description TEXT;
-
-
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1035', :eg_version); -- dyrcona/gmcharlt
-
--- Per Lp bug 1684984, the config.internal_flag,
--- ingest.disable_metabib_field_entry, was made obsolete by the
--- addition of the ingest.skip_browse_indexing,
--- ingest.skip_search_indexing, and ingest.skip_facet_indexing flags.
--- Since it is not used in the database, we delete it.
-DELETE FROM config.internal_flag
-WHERE name = 'ingest.disable_metabib_field_entry';
-
-
-SELECT evergreen.upgrade_deps_block_check('1036', :eg_version);
-
-CREATE OR REPLACE FUNCTION config.update_hard_due_dates () RETURNS INT AS $func$
-DECLARE
-    temp_value  config.hard_due_date_values%ROWTYPE;
-    updated     INT := 0;
-BEGIN
-    FOR temp_value IN
-      SELECT  DISTINCT ON (hard_due_date) *
-        FROM  config.hard_due_date_values
-        WHERE active_date <= NOW() -- We've passed (or are at) the rollover time
-        ORDER BY hard_due_date, active_date DESC -- Latest (nearest to us) active time
-   LOOP
-        UPDATE  config.hard_due_date
-          SET   ceiling_date = temp_value.ceiling_date
-          WHERE id = temp_value.hard_due_date
-                AND ceiling_date <> temp_value.ceiling_date -- Time is equal if we've already updated the chdd
-                AND temp_value.ceiling_date >= NOW(); -- Don't update ceiling dates to the past
-
-        IF FOUND THEN
-            updated := updated + 1;
-        END IF;
-    END LOOP;
-
-    RETURN updated;
-END;
-$func$ LANGUAGE plpgsql;
-
-
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1041', :eg_version); -- stompro/csharp/gmcharlt
-
---delete all instances from permission.grp_perm_map first
-DELETE FROM permission.grp_perm_map where perm in 
-(select id from permission.perm_list where code='SET_CIRC_MISSING');
-
---delete all instances from permission.usr_perm_map too
-DELETE FROM permission.usr_perm_map where perm in
-(select id from permission.perm_list where code='SET_CIRC_MISSING');
-
---delete from permission.perm_list
-DELETE FROM permission.perm_list where code='SET_CIRC_MISSING';
-
-
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1042', :eg_version); -- mmorgan/gmcharlt
-
-ALTER TABLE asset.copy_location
-          ADD COLUMN url TEXT;
-
-
-SELECT evergreen.upgrade_deps_block_check('1043', :eg_version);
-
-ALTER TABLE action_trigger.event_definition
-    ADD COLUMN retention_interval INTERVAL;
-
-CREATE OR REPLACE FUNCTION action_trigger.check_valid_retention_interval() 
-    RETURNS TRIGGER AS $_$
-BEGIN
-
-    /*
-     * 1. Retention intervals are alwyas allowed on active hooks.
-     * 2. On passive hooks, retention intervals are only allowed
-     *    when the event definition has a max_delay value and the
-     *    retention_interval value is greater than the difference 
-     *    beteween the delay and max_delay values.
-     */ 
-    PERFORM TRUE FROM action_trigger.hook 
-        WHERE key = NEW.hook AND NOT passive;
-
-    IF FOUND THEN
-        RETURN NEW;
-    END IF;
-
-    IF NEW.max_delay IS NOT NULL THEN
-        IF EXTRACT(EPOCH FROM NEW.retention_interval) > 
-            ABS(EXTRACT(EPOCH FROM (NEW.max_delay - NEW.delay))) THEN
-            RETURN NEW; -- all good
-        ELSE
-            RAISE EXCEPTION 'retention_interval is too short';
-        END IF;
-    ELSE
-        RAISE EXCEPTION 'retention_interval requires max_delay';
-    END IF;
-END;
-$_$ LANGUAGE PLPGSQL;
-
-CREATE TRIGGER is_valid_retention_interval 
-    BEFORE INSERT OR UPDATE ON action_trigger.event_definition
-    FOR EACH ROW WHEN (NEW.retention_interval IS NOT NULL)
-    EXECUTE PROCEDURE action_trigger.check_valid_retention_interval();
-
-CREATE OR REPLACE FUNCTION action_trigger.purge_events() RETURNS VOID AS $_$
-/**
-  * Deleting expired events without simultaneously deleting their outputs
-  * creates orphaned outputs.  Deleting their outputs and all of the events 
-  * linking back to them, plus any outputs those events link to is messy and 
-  * inefficient.  It's simpler to handle them in 2 sweeping steps.
-  *
-  * 1. Delete expired events.
-  * 2. Delete orphaned event outputs.
-  *
-  * This has the added benefit of removing outputs that may have been
-  * orphaned by some other process.  Such outputs are not usuable by
-  * the system.
-  *
-  * This does not guarantee that all events within an event group are
-  * purged at the same time.  In such cases, the remaining events will
-  * be purged with the next instance of the purge (or soon thereafter).
-  * This is another nod toward efficiency over completeness of old 
-  * data that's circling the bit bucket anyway.
-  */
-BEGIN
-
-    DELETE FROM action_trigger.event WHERE id IN (
-        SELECT evt.id
-        FROM action_trigger.event evt
-        JOIN action_trigger.event_definition def ON (def.id = evt.event_def)
-        WHERE def.retention_interval IS NOT NULL 
-            AND evt.state <> 'pending'
-            AND evt.update_time < (NOW() - def.retention_interval)
-    );
-
-    WITH linked_outputs AS (
-        SELECT templates.id AS id FROM (
-            SELECT DISTINCT(template_output) AS id
-                FROM action_trigger.event WHERE template_output IS NOT NULL
-            UNION
-            SELECT DISTINCT(error_output) AS id
-                FROM action_trigger.event WHERE error_output IS NOT NULL
-            UNION
-            SELECT DISTINCT(async_output) AS id
-                FROM action_trigger.event WHERE async_output IS NOT NULL
-        ) templates
-    ) DELETE FROM action_trigger.event_output
-        WHERE id NOT IN (SELECT id FROM linked_outputs);
-
-END;
-$_$ LANGUAGE PLPGSQL;
-
-
-/* -- UNDO --
-
-DROP FUNCTION IF EXISTS action_trigger.purge_events();
-DROP TRIGGER IF EXISTS is_valid_retention_interval ON action_trigger.event_definition;
-DROP FUNCTION IF EXISTS action_trigger.check_valid_retention_interval();
-ALTER TABLE action_trigger.event_definition DROP COLUMN retention_interval;
-
-*/
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1044', :eg_version);
-
-UPDATE action_trigger.hook SET passive = FALSE WHERE key IN (
-    'format.po.html',
-    'format.po.pdf',
-    'format.selfcheck.checkout',
-    'format.selfcheck.items_out',
-    'format.selfcheck.holds',
-    'format.selfcheck.fines',
-    'format.acqcle.html',
-    'format.acqinv.html',
-    'format.acqli.html',
-    'aur.ordered',
-    'aur.received',
-    'aur.cancelled',
-    'aur.created',
-    'aur.rejected'
-);
-
-
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1045', :eg_version); -- csharp/berick/gmcharlt
-
-ALTER TABLE action.transit_copy
-       ADD COLUMN cancel_time TIMESTAMPTZ;
-
--- change "abort" to "cancel" in stock perm descriptions
-UPDATE permission.perm_list 
-       SET description = 'Allow a user to cancel a copy transit if the user is at the transit destination or source' 
-       WHERE code = 'ABORT_TRANSIT'
-       AND description = 'Allow a user to abort a copy transit if the user is at the transit destination or source';
-UPDATE permission.perm_list 
-       SET description = 'Allow a user to cancel a copy transit if the user is not at the transit source or dest' 
-       WHERE code = 'ABORT_REMOTE_TRANSIT'
-       AND description = 'Allow a user to abort a copy transit if the user is not at the transit source or dest';
-UPDATE permission.perm_list 
-       SET description = 'Allows a user to cancel a transit on a copy with status of LOST' 
-       WHERE code = 'ABORT_TRANSIT_ON_LOST'
-       AND description = 'Allows a user to abort a transit on a copy with status of LOST';
-UPDATE permission.perm_list 
-       SET description = 'Allows a user to cancel a transit on a copy with status of MISSING' 
-       WHERE code = 'ABORT_TRANSIT_ON_MISSING'
-       AND description = 'Allows a user to abort a transit on a copy with status of MISSING';
-
-SELECT evergreen.upgrade_deps_block_check('1046', :eg_version); -- phasefx/berick/gmcharlt
-
-INSERT into config.org_unit_setting_type (
-     name
-    ,grp
-    ,label
-    ,description
-    ,datatype
-) VALUES ( ----------------------------------------
-     'webstaff.format.dates'
-    ,'gui'
-    ,oils_i18n_gettext(
-         'webstaff.format.dates'
-        ,'Format Dates with this pattern'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.format.dates'
-        ,'Format Dates with this pattern (examples: "yyyy-MM-dd" for "2010-04-26", "MMM d, yyyy" for "Apr 26, 2010").  This will be used in areas where a date without a timestamp is sufficient, like Date of Birth.'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.format.date_and_time'
-    ,'gui'
-    ,oils_i18n_gettext(
-         'webstaff.format.date_and_time'
-        ,'Format Date+Time with this pattern'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.format.date_and_time'
-        ,'Format Date+Time with this pattern (examples: "yy-MM-dd h:m:s.SSS a" for "16-04-05 2:07:20.666 PM", "yyyy-dd-MMM HH:mm" for "2016-05-Apr 14:07").  This will be used in areas of the client where a date with a timestamp is needed, like Checkout, Due Date, or Record Created.'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-);
-
-UPDATE
-    config.org_unit_setting_type
-SET
-    label = 'Deprecated: ' || label -- FIXME: Is this okay?
-WHERE
-    name IN ('format.date','format.time')
-;
-
-
-SELECT evergreen.upgrade_deps_block_check('1047', :eg_version); -- gmcharlt/stompro
-
-CREATE TABLE config.copy_tag_type (
-    code            TEXT NOT NULL PRIMARY KEY,
-    label           TEXT NOT NULL,
-    owner           INTEGER NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED
-);
-
-CREATE INDEX config_copy_tag_type_owner_idx
-    ON config.copy_tag_type (owner);
-
-CREATE TABLE asset.copy_tag (
-    id              SERIAL PRIMARY KEY,
-    tag_type        TEXT REFERENCES config.copy_tag_type (code)
-                    ON UPDATE CASCADE ON DELETE CASCADE,
-    label           TEXT NOT NULL,
-    value           TEXT NOT NULL,
-    index_vector    tsvector NOT NULL,
-    staff_note      TEXT,
-    pub             BOOLEAN DEFAULT TRUE,
-    owner           INTEGER NOT NULL REFERENCES actor.org_unit (id)
-);
-
-CREATE INDEX asset_copy_tag_label_idx
-    ON asset.copy_tag (label);
-CREATE INDEX asset_copy_tag_label_lower_idx
-    ON asset.copy_tag (evergreen.lowercase(label));
-CREATE INDEX asset_copy_tag_index_vector_idx
-    ON asset.copy_tag
-    USING GIN(index_vector);
-CREATE INDEX asset_copy_tag_tag_type_idx
-    ON asset.copy_tag (tag_type);
-CREATE INDEX asset_copy_tag_owner_idx
-    ON asset.copy_tag (owner);
-
-CREATE OR REPLACE FUNCTION asset.set_copy_tag_value () RETURNS TRIGGER AS $$
-BEGIN
-    IF NEW.value IS NULL THEN
-        NEW.value = NEW.label;        
-    END IF;
-
-    RETURN NEW;
-END;
-$$ LANGUAGE 'plpgsql';
-
--- name of following trigger chosen to ensure it runs first
-CREATE TRIGGER asset_copy_tag_do_value
-    BEFORE INSERT OR UPDATE ON asset.copy_tag
-    FOR EACH ROW EXECUTE PROCEDURE asset.set_copy_tag_value();
-CREATE TRIGGER asset_copy_tag_fti_trigger
-    BEFORE UPDATE OR INSERT ON asset.copy_tag
-    FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('default');
-
-CREATE TABLE asset.copy_tag_copy_map (
-    id              BIGSERIAL PRIMARY KEY,
-    copy            BIGINT REFERENCES asset.copy (id)
-                    ON UPDATE CASCADE ON DELETE CASCADE,
-    tag             INTEGER REFERENCES asset.copy_tag (id)
-                    ON UPDATE CASCADE ON DELETE CASCADE
-);
-
-CREATE INDEX asset_copy_tag_copy_map_copy_idx
-    ON asset.copy_tag_copy_map (copy);
-CREATE INDEX asset_copy_tag_copy_map_tag_idx
-    ON asset.copy_tag_copy_map (tag);
-
-INSERT INTO config.copy_tag_type (code, label, owner) VALUES ('bookplate', 'Digital Bookplate', 1);
-
-INSERT INTO permission.perm_list ( id, code, description ) VALUES
- ( 590, 'ADMIN_COPY_TAG_TYPES', oils_i18n_gettext( 590,
-    'Administer copy tag types', 'ppl', 'description' )),
- ( 591, 'ADMIN_COPY_TAG', oils_i18n_gettext( 591,
-    'Administer copy tag', 'ppl', 'description' ))
-;
-
-INSERT INTO config.org_unit_setting_type
-    (name, label, description, grp, datatype)
-VALUES (
-    'opac.search.enable_bookplate_search',
-    oils_i18n_gettext(
-        'opac.search.enable_bookplate_search',
-        'Enable Digital Bookplate Search',
-        'coust',
-        'label'
-    ),
-    oils_i18n_gettext(
-        'opac.search.enable_bookplate_search',
-        'If enabled, adds a "Digital Bookplate" option to the query type selectors in the public catalog for search on copy tags.',   
-        'coust',
-        'description'
-    ),
-    'opac',
-    'bool'
-);
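-- A minimal sketch of turning the new setting on for a hypothetical org unit id 1
-- (the Library Settings Editor is the usual route; the value column holds JSON text):
-- INSERT INTO actor.org_unit_setting (org_unit, name, value)
--     VALUES (1, 'opac.search.enable_bookplate_search', 'true');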
-
-
-SELECT evergreen.upgrade_deps_block_check('1048', :eg_version);
-
-INSERT into config.org_unit_setting_type (
-     name
-    ,grp
-    ,label
-    ,description
-    ,datatype
-) VALUES ( ----------------------------------------
-     'webstaff.cat.label.font.family'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.font.family'
-        ,'Item Print Label Font Family'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.font.family'
-        ,'Set the preferred font family for item print labels. You can specify a list of CSS fonts, separated by commas, in order of preference; the system will use the first font it finds with a matching name. For example, "Arial, Helvetica, serif"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.font.size'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.font.size'
-        ,'Item Print Label Font Size'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.font.size'
-        ,'Set the default font size for item print labels. Please include a unit of measurement that is valid CSS. For example, "12pt" or "16px" or "1em"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.font.weight'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.font.weight'
-        ,'Item Print Label Font Weight'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.font.weight'
-        ,'Set the default font weight for item print labels. Please use the CSS specification for values for font-weight.  For example, "normal", "bold", "bolder", or "lighter"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.left_label.left_margin'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.left_label.left_margin'
-        ,'Item Print Label - Left Margin for Left Label'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.left_label.left_margin'
-        ,'Set the default left margin for the leftmost item print Label. Please include a unit of measurement that is valid CSS. For example, "1in" or "2.5cm"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.right_label.left_margin'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.right_label.left_margin'
-        ,'Item Print Label - Left Margin for Right Label'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.right_label.left_margin'
-        ,'Set the default left margin for the rightmost item print label (or in other words, the desired space between the two labels). Please include a unit of measurement that is valid CSS. For example, "1in" or "2.5cm"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.left_label.height'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.left_label.height'
-        ,'Item Print Label - Height for Left Label'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.left_label.height'
-        ,'Set the default height for the leftmost item print label. Please include a unit of measurement that is valid CSS. For example, "1in" or "2.5cm"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.left_label.width'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.left_label.width'
-        ,'Item Print Label - Width for Left Label'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.left_label.width'
-        ,'Set the default width for the leftmost item print label. Please include a unit of measurement that is valid CSS. For example, "1in" or "2.5cm"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.right_label.height'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.right_label.height'
-        ,'Item Print Label - Height for Right Label'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.right_label.height'
-        ,'Set the default height for the rightmost item print label. Please include a unit of measurement that is valid CSS. For example, "1in" or "2.5cm"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), ( ----------------------------------------
-     'webstaff.cat.label.right_label.width'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.right_label.width'
-        ,'Item Print Label - Width for Right Label'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.right_label.width'
-        ,'Set the default width for the rightmost item print label. Please include a unit of measurement that is valid CSS. For example, "1in" or "2.5cm"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), (
-     'webstaff.cat.label.inline_css'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.inline_css'
-        ,'Item Print Label - Inline CSS'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.inline_css'
-        ,'This setting allows you to inject arbitrary CSS into the item print label template.  For example, ".printlabel { text-transform: uppercase; }"'
-        ,'coust'
-        ,'description'
-    )
-    ,'string'
-), (
-     'webstaff.cat.label.call_number_wrap_filter_height'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.call_number_wrap_filter_height'
-        ,'Item Print Label - Call Number Wrap Filter Height'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.call_number_wrap_filter_height'
-        ,'This setting is used to set the default height (in number of lines) to use for call number wrapping in the left print label.'
-        ,'coust'
-        ,'description'
-    )
-    ,'integer'
-), (
-     'webstaff.cat.label.call_number_wrap_filter_width'
-    ,'cat'
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.call_number_wrap_filter_width'
-        ,'Item Print Label - Call Number Wrap Filter Width'
-        ,'coust'
-        ,'label'
-    )
-    ,oils_i18n_gettext(
-         'webstaff.cat.label.call_number_wrap_filter_width'
-        ,'This setting is used to set the default width (in number of characters) to use for call number wrapping in the left print label.'
-        ,'coust'
-        ,'description'
-    )
-    ,'integer'
-
-
-);
-
--- for testing, setting removal:
---DELETE FROM actor.org_unit_setting WHERE name IN (
---     'webstaff.cat.label.font.family'
---    ,'webstaff.cat.label.font.size'
---    ,'webstaff.cat.label.font.weight'
---    ,'webstaff.cat.label.left_label.height'
---    ,'webstaff.cat.label.left_label.width'
---    ,'webstaff.cat.label.left_label.left_margin'
---    ,'webstaff.cat.label.right_label.height'
---    ,'webstaff.cat.label.right_label.width'
---    ,'webstaff.cat.label.right_label.left_margin'
---    ,'webstaff.cat.label.inline_css'
---    ,'webstaff.cat.label.call_number_wrap_filter_height'
---    ,'webstaff.cat.label.call_number_wrap_filter_width'
---);
---DELETE FROM config.org_unit_setting_type_log WHERE field_name IN (
---     'webstaff.cat.label.font.family'
---    ,'webstaff.cat.label.font.size'
---    ,'webstaff.cat.label.font.weight'
---    ,'webstaff.cat.label.left_label.height'
---    ,'webstaff.cat.label.left_label.width'
---    ,'webstaff.cat.label.left_label.left_margin'
---    ,'webstaff.cat.label.right_label.height'
---    ,'webstaff.cat.label.right_label.width'
---    ,'webstaff.cat.label.right_label.left_margin'
---    ,'webstaff.cat.label.inline_css'
---    ,'webstaff.cat.label.call_number_wrap_filter_height'
---    ,'webstaff.cat.label.call_number_wrap_filter_width'
---);
---DELETE FROM config.org_unit_setting_type WHERE name IN (
---     'webstaff.cat.label.font.family'
---    ,'webstaff.cat.label.font.size'
---    ,'webstaff.cat.label.font.weight'
---    ,'webstaff.cat.label.left_label.height'
---    ,'webstaff.cat.label.left_label.width'
---    ,'webstaff.cat.label.left_label.left_margin'
---    ,'webstaff.cat.label.right_label.height'
---    ,'webstaff.cat.label.right_label.width'
---    ,'webstaff.cat.label.right_label.left_margin'
---    ,'webstaff.cat.label.inline_css'
---    ,'webstaff.cat.label.call_number_wrap_filter_height'
---    ,'webstaff.cat.label.call_number_wrap_filter_width'
---);
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1049', :eg_version); -- mmorgan/stompro/gmcharlt
-
-\echo -----------------------------------------------------------
-\echo Setting invalid age_protect and circ_as_type entries to NULL,
-\echo otherwise they will break the Serial Copy Templates editor.
-\echo Please review any Serial Copy Templates listed below.
-\echo
-UPDATE asset.copy_template act
-SET age_protect = NULL
-FROM actor.org_unit aou
-WHERE aou.id=act.owning_lib
-   AND act.age_protect NOT IN
-   (
-   SELECT id FROM config.rule_age_hold_protect
-   )
-RETURNING act.id "Template ID", act.name "Template Name",
-          aou.shortname "Owning Lib",
-          'Age Protection value reset to null.' "Description";
-
-UPDATE asset.copy_template act
-SET circ_as_type = NULL
-FROM actor.org_unit aou
-WHERE aou.id=act.owning_lib
-   AND act.circ_as_type NOT IN
-   (
-   SELECT code FROM config.item_type_map
-   )
-RETURNING act.id "Template ID", act.name "Template Name",
-          aou.shortname "Owning Lib",
-          'Circ as Type value reset to null.' as "Description";
-
-\echo -----------End Serial Template Fix----------------
-
-SELECT evergreen.upgrade_deps_block_check('1050', :eg_version); -- mmorgan/cesardv/gmcharlt
-
-CREATE OR REPLACE FUNCTION permission.usr_perms ( INT ) RETURNS SETOF permission.usr_perm_map AS $$
-    SELECT     DISTINCT ON (usr,perm) *
-         FROM  (
-                       (SELECT * FROM permission.usr_perm_map WHERE usr = $1)
-            UNION ALL
-                       (SELECT -p.id, $1 AS usr, p.perm, p.depth, p.grantable
-                         FROM  permission.grp_perm_map p
-                         WHERE p.grp IN (
-      SELECT   (permission.grp_ancestors(
-      (SELECT profile FROM actor.usr WHERE id = $1)
-                                       )).id
-                               )
-                       )
-            UNION ALL
-                       (SELECT -p.id, $1 AS usr, p.perm, p.depth, p.grantable
-                         FROM  permission.grp_perm_map p
-                         WHERE p.grp IN (SELECT (permission.grp_ancestors(m.grp)).id FROM permission.usr_grp_map m WHERE usr = $1))
-               ) AS x
-         ORDER BY 2, 3, 4 ASC, 5 DESC ;
-$$ LANGUAGE SQL STABLE ROWS 10;
-
-SELECT evergreen.upgrade_deps_block_check('1051', :eg_version);
-
-CREATE OR REPLACE VIEW action.all_circulation_slim AS
-    SELECT
-        id,
-        usr,
-        xact_start,
-        xact_finish,
-        unrecovered,
-        target_copy,
-        circ_lib,
-        circ_staff,
-        checkin_staff,
-        checkin_lib,
-        renewal_remaining,
-        grace_period,
-        due_date,
-        stop_fines_time,
-        checkin_time,
-        create_time,
-        duration,
-        fine_interval,
-        recurring_fine,
-        max_fine,
-        phone_renewal,
-        desk_renewal,
-        opac_renewal,
-        duration_rule,
-        recurring_fine_rule,
-        max_fine_rule,
-        stop_fines,
-        workstation,
-        checkin_workstation,
-        copy_location,
-        checkin_scan_time,
-        parent_circ
-    FROM action.circulation
-UNION ALL
-    SELECT
-        id,
-        NULL AS usr,
-        xact_start,
-        xact_finish,
-        unrecovered,
-        target_copy,
-        circ_lib,
-        circ_staff,
-        checkin_staff,
-        checkin_lib,
-        renewal_remaining,
-        grace_period,
-        due_date,
-        stop_fines_time,
-        checkin_time,
-        create_time,
-        duration,
-        fine_interval,
-        recurring_fine,
-        max_fine,
-        phone_renewal,
-        desk_renewal,
-        opac_renewal,
-        duration_rule,
-        recurring_fine_rule,
-        max_fine_rule,
-        stop_fines,
-        workstation,
-        checkin_workstation,
-        copy_location,
-        checkin_scan_time,
-        parent_circ
-    FROM action.aged_circulation
-;
-
-DROP FUNCTION action.summarize_all_circ_chain(INTEGER);
-DROP FUNCTION action.all_circ_chain(INTEGER);
-
-CREATE OR REPLACE FUNCTION action.all_circ_chain (ctx_circ_id INTEGER) 
-    RETURNS SETOF action.all_circulation_slim AS $$
-DECLARE
-    tmp_circ action.all_circulation_slim%ROWTYPE;
-    circ_0 action.all_circulation_slim%ROWTYPE;
-BEGIN
-
-    SELECT INTO tmp_circ * FROM action.all_circulation_slim WHERE id = ctx_circ_id;
-
-    IF tmp_circ IS NULL THEN
-        RETURN NEXT tmp_circ;
-    END IF;
-    circ_0 := tmp_circ;
-
-    -- find the front of the chain
-    WHILE TRUE LOOP
-        SELECT INTO tmp_circ * FROM action.all_circulation_slim 
-            WHERE id = tmp_circ.parent_circ;
-        IF tmp_circ IS NULL THEN
-            EXIT;
-        END IF;
-        circ_0 := tmp_circ;
-    END LOOP;
-
-    -- now send the circs to the caller, oldest to newest
-    tmp_circ := circ_0;
-    WHILE TRUE LOOP
-        IF tmp_circ IS NULL THEN
-            EXIT;
-        END IF;
-        RETURN NEXT tmp_circ;
-        SELECT INTO tmp_circ * FROM action.all_circulation_slim 
-            WHERE parent_circ = tmp_circ.id;
-    END LOOP;
-
-END;
-$$ LANGUAGE 'plpgsql';
-
-CREATE OR REPLACE FUNCTION action.summarize_all_circ_chain 
-    (ctx_circ_id INTEGER) RETURNS action.circ_chain_summary AS $$
-
-DECLARE
-
-    -- first circ in the chain
-    circ_0 action.all_circulation_slim%ROWTYPE;
-
-    -- last circ in the chain
-    circ_n action.all_circulation_slim%ROWTYPE;
-
-    -- circ chain under construction
-    chain action.circ_chain_summary;
-    tmp_circ action.all_circulation_slim%ROWTYPE;
-
-BEGIN
-    
-    chain.num_circs := 0;
-    FOR tmp_circ IN SELECT * FROM action.all_circ_chain(ctx_circ_id) LOOP
-
-        IF chain.num_circs = 0 THEN
-            circ_0 := tmp_circ;
-        END IF;
-
-        chain.num_circs := chain.num_circs + 1;
-        circ_n := tmp_circ;
-    END LOOP;
-
-    chain.start_time := circ_0.xact_start;
-    chain.last_stop_fines := circ_n.stop_fines;
-    chain.last_stop_fines_time := circ_n.stop_fines_time;
-    chain.last_checkin_time := circ_n.checkin_time;
-    chain.last_checkin_scan_time := circ_n.checkin_scan_time;
-    SELECT INTO chain.checkout_workstation name FROM actor.workstation WHERE id = circ_0.workstation;
-    SELECT INTO chain.last_checkin_workstation name FROM actor.workstation WHERE id = circ_n.checkin_workstation;
-
-    IF chain.num_circs > 1 THEN
-        chain.last_renewal_time := circ_n.xact_start;
-        SELECT INTO chain.last_renewal_workstation name FROM actor.workstation WHERE id = circ_n.workstation;
-    END IF;
-
-    RETURN chain;
-
-END;
-$$ LANGUAGE 'plpgsql';
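-- Usage sketch, with a hypothetical circulation id 123 drawn from
-- action.all_circulation_slim:
-- SELECT * FROM action.all_circ_chain(123);
-- SELECT * FROM action.summarize_all_circ_chain(123);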
-
-CREATE OR REPLACE FUNCTION rating.percent_time_circulating(badge_id INT)
-    RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$
-DECLARE
-    badge   rating.badge_with_orgs%ROWTYPE;
-BEGIN
-
-    SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id;
-
-    PERFORM rating.precalc_bibs_by_copy(badge_id);
-
-    DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN (
-        SELECT id FROM precalc_filter_bib_list
-            INTERSECT
-        SELECT id FROM precalc_bibs_by_copy_list
-    );
-
-    ANALYZE precalc_copy_filter_bib_list;
-
-    RETURN QUERY
-     SELECT bib,
-            SUM(COALESCE(circ_time,0))::NUMERIC / SUM(age)::NUMERIC
-      FROM  (SELECT cn.record AS bib,
-                    cp.id,
-                    EXTRACT( EPOCH FROM AGE(cp.active_date) ) + 1 AS age,
-                    SUM(  -- time copy spent circulating
-                        EXTRACT(
-                            EPOCH FROM
-                            AGE(
-                                COALESCE(circ.checkin_time, circ.stop_fines_time, NOW()),
-                                circ.xact_start
-                            )
-                        )
-                    )::NUMERIC AS circ_time
-              FROM  asset.copy cp
-                    JOIN precalc_copy_filter_bib_list c ON (cp.id = c.copy)
-                    JOIN asset.call_number cn ON (cn.id = cp.call_number)
-                    LEFT JOIN action.all_circulation_slim circ ON (
-                        circ.target_copy = cp.id
-                        AND stop_fines NOT IN (
-                            'LOST',
-                            'LONGOVERDUE',
-                            'CLAIMSRETURNED'

-                        )
-                        AND NOT (
-                            checkin_time IS NULL AND
-                            stop_fines = 'MAXFINES'
-                        )
-                    )
-              WHERE cn.owning_lib = ANY (badge.orgs)
-                    AND cp.active_date IS NOT NULL
-                    -- Next line requires that copies with no circs (circ.id IS NULL) also not be deleted
-                    AND ((circ.id IS NULL AND NOT cp.deleted) OR circ.id IS NOT NULL)
-              GROUP BY 1,2,3
-            ) x
-      GROUP BY 1;
-END;
-$f$ LANGUAGE PLPGSQL STRICT;
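-- Usage sketch (hypothetical badge id 1); in practice the popularity-badge
-- recalculation process supplies the badge id and the precalc_* scratch tables
-- this function expects to find:
-- SELECT * FROM rating.percent_time_circulating(1);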
-
-
--- ROLLBACK;
-
-
-SELECT evergreen.upgrade_deps_block_check('1052', :eg_version);
-
-CREATE OR REPLACE FUNCTION rating.inhouse_over_time(badge_id INT)
-    RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$
-DECLARE
-    badge   rating.badge_with_orgs%ROWTYPE;
-    iage    INT     := 1;
-    iint    INT     := NULL;
-    iscale  NUMERIC := NULL;
-BEGIN
-
-    SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id;
-
-    IF badge.horizon_age IS NULL THEN
-        RAISE EXCEPTION 'Badge "%" with id % requires a horizon age but has none.',
-            badge.name,
-            badge.id;
-    END IF;
-
-    PERFORM rating.precalc_bibs_by_copy(badge_id);
-
-    DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN (
-        SELECT id FROM precalc_filter_bib_list
-            INTERSECT
-        SELECT id FROM precalc_bibs_by_copy_list
-    );
-
-    ANALYZE precalc_copy_filter_bib_list;
-
-    iint := EXTRACT(EPOCH FROM badge.importance_interval);
-    IF badge.importance_age IS NOT NULL THEN
-        iage := (EXTRACT(EPOCH FROM badge.importance_age) / iint)::INT;
-    END IF;
-
-    -- if iscale is smaller than 1, scaling slope will be shallow ... BEWARE!
-    iscale := COALESCE(badge.importance_scale, 1.0);
-
-    RETURN QUERY
-     SELECT bib,
-            SUM( uses * GREATEST( iscale * (iage - cage), 1.0 ))
-      FROM (
-         SELECT cn.record AS bib,
-                (1 + EXTRACT(EPOCH FROM AGE(u.use_time)) / iint)::INT AS cage,
-                COUNT(u.id)::INT AS uses
-          FROM  action.in_house_use u
-                JOIN precalc_copy_filter_bib_list cf ON (u.item = cf.copy)
-                JOIN asset.copy cp ON (cp.id = u.item)
-                JOIN asset.call_number cn ON (cn.id = cp.call_number)
-          WHERE u.use_time >= NOW() - badge.horizon_age
-                AND cn.owning_lib = ANY (badge.orgs)
-          GROUP BY 1, 2
-      ) x
-      GROUP BY 1;
-END;
-$f$ LANGUAGE PLPGSQL STRICT;
-
-INSERT INTO rating.popularity_parameter (id, name, func, require_horizon,require_percentile) VALUES
-    (18,'In-House Use Over Time', 'rating.inhouse_over_time', TRUE, TRUE);
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1053', :eg_version);
-
-CREATE OR REPLACE FUNCTION rating.org_unit_count(badge_id INT)
-    RETURNS TABLE (record INT, value NUMERIC) AS $f$
-DECLARE
-    badge   rating.badge_with_orgs%ROWTYPE;
-BEGIN
-
-    SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id;
-
-    PERFORM rating.precalc_bibs_by_copy(badge_id);
-
-    DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN (
-        SELECT id FROM precalc_filter_bib_list
-            INTERSECT
-        SELECT id FROM precalc_bibs_by_copy_list
-    );
-    ANALYZE precalc_copy_filter_bib_list;
-
-    -- Use circ rather than owning lib here as that means "on the shelf at..."
-    RETURN QUERY
-     SELECT f.id::INT AS bib,
-            COUNT(DISTINCT cp.circ_lib)::NUMERIC
-     FROM asset.copy cp
-          JOIN precalc_copy_filter_bib_list f ON (cp.id = f.copy)
-     WHERE cp.circ_lib = ANY (badge.orgs) GROUP BY 1;
-
-END;
-$f$ LANGUAGE PLPGSQL STRICT;
-
-INSERT INTO rating.popularity_parameter (id, name, func, require_percentile) VALUES
-    (17,'Circulation Library Count', 'rating.org_unit_count', TRUE);
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1054', :eg_version);
-
-INSERT into config.org_unit_setting_type
-( name, grp, label, description, datatype ) VALUES
-
-( 'lib.timezone', 'lib',
-    oils_i18n_gettext('lib.timezone',
-        'Library time zone',
-        'coust', 'label'),
-    oils_i18n_gettext('lib.timezone',
-        'Define the time zone in which a library physically resides',
-        'coust', 'description'),
-    'string');
-
-ALTER TABLE actor.org_unit_closed ADD COLUMN full_day BOOLEAN DEFAULT FALSE;
-ALTER TABLE actor.org_unit_closed ADD COLUMN multi_day BOOLEAN DEFAULT FALSE;
-
-UPDATE actor.org_unit_closed SET multi_day = TRUE
-  WHERE close_start::DATE <> close_end::DATE;
-
-UPDATE actor.org_unit_closed SET full_day = TRUE
-  WHERE close_start::DATE = close_end::DATE
-        AND SUBSTRING(close_start::time::text FROM 1 FOR 8) = '00:00:00'
-        AND SUBSTRING(close_end::time::text FROM 1 FOR 8) = '23:59:59';
-
-CREATE OR REPLACE FUNCTION action.push_circ_due_time () RETURNS TRIGGER AS $$
-DECLARE
-    proper_tz TEXT := COALESCE(
-        oils_json_to_text((
-            SELECT value
-              FROM  actor.org_unit_ancestor_setting('lib.timezone',NEW.circ_lib)
-              LIMIT 1
-        )),
-        CURRENT_SETTING('timezone')
-    );
-BEGIN
-
-    IF (EXTRACT(EPOCH FROM NEW.duration)::INT % EXTRACT(EPOCH FROM '1 day'::INTERVAL)::INT) = 0 -- day-granular duration
-        AND SUBSTRING((NEW.due_date AT TIME ZONE proper_tz)::TIME::TEXT FROM 1 FOR 8) <> '23:59:59' THEN -- has not yet been pushed
-        NEW.due_date = ((NEW.due_date AT TIME ZONE proper_tz)::DATE + '1 day'::INTERVAL - '1 second'::INTERVAL) || ' ' || proper_tz;
-    END IF;
-
-    RETURN NEW;
-END;
-$$ LANGUAGE PLPGSQL;
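-- Worked example of the push, assuming a circulating library whose lib.timezone
-- is America/Chicago: a day-granular loan whose due_date would otherwise land at
-- 2017-11-23 10:15:00 local is moved to 2017-11-23 23:59:59 America/Chicago,
-- the end of the due day in the library's time zone rather than the server's.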
-
-
-\qecho The following query will adjust all historical, unaged circulations so
-\qecho that if their due date field is pushed to the end of the day, it is done
-\qecho in the circulating library''''s time zone, and not the server time zone.
-\qecho 
-\qecho It is safe to run this after any change to library time zones.
-\qecho 
-\qecho Running this is not required, as no code before this change has
-\qecho depended on the time string of '''23:59:59'''.  It is also not necessary
-\qecho if all of your libraries are in the same time zone, and that time zone
-\qecho is the same as the database''''s configured time zone.
-\qecho 
-\qecho 'DO $$'
-\qecho 'declare'
-\qecho '    new_tz  text;'
-\qecho '    ou_id   int;'
-\qecho 'begin'
-\qecho '    for ou_id in select id from actor.org_unit loop'
-\qecho '        for new_tz in select oils_json_to_text(value) from actor.org_unit_ancestor_setting('''lib.timezone''',ou_id) loop'
-\qecho '            if new_tz is not null then'
-\qecho '                update  action.circulation'
-\qecho '                  set   due_date = (due_date::timestamp || ''' ''' || new_tz)::timestamptz'
-\qecho '                  where circ_lib = ou_id'
-\qecho '                        and substring((due_date at time zone new_tz)::time::text from 1 for 8) <> '''23:59:59''';'
-\qecho '            end if;'
-\qecho '        end loop;'
-\qecho '    end loop;'
-\qecho 'end;'
-\qecho '$$;'
-\qecho 
-
-SELECT evergreen.upgrade_deps_block_check('1055', :eg_version);
-
-CREATE OR REPLACE FUNCTION actor.usr_merge( src_usr INT, dest_usr INT, del_addrs BOOLEAN, del_cards BOOLEAN, deactivate_cards BOOLEAN ) RETURNS VOID AS $$
-DECLARE
-       suffix TEXT;
-       bucket_row RECORD;
-       picklist_row RECORD;
-       queue_row RECORD;
-       folder_row RECORD;
-BEGIN
-
-    -- do some initial cleanup 
-    UPDATE actor.usr SET card = NULL WHERE id = src_usr;
-    UPDATE actor.usr SET mailing_address = NULL WHERE id = src_usr;
-    UPDATE actor.usr SET billing_address = NULL WHERE id = src_usr;
-
-    -- actor.*
-    IF del_cards THEN
-        DELETE FROM actor.card where usr = src_usr;
-    ELSE
-        IF deactivate_cards THEN
-            UPDATE actor.card SET active = 'f' WHERE usr = src_usr;
-        END IF;
-        UPDATE actor.card SET usr = dest_usr WHERE usr = src_usr;
-    END IF;
-
-
-    IF del_addrs THEN
-        DELETE FROM actor.usr_address WHERE usr = src_usr;
-    ELSE
-        UPDATE actor.usr_address SET usr = dest_usr WHERE usr = src_usr;
-    END IF;
-
-    UPDATE actor.usr_note SET usr = dest_usr WHERE usr = src_usr;
-    -- duplicates are technically OK in actor.usr_standing_penalty; delete them manually if they are unwanted...
-    UPDATE actor.usr_standing_penalty SET usr = dest_usr WHERE usr = src_usr;
-    PERFORM actor.usr_merge_rows('actor.usr_org_unit_opt_in', 'usr', src_usr, dest_usr);
-    PERFORM actor.usr_merge_rows('actor.usr_setting', 'usr', src_usr, dest_usr);
-
-    -- permission.*
-    PERFORM actor.usr_merge_rows('permission.usr_perm_map', 'usr', src_usr, dest_usr);
-    PERFORM actor.usr_merge_rows('permission.usr_object_perm_map', 'usr', src_usr, dest_usr);
-    PERFORM actor.usr_merge_rows('permission.usr_grp_map', 'usr', src_usr, dest_usr);
-    PERFORM actor.usr_merge_rows('permission.usr_work_ou_map', 'usr', src_usr, dest_usr);
-
-
-    -- container.*
-       
-       -- For each *_bucket table: transfer every bucket belonging to src_usr
-       -- into the custody of dest_usr.
-       --
-       -- In order to avoid colliding with an existing bucket owned by
-       -- the destination user, append the source user's id (in parenthesese)
-       -- to the name.  If you still get a collision, add successive
-       -- spaces to the name and keep trying until you succeed.
-       --
-       FOR bucket_row in
-               SELECT id, name
-               FROM   container.biblio_record_entry_bucket
-               WHERE  owner = src_usr
-       LOOP
-               suffix := ' (' || src_usr || ')';
-               LOOP
-                       BEGIN
-                               UPDATE  container.biblio_record_entry_bucket
-                               SET     owner = dest_usr, name = name || suffix
-                               WHERE   id = bucket_row.id;
-                       EXCEPTION WHEN unique_violation THEN
-                               suffix := suffix || ' ';
-                               CONTINUE;
-                       END;
-                       EXIT;
-               END LOOP;
-       END LOOP;
-
-       FOR bucket_row in
-               SELECT id, name
-               FROM   container.call_number_bucket
-               WHERE  owner = src_usr
-       LOOP
-               suffix := ' (' || src_usr || ')';
-               LOOP
-                       BEGIN
-                               UPDATE  container.call_number_bucket
-                               SET     owner = dest_usr, name = name || suffix
-                               WHERE   id = bucket_row.id;
-                       EXCEPTION WHEN unique_violation THEN
-                               suffix := suffix || ' ';
-                               CONTINUE;
-                       END;
-                       EXIT;
-               END LOOP;
-       END LOOP;
-
-       FOR bucket_row in
-               SELECT id, name
-               FROM   container.copy_bucket
-               WHERE  owner = src_usr
-       LOOP
-               suffix := ' (' || src_usr || ')';
-               LOOP
-                       BEGIN
-                               UPDATE  container.copy_bucket
-                               SET     owner = dest_usr, name = name || suffix
-                               WHERE   id = bucket_row.id;
-                       EXCEPTION WHEN unique_violation THEN
-                               suffix := suffix || ' ';
-                               CONTINUE;
-                       END;
-                       EXIT;
-               END LOOP;
-       END LOOP;
-
-       FOR bucket_row in
-               SELECT id, name
-               FROM   container.user_bucket
-               WHERE  owner = src_usr
-       LOOP
-               suffix := ' (' || src_usr || ')';
-               LOOP
-                       BEGIN
-                               UPDATE  container.user_bucket
-                               SET     owner = dest_usr, name = name || suffix
-                               WHERE   id = bucket_row.id;
-                       EXCEPTION WHEN unique_violation THEN
-                               suffix := suffix || ' ';
-                               CONTINUE;
-                       END;
-                       EXIT;
-               END LOOP;
-       END LOOP;
-
-       UPDATE container.user_bucket_item SET target_user = dest_usr WHERE target_user = src_usr;
-
-    -- vandelay.*
-       -- transfer queues the same way we transfer buckets (see above)
-       FOR queue_row in
-               SELECT id, name
-               FROM   vandelay.queue
-               WHERE  owner = src_usr
-       LOOP
-               suffix := ' (' || src_usr || ')';
-               LOOP
-                       BEGIN
-                               UPDATE  vandelay.queue
-                               SET     owner = dest_usr, name = name || suffix
-                               WHERE   id = queue_row.id;
-                       EXCEPTION WHEN unique_violation THEN
-                               suffix := suffix || ' ';
-                               CONTINUE;
-                       END;
-                       EXIT;
-               END LOOP;
-       END LOOP;
-
-    -- money.*
-    PERFORM actor.usr_merge_rows('money.collections_tracker', 'usr', src_usr, dest_usr);
-    PERFORM actor.usr_merge_rows('money.collections_tracker', 'collector', src_usr, dest_usr);
-    UPDATE money.billable_xact SET usr = dest_usr WHERE usr = src_usr;
-    UPDATE money.billing SET voider = dest_usr WHERE voider = src_usr;
-    UPDATE money.bnm_payment SET accepting_usr = dest_usr WHERE accepting_usr = src_usr;
-
-    -- action.*
-    UPDATE action.circulation SET usr = dest_usr WHERE usr = src_usr;
-    UPDATE action.circulation SET circ_staff = dest_usr WHERE circ_staff = src_usr;
-    UPDATE action.circulation SET checkin_staff = dest_usr WHERE checkin_staff = src_usr;
-    UPDATE action.usr_circ_history SET usr = dest_usr WHERE usr = src_usr;
-
-    UPDATE action.hold_request SET usr = dest_usr WHERE usr = src_usr;
-    UPDATE action.hold_request SET fulfillment_staff = dest_usr WHERE fulfillment_staff = src_usr;
-    UPDATE action.hold_request SET requestor = dest_usr WHERE requestor = src_usr;
-    UPDATE action.hold_notification SET notify_staff = dest_usr WHERE notify_staff = src_usr;
-
-    UPDATE action.in_house_use SET staff = dest_usr WHERE staff = src_usr;
-    UPDATE action.non_cataloged_circulation SET staff = dest_usr WHERE staff = src_usr;
-    UPDATE action.non_cataloged_circulation SET patron = dest_usr WHERE patron = src_usr;
-    UPDATE action.non_cat_in_house_use SET staff = dest_usr WHERE staff = src_usr;
-    UPDATE action.survey_response SET usr = dest_usr WHERE usr = src_usr;
-
-    -- acq.*
-    UPDATE acq.fund_allocation SET allocator = dest_usr WHERE allocator = src_usr;
-       UPDATE acq.fund_transfer SET transfer_user = dest_usr WHERE transfer_user = src_usr;
-
-       -- transfer picklists the same way we transfer buckets (see above)
-       FOR picklist_row in
-               SELECT id, name
-               FROM   acq.picklist
-               WHERE  owner = src_usr
-       LOOP
-               suffix := ' (' || src_usr || ')';
-               LOOP
-                       BEGIN
-                               UPDATE  acq.picklist
-                               SET     owner = dest_usr, name = name || suffix
-                               WHERE   id = picklist_row.id;
-                       EXCEPTION WHEN unique_violation THEN
-                               suffix := suffix || ' ';
-                               CONTINUE;
-                       END;
-                       EXIT;
-               END LOOP;
-       END LOOP;
-
-    UPDATE acq.purchase_order SET owner = dest_usr WHERE owner = src_usr;
-    UPDATE acq.po_note SET creator = dest_usr WHERE creator = src_usr;
-    UPDATE acq.po_note SET editor = dest_usr WHERE editor = src_usr;
-    UPDATE acq.provider_note SET creator = dest_usr WHERE creator = src_usr;
-    UPDATE acq.provider_note SET editor = dest_usr WHERE editor = src_usr;
-    UPDATE acq.lineitem_note SET creator = dest_usr WHERE creator = src_usr;
-    UPDATE acq.lineitem_note SET editor = dest_usr WHERE editor = src_usr;
-    UPDATE acq.lineitem_usr_attr_definition SET usr = dest_usr WHERE usr = src_usr;
-
-    -- asset.*
-    UPDATE asset.copy SET creator = dest_usr WHERE creator = src_usr;
-    UPDATE asset.copy SET editor = dest_usr WHERE editor = src_usr;
-    UPDATE asset.copy_note SET creator = dest_usr WHERE creator = src_usr;
-    UPDATE asset.call_number SET creator = dest_usr WHERE creator = src_usr;
-    UPDATE asset.call_number SET editor = dest_usr WHERE editor = src_usr;
-    UPDATE asset.call_number_note SET creator = dest_usr WHERE creator = src_usr;
-
-    -- serial.*
-    UPDATE serial.record_entry SET creator = dest_usr WHERE creator = src_usr;
-    UPDATE serial.record_entry SET editor = dest_usr WHERE editor = src_usr;
-
-    -- reporter.*
-    -- It's not uncommon to define the reporter schema in a replica 
-    -- DB only, so don't assume these tables exist in the write DB.
-    BEGIN
-       UPDATE reporter.template SET owner = dest_usr WHERE owner = src_usr;
-    EXCEPTION WHEN undefined_table THEN
-        -- do nothing
-    END;
-    BEGIN
-       UPDATE reporter.report SET owner = dest_usr WHERE owner = src_usr;
-    EXCEPTION WHEN undefined_table THEN
-        -- do nothing
-    END;
-    BEGIN
-       UPDATE reporter.schedule SET runner = dest_usr WHERE runner = src_usr;
-    EXCEPTION WHEN undefined_table THEN
-        -- do nothing
-    END;
-    BEGIN
-               -- transfer folders the same way we transfer buckets (see above)
-               FOR folder_row in
-                       SELECT id, name
-                       FROM   reporter.template_folder
-                       WHERE  owner = src_usr
-               LOOP
-                       suffix := ' (' || src_usr || ')';
-                       LOOP
-                               BEGIN
-                                       UPDATE  reporter.template_folder
-                                       SET     owner = dest_usr, name = name || suffix
-                                       WHERE   id = folder_row.id;
-                               EXCEPTION WHEN unique_violation THEN
-                                       suffix := suffix || ' ';
-                                       CONTINUE;
-                               END;
-                               EXIT;
-                       END LOOP;
-               END LOOP;
-    EXCEPTION WHEN undefined_table THEN
-        -- do nothing
-    END;
-    BEGIN
-               -- transfer folders the same way we transfer buckets (see above)
-               FOR folder_row in
-                       SELECT id, name
-                       FROM   reporter.report_folder
-                       WHERE  owner = src_usr
-               LOOP
-                       suffix := ' (' || src_usr || ')';
-                       LOOP
-                               BEGIN
-                                       UPDATE  reporter.report_folder
-                                       SET     owner = dest_usr, name = name || suffix
-                                       WHERE   id = folder_row.id;
-                               EXCEPTION WHEN unique_violation THEN
-                                       suffix := suffix || ' ';
-                                       CONTINUE;
-                               END;
-                               EXIT;
-                       END LOOP;
-               END LOOP;
-    EXCEPTION WHEN undefined_table THEN
-        -- do nothing
-    END;
-    BEGIN
-               -- transfer folders the same way we transfer buckets (see above)
-               FOR folder_row in
-                       SELECT id, name
-                       FROM   reporter.output_folder
-                       WHERE  owner = src_usr
-               LOOP
-                       suffix := ' (' || src_usr || ')';
-                       LOOP
-                               BEGIN
-                                       UPDATE  reporter.output_folder
-                                       SET     owner = dest_usr, name = name || suffix
-                                       WHERE   id = folder_row.id;
-                               EXCEPTION WHEN unique_violation THEN
-                                       suffix := suffix || ' ';
-                                       CONTINUE;
-                               END;
-                               EXIT;
-                       END LOOP;
-               END LOOP;
-    EXCEPTION WHEN undefined_table THEN
-        -- do nothing
-    END;
-
-    -- Finally, delete the source user
-    DELETE FROM actor.usr WHERE id = src_usr;
-
-END;
-$$ LANGUAGE plpgsql;
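-- Usage sketch with hypothetical user ids 101 (source) and 202 (destination),
-- deleting the source user's addresses and keeping but deactivating its cards:
-- SELECT actor.usr_merge(101, 202, TRUE, FALSE, TRUE);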
-
-
-
-
-
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1056', :eg_version); -- miker/gmcharlt
-
-INSERT INTO permission.perm_list (id,code,description) VALUES (592,'CONTAINER_BATCH_UPDATE','Allow batch update via buckets');
-
-INSERT INTO container.user_bucket_type (code,label) SELECT code,label FROM container.copy_bucket_type where code = 'staff_client';
-
-CREATE TABLE action.fieldset_group (
-    id              SERIAL  PRIMARY KEY,
-    name            TEXT        NOT NULL,
-    create_time     TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-    complete_time   TIMESTAMPTZ,
-    container       INT,        -- Points to a container of some type ...
-    container_type  TEXT,       -- One of 'biblio_record_entry', 'user', 'call_number', 'copy'
-    can_rollback    BOOL        DEFAULT TRUE,
-    rollback_group  INT         REFERENCES action.fieldset_group (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
-    rollback_time   TIMESTAMPTZ,
-    creator         INT         NOT NULL REFERENCES actor.usr (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
-    owning_lib      INT         NOT NULL REFERENCES actor.org_unit (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED
-);
-
-ALTER TABLE action.fieldset ADD COLUMN fieldset_group INT REFERENCES action.fieldset_group (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE action.fieldset ADD COLUMN error_msg TEXT;
-ALTER TABLE container.biblio_record_entry_bucket ADD COLUMN owning_lib INT REFERENCES actor.org_unit (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE container.user_bucket ADD COLUMN owning_lib INT REFERENCES actor.org_unit (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE container.call_number_bucket ADD COLUMN owning_lib INT REFERENCES actor.org_unit (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE container.copy_bucket ADD COLUMN owning_lib INT REFERENCES actor.org_unit (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-
-UPDATE query.stored_query SET id = id + 1000 WHERE id < 1000;
-UPDATE query.from_relation SET id = id + 1000 WHERE id < 1000;
-UPDATE query.expression SET id = id + 1000 WHERE id < 1000;
-
-SELECT SETVAL('query.stored_query_id_seq', 1, FALSE);
-SELECT SETVAL('query.from_relation_id_seq', 1, FALSE);
-SELECT SETVAL('query.expression_id_seq', 1, FALSE);
-
-INSERT INTO query.bind_variable (name,type,description,label)
-    SELECT  'bucket','number','ID of the bucket to pull items from','Bucket ID'
-      WHERE NOT EXISTS (SELECT 1 FROM query.bind_variable WHERE name = 'bucket');
-
--- Assumes the low ID range of the 'query' schema is now empty (existing rows were shifted above)
-INSERT INTO query.stored_query (type, use_distinct) VALUES ('SELECT', TRUE); -- 1
-
-INSERT INTO query.from_relation (type, table_name, class_name, table_alias) VALUES ('RELATION', 'container.user_bucket_item', 'cubi', 'cubi'); -- 1
-UPDATE query.stored_query SET from_clause = 1;
-
-INSERT INTO query.expr_xcol (table_alias, column_name) VALUES ('cubi', 'target_user'); -- 1
-INSERT INTO query.select_item (stored_query,seq_no,expression) VALUES (1,1,1);
-
-INSERT INTO query.expr_xcol (table_alias, column_name) VALUES ('cubi', 'bucket'); -- 2
-INSERT INTO query.expr_xbind (bind_variable) VALUES ('bucket'); -- 3
-
-INSERT INTO query.expr_xop (left_operand, operator, right_operand) VALUES (2, '=', 3); -- 4
-UPDATE query.stored_query SET where_clause = 4;
-
-SELECT SETVAL('query.stored_query_id_seq', 1000, TRUE) FROM query.stored_query;
-SELECT SETVAL('query.from_relation_id_seq', 1000, TRUE) FROM query.from_relation;
-SELECT SETVAL('query.expression_id_seq', 10000, TRUE) FROM query.expression;
-
-CREATE OR REPLACE FUNCTION action.apply_fieldset(
-    fieldset_id IN INT,        -- id from action.fieldset
-    table_name  IN TEXT,       -- table to be updated
-    pkey_name   IN TEXT,       -- name of primary key column in that table
-    query       IN TEXT        -- query constructed by qstore (for query-based
-                               --    fieldsets only; otherwise null
-)
-RETURNS TEXT AS $$
-DECLARE
-    statement TEXT;
-    where_clause TEXT;
-    fs_status TEXT;
-    fs_pkey_value TEXT;
-    fs_query TEXT;
-    sep CHAR;
-    status_code TEXT;
-    msg TEXT;
-    fs_id INT;
-    fsg_id INT;
-    update_count INT;
-    cv RECORD;
-    fs_obj action.fieldset%ROWTYPE;
-    fs_group action.fieldset_group%ROWTYPE;
-    rb_row RECORD;
-BEGIN
-    -- Sanity checks
-    IF fieldset_id IS NULL THEN
-        RETURN 'Fieldset ID parameter is NULL';
-    END IF;
-    IF table_name IS NULL THEN
-        RETURN 'Table name parameter is NULL';
-    END IF;
-    IF pkey_name IS NULL THEN
-        RETURN 'Primary key name parameter is NULL';
-    END IF;
-
-    SELECT
-        status,
-        quote_literal( pkey_value )
-    INTO
-        fs_status,
-        fs_pkey_value
-    FROM
-        action.fieldset
-    WHERE
-        id = fieldset_id;
-
-    --
-    -- Build the WHERE clause.  This differs according to whether it's a
-    -- single-row fieldset or a query-based fieldset.
-    --
-    IF query IS NULL        AND fs_pkey_value IS NULL THEN
-        RETURN 'Incomplete fieldset: neither a primary key nor a query available';
-    ELSIF query IS NOT NULL AND fs_pkey_value IS NULL THEN
-        fs_query := rtrim( query, ';' );
-        where_clause := 'WHERE ' || pkey_name || ' IN ( '
-                     || fs_query || ' )';
-    ELSIF query IS NULL     AND fs_pkey_value IS NOT NULL THEN
-        where_clause := 'WHERE ' || pkey_name || ' = ';
-        IF pkey_name = 'id' THEN
-            where_clause := where_clause || fs_pkey_value;
-        ELSIF pkey_name = 'code' THEN
-            where_clause := where_clause || quote_literal(fs_pkey_value);
-        ELSE
-            RETURN 'Only know how to handle "id" and "code" pkeys currently, received ' || pkey_name;
-        END IF;
-    ELSE  -- both are not null
-        RETURN 'Ambiguous fieldset: both a primary key and a query provided';
-    END IF;
-
-    IF fs_status IS NULL THEN
-        RETURN 'No fieldset found for id = ' || fieldset_id;
-    ELSIF fs_status = 'APPLIED' THEN
-        RETURN 'Fieldset ' || fieldset_id || ' has already been applied';
-    END IF;
-
-    SELECT * INTO fs_obj FROM action.fieldset WHERE id = fieldset_id;
-    SELECT * INTO fs_group FROM action.fieldset_group WHERE id = fs_obj.fieldset_group;
-
-    IF fs_group.can_rollback THEN
-        -- This fieldset's group supports rollback, so record the current values in a separate, non-rollback group for future rollback.
-
-        INSERT INTO action.fieldset_group (can_rollback, name, creator, owning_lib, container, container_type)
-            VALUES (FALSE, 'ROLLBACK: '|| fs_group.name, fs_group.creator, fs_group.owning_lib, fs_group.container, fs_group.container_type);
-
-        fsg_id := CURRVAL('action.fieldset_group_id_seq');
-
-        FOR rb_row IN EXECUTE 'SELECT * FROM ' || table_name || ' ' || where_clause LOOP
-            IF pkey_name = 'id' THEN
-                fs_pkey_value := rb_row.id;
-            ELSIF pkey_name = 'code' THEN
-                fs_pkey_value := rb_row.code;
-            ELSE
-                RETURN 'Only know how to handle "id" and "code" pkeys currently, received ' || pkey_name;
-            END IF;
-            INSERT INTO action.fieldset (fieldset_group,owner,owning_lib,status,classname,name,pkey_value)
-                VALUES (fsg_id, fs_obj.owner, fs_obj.owning_lib, 'PENDING', fs_obj.classname, fs_obj.name || ' ROLLBACK FOR ' || fs_pkey_value, fs_pkey_value);
-
-            fs_id := CURRVAL('action.fieldset_id_seq');
-            sep := '';
-            FOR cv IN
-                SELECT  DISTINCT col
-                FROM    action.fieldset_col_val
-                WHERE   fieldset = fieldset_id
-            LOOP
-                EXECUTE 'INSERT INTO action.fieldset_col_val (fieldset, col, val) ' || 
-                    'SELECT '|| fs_id || ', '||quote_literal(cv.col)||', '||cv.col||' FROM '||table_name||' WHERE '||pkey_name||' = '||fs_pkey_value;
-            END LOOP;
-        END LOOP;
-    END IF;
-
-    statement := 'UPDATE ' || table_name || ' SET';
-
-    sep := '';
-    FOR cv IN
-        SELECT  col,
-                val
-        FROM    action.fieldset_col_val
-        WHERE   fieldset = fieldset_id
-    LOOP
-        statement := statement || sep || ' ' || cv.col
-                     || ' = ' || coalesce( quote_literal( cv.val ), 'NULL' );
-        sep := ',';
-    END LOOP;
-
-    IF sep = '' THEN
-        RETURN 'Fieldset ' || fieldset_id || ' has no column values defined';
-    END IF;
-    statement := statement || ' ' || where_clause;
-
-    --
-    -- Execute the update
-    --
-    BEGIN
-        EXECUTE statement;
-        GET DIAGNOSTICS update_count = ROW_COUNT;
-
-        IF update_count = 0 THEN
-            RAISE data_exception;
-        END IF;
-
-        IF fsg_id IS NOT NULL THEN
-            UPDATE action.fieldset_group SET rollback_group = fsg_id WHERE id = fs_group.id;
-        END IF;
-
-        IF fs_group.id IS NOT NULL THEN
-            UPDATE action.fieldset_group SET complete_time = now() WHERE id = fs_group.id;
-        END IF;
-
-        UPDATE action.fieldset SET status = 'APPLIED', applied_time = now() WHERE id = fieldset_id;
-
-    EXCEPTION WHEN data_exception THEN
-        msg := 'No eligible rows found for fieldset ' || fieldset_id;
-        UPDATE action.fieldset SET status = 'ERROR', applied_time = now() WHERE id = fieldset_id;
-        RETURN msg;
-
-    END;
-
-    RETURN msg;
-
-EXCEPTION WHEN OTHERS THEN
-    msg := 'Unable to apply fieldset ' || fieldset_id || ': ' || sqlerrm;
-    UPDATE action.fieldset SET status = 'ERROR', applied_time = now() WHERE id = fieldset_id;
-    RETURN msg;
-
-END;
-$$ LANGUAGE plpgsql;
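-- Usage sketch for a single-row fieldset (hypothetical fieldset id 1 targeting
-- asset.copy by its "id" key; the query argument is NULL in that case):
-- SELECT action.apply_fieldset(1, 'asset.copy', 'id', NULL);
-- A NULL return indicates success; otherwise a human-readable error string is returned.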
-
-
-
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1057', :eg_version); -- miker/gmcharlt/kmlussier
-
--- This change drops a needless join and saves 10-15% in time cost
-CREATE OR REPLACE FUNCTION search.facets_for_record_set(ignore_facet_classes text[], hits bigint[]) RETURNS TABLE(id integer, value text, count bigint)
-AS $f$
-    SELECT id, value, count
-      FROM (
-        SELECT  mfae.field AS id,
-                mfae.value,
-                COUNT(DISTINCT mfae.source),
-                row_number() OVER (
-                    PARTITION BY mfae.field ORDER BY COUNT(DISTINCT mfae.source) DESC
-                ) AS rownum
-          FROM  metabib.facet_entry mfae
-                JOIN config.metabib_field cmf ON (cmf.id = mfae.field)
-          WHERE mfae.source = ANY ($2)
-                AND cmf.facet_field
-                AND cmf.field_class NOT IN (SELECT * FROM unnest($1))
-          GROUP by 1, 2
-      ) all_facets
-      WHERE rownum <= (
-        SELECT COALESCE(
-            (SELECT value::INT FROM config.global_flag WHERE name = 'search.max_facets_per_field' AND enabled),
-            1000
-        )
-      );
-$f$ LANGUAGE SQL;
-
-CREATE OR REPLACE FUNCTION unapi.metabib_virtual_record_feed ( id_list BIGINT[], format TEXT, includes TEXT[], org TEXT, depth INT DEFAULT NULL, slimit HSTORE DEFAULT NULL, soffset HSTORE DEFAULT NULL, include_xmlns BOOL DEFAULT TRUE, title TEXT DEFAULT NULL, description TEXT DEFAULT NULL, creator TEXT DEFAULT NULL, update_ts TEXT DEFAULT NULL, unapi_url TEXT DEFAULT NULL, header_xml XML DEFAULT NULL ) RETURNS XML AS $F$
-DECLARE
-    layout          unapi.bre_output_layout%ROWTYPE;
-    transform       config.xml_transform%ROWTYPE;
-    item_format     TEXT;
-    tmp_xml         TEXT;
-    xmlns_uri       TEXT := 'http://open-ils.org/spec/feed-xml/v1';
-    ouid            INT;
-    element_list    TEXT[];
-BEGIN
-
-    IF org = '-' OR org IS NULL THEN
-        SELECT shortname INTO org FROM evergreen.org_top();
-    END IF;
-
-    SELECT id INTO ouid FROM actor.org_unit WHERE shortname = org;
-    SELECT * INTO layout FROM unapi.bre_output_layout WHERE name = format;
-
-    IF layout.name IS NULL THEN
-        RETURN NULL::XML;
-    END IF;
-
-    SELECT * INTO transform FROM config.xml_transform WHERE name = layout.transform;
-    xmlns_uri := COALESCE(transform.namespace_uri,xmlns_uri);
-
-    -- Gather the bib xml
-    SELECT XMLAGG( unapi.mmr(i, format, '', includes, org, depth, slimit, soffset, include_xmlns)) INTO tmp_xml FROM UNNEST( id_list ) i;
-
-    IF layout.title_element IS NOT NULL THEN
-        EXECUTE 'SELECT XMLCONCAT( XMLELEMENT( name '|| layout.title_element ||', XMLATTRIBUTES( $1 AS xmlns), $3), $2)' INTO tmp_xml USING xmlns_uri, tmp_xml::XML, title;
-    END IF;
-
-    IF layout.description_element IS NOT NULL THEN
-        EXECUTE 'SELECT XMLCONCAT( XMLELEMENT( name '|| layout.description_element ||', XMLATTRIBUTES( $1 AS xmlns), $3), $2)' INTO tmp_xml USING xmlns_uri, tmp_xml::XML, description;
-    END IF;
-
-    IF layout.creator_element IS NOT NULL THEN
-        EXECUTE 'SELECT XMLCONCAT( XMLELEMENT( name '|| layout.creator_element ||', XMLATTRIBUTES( $1 AS xmlns), $3), $2)' INTO tmp_xml USING xmlns_uri, tmp_xml::XML, creator;
-    END IF;
-
-    IF layout.update_ts_element IS NOT NULL THEN
-        EXECUTE 'SELECT XMLCONCAT( XMLELEMENT( name '|| layout.update_ts_element ||', XMLATTRIBUTES( $1 AS xmlns), $3), $2)' INTO tmp_xml USING xmlns_uri, tmp_xml::XML, update_ts;
-    END IF;
-
-    IF unapi_url IS NOT NULL THEN
-        EXECUTE $$SELECT XMLCONCAT( XMLELEMENT( name link, XMLATTRIBUTES( 'http://www.w3.org/1999/xhtml' AS xmlns, 'unapi-server' AS rel, $1 AS href, 'unapi' AS title)), $2)$$ INTO tmp_xml USING unapi_url, tmp_xml::XML;
-    END IF;
-
-    IF header_xml IS NOT NULL THEN tmp_xml := XMLCONCAT(header_xml,tmp_xml::XML); END IF;
-
-    element_list := regexp_split_to_array(layout.feed_top,E'\\.');
-    FOR i IN REVERSE ARRAY_UPPER(element_list, 1) .. 1 LOOP
-        EXECUTE 'SELECT XMLELEMENT( name '|| quote_ident(element_list[i]) ||', XMLATTRIBUTES( $1 AS xmlns), $2)' INTO tmp_xml USING xmlns_uri, tmp_xml::XML;
-    END LOOP;
-
-    RETURN tmp_xml::XML;
-END;
-$F$ LANGUAGE PLPGSQL STABLE;
-
-CREATE TABLE asset.copy_vis_attr_cache (
-    id              BIGSERIAL   PRIMARY KEY,
-    record          BIGINT      NOT NULL, -- No FKEYs, managed by user triggers.
-    target_copy     BIGINT      NOT NULL,
-    vis_attr_vector INT[]
-);
-CREATE INDEX copy_vis_attr_cache_record_idx ON asset.copy_vis_attr_cache (record);
-CREATE INDEX copy_vis_attr_cache_copy_idx ON asset.copy_vis_attr_cache (target_copy);
-
-ALTER TABLE biblio.record_entry ADD COLUMN vis_attr_vector INT[];
-
-CREATE OR REPLACE FUNCTION search.calculate_visibility_attribute ( value INT, attr TEXT ) RETURNS INT AS $f$
-SELECT  ((CASE $2
-
-            WHEN 'luri_org'         THEN 0 -- "b" attr
-            WHEN 'bib_source'       THEN 1 -- "b" attr
-
-            WHEN 'copy_flags'       THEN 0 -- "c" attr
-            WHEN 'owning_lib'       THEN 1 -- "c" attr
-            WHEN 'circ_lib'         THEN 2 -- "c" attr
-            WHEN 'status'           THEN 3 -- "c" attr
-            WHEN 'location'         THEN 4 -- "c" attr
-            WHEN 'location_group'   THEN 5 -- "c" attr
-
-        END) << 28 ) | $1;
-
-/* copy_flags bit positions, LSB-first:
-
- 0: asset.copy.opac_visible
-
-
-   When adding flags, you must update asset.all_visible_flags()
-
-   Because bib and copy values are stored separately, we can reuse
-   shifts, saving us some space. We could probably take back a bit
-   too, but I'm not sure it's worth squeezing that last one out. We'd
-   be left with just 2 slots for copy attrs, rather than 10.
-*/
-
-$f$ LANGUAGE SQL IMMUTABLE;
-
-CREATE OR REPLACE FUNCTION search.calculate_visibility_attribute_list ( attr TEXT, value INT[] ) RETURNS INT[] AS $f$
-    SELECT ARRAY_AGG(search.calculate_visibility_attribute(x, $1)) FROM UNNEST($2) AS X;
-$f$ LANGUAGE SQL IMMUTABLE;
-
-CREATE OR REPLACE FUNCTION search.calculate_visibility_attribute_test ( attr TEXT, value INT[], negate BOOL DEFAULT FALSE ) RETURNS TEXT AS $f$
-    SELECT  CASE WHEN $3 THEN '!' ELSE '' END || '(' || ARRAY_TO_STRING(search.calculate_visibility_attribute_list($1,$2),'|') || ')';
-$f$ LANGUAGE SQL IMMUTABLE;
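-
--- As a quick illustration of the encoding above (worked arithmetic only, not
--- values from any particular database): 'circ_lib' is attribute slot 2, so a
--- circ_lib value of 5 encodes to (2 << 28) | 5 = 536870917, and the helpers
--- produce query_int fragments such as:
---   search.calculate_visibility_attribute_test('circ_lib', ARRAY[4,5])
---     => '(536870916|536870917)'
---   search.calculate_visibility_attribute_test('circ_lib', ARRAY[4,5], TRUE)
---     => '!(536870916|536870917)'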
-
-CREATE OR REPLACE FUNCTION asset.calculate_copy_visibility_attribute_set ( copy_id BIGINT ) RETURNS INT[] AS $f$
-DECLARE
-    copy_row    asset.copy%ROWTYPE;
-    lgroup_map  asset.copy_location_group_map%ROWTYPE;
-    attr_set    INT[];
-BEGIN
-    SELECT * INTO copy_row FROM asset.copy WHERE id = copy_id;
-
-    attr_set := attr_set || search.calculate_visibility_attribute(copy_row.opac_visible::INT, 'copy_flags');
-    attr_set := attr_set || search.calculate_visibility_attribute(copy_row.circ_lib, 'circ_lib');
-    attr_set := attr_set || search.calculate_visibility_attribute(copy_row.status, 'status');
-    attr_set := attr_set || search.calculate_visibility_attribute(copy_row.location, 'location');
-
-    SELECT  ARRAY_APPEND(
-                attr_set,
-                search.calculate_visibility_attribute(owning_lib, 'owning_lib')
-            ) INTO attr_set
-      FROM  asset.call_number
-      WHERE id = copy_row.call_number;
-
-    FOR lgroup_map IN SELECT * FROM asset.copy_location_group_map WHERE location = copy_row.location LOOP
-        attr_set := attr_set || search.calculate_visibility_attribute(lgroup_map.lgroup, 'location_group');
-    END LOOP;
-
-    RETURN attr_set;
-END;
-$f$ LANGUAGE PLPGSQL;
-
-CREATE OR REPLACE FUNCTION biblio.calculate_bib_visibility_attribute_set ( bib_id BIGINT ) RETURNS INT[] AS $f$
-DECLARE
-    bib_row     biblio.record_entry%ROWTYPE;
-    cn_row      asset.call_number%ROWTYPE;
-    attr_set    INT[];
-BEGIN
-    SELECT * INTO bib_row FROM biblio.record_entry WHERE id = bib_id;
-
-    IF bib_row.source IS NOT NULL THEN
-        attr_set := attr_set || search.calculate_visibility_attribute(bib_row.source, 'bib_source');
-    END IF;
-
-    FOR cn_row IN
-        SELECT  cn.*
-          FROM  asset.call_number cn
-                JOIN asset.uri_call_number_map m ON (cn.id = m.call_number)
-                JOIN asset.uri u ON (u.id = m.uri)
-          WHERE cn.record = bib_id
-                AND cn.label = '##URI##'
-                AND u.active
-    LOOP
-        attr_set := attr_set || search.calculate_visibility_attribute(cn_row.owning_lib, 'luri_org');
-    END LOOP;
-
-    RETURN attr_set;
-END;
-$f$ LANGUAGE PLPGSQL;
-
-
-SELECT evergreen.upgrade_deps_block_check('1076', :eg_version); -- miker/gmcharlt
-
-CREATE OR REPLACE FUNCTION asset.cache_copy_visibility () RETURNS TRIGGER as $func$
-DECLARE
-    ocn     asset.call_number%ROWTYPE;
-    ncn     asset.call_number%ROWTYPE;
-    cid     BIGINT;
-BEGIN
-
-    IF TG_TABLE_NAME = 'peer_bib_copy_map' THEN -- Only needs ON INSERT OR DELETE, so handle separately
-        IF TG_OP = 'INSERT' THEN
-            INSERT INTO asset.copy_vis_attr_cache (record, target_copy, vis_attr_vector) VALUES (
-                NEW.peer_record,
-                NEW.target_copy,
-                asset.calculate_copy_visibility_attribute_set(NEW.target_copy)
-            );
-
-            RETURN NEW;
-        ELSIF TG_OP = 'DELETE' THEN
-            DELETE FROM asset.copy_vis_attr_cache
-              WHERE record = OLD.peer_record AND target_copy = OLD.target_copy;
-
-            RETURN OLD;
-        END IF;
-    END IF;
-
-    IF TG_OP = 'INSERT' THEN -- Handles ON INSERT. ON UPDATE is below.
-        IF TG_TABLE_NAME IN ('copy', 'unit') THEN
-            SELECT * INTO ncn FROM asset.call_number cn WHERE id = NEW.call_number;
-            INSERT INTO asset.copy_vis_attr_cache (record, target_copy, vis_attr_vector) VALUES (
-                ncn.record,
-                NEW.id,
-                asset.calculate_copy_visibility_attribute_set(NEW.id)
-            );
-        ELSIF TG_TABLE_NAME = 'record_entry' THEN
-            NEW.vis_attr_vector := biblio.calculate_bib_visibility_attribute_set(NEW.id);
-        END IF;
-
-        RETURN NEW;
-    END IF;
-
-    -- handle items first, since with circulation activity
-    -- their statuses change frequently
-    IF TG_TABLE_NAME IN ('copy', 'unit') THEN -- This handles ON UPDATE OR DELETE. ON INSERT above
-
-        IF TG_OP = 'DELETE' THEN -- Shouldn't get here, normally
-            DELETE FROM asset.copy_vis_attr_cache WHERE target_copy = OLD.id;
-            RETURN OLD;
-        END IF;
-
-        SELECT * INTO ncn FROM asset.call_number cn WHERE id = NEW.call_number;
-
-        IF OLD.deleted <> NEW.deleted THEN
-            IF NEW.deleted THEN
-                DELETE FROM asset.copy_vis_attr_cache WHERE target_copy = OLD.id;
-            ELSE
-                INSERT INTO asset.copy_vis_attr_cache (record, target_copy, vis_attr_vector) VALUES (
-                    ncn.record,
-                    NEW.id,
-                    asset.calculate_copy_visibility_attribute_set(NEW.id)
-                );
-            END IF;
-
-            RETURN NEW;
-        ELSIF OLD.call_number  <> NEW.call_number THEN
-            SELECT * INTO ocn FROM asset.call_number cn WHERE id = OLD.call_number;
-
-            IF ncn.record <> ocn.record THEN
-                UPDATE  biblio.record_entry
-                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(ncn.record)
-                  WHERE id = ocn.record;
-
-                -- We have to use a record-specific WHERE clause
-                -- to avoid modifying the entries for peer-bib copies.
-                UPDATE  asset.copy_vis_attr_cache
-                  SET   target_copy = NEW.id,
-                        record = ncn.record
-                  WHERE target_copy = OLD.id
-                        AND record = ocn.record;
-            END IF;
-        END IF;
-
-        IF OLD.location     <> NEW.location OR
-           OLD.status       <> NEW.status OR
-           OLD.opac_visible <> NEW.opac_visible OR
-           OLD.circ_lib     <> NEW.circ_lib
-        THEN
-            -- Any of these could change visibility, but
-            -- we'll save some queries and not try to calculate
-            -- the change directly.  We want to update peer-bib
-            -- entries in this case, unlike above.
-            UPDATE  asset.copy_vis_attr_cache
-              SET   target_copy = NEW.id,
-                    vis_attr_vector = asset.calculate_copy_visibility_attribute_set(NEW.id)
-              WHERE target_copy = OLD.id;
-
-        END IF;
-
-    ELSIF TG_TABLE_NAME = 'call_number' THEN -- Only ON UPDATE. Copy handler will deal with ON INSERT OR DELETE.
-
-        IF OLD.record <> NEW.record THEN
-            IF NEW.label = '##URI##' THEN
-                UPDATE  biblio.record_entry
-                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(OLD.record)
-                  WHERE id = OLD.record;
-
-                UPDATE  biblio.record_entry
-                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(NEW.record)
-                  WHERE id = NEW.record;
-            END IF;
-
-            UPDATE  asset.copy_vis_attr_cache
-              SET   record = NEW.record,
-                    vis_attr_vector = asset.calculate_copy_visibility_attribute_set(target_copy)
-              WHERE target_copy IN (SELECT id FROM asset.copy WHERE call_number = NEW.id)
-                    AND record = OLD.record;
-
-        ELSIF OLD.owning_lib <> NEW.owning_lib THEN
-            UPDATE  asset.copy_vis_attr_cache
-              SET   vis_attr_vector = asset.calculate_copy_visibility_attribute_set(target_copy)
-              WHERE target_copy IN (SELECT id FROM asset.copy WHERE call_number = NEW.id)
-                    AND record = NEW.record;
-
-            IF NEW.label = '##URI##' THEN
-                UPDATE  biblio.record_entry
-                  SET   vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(OLD.record)
-                  WHERE id = OLD.record;
-            END IF;
-        END IF;
-
-    ELSIF TG_TABLE_NAME = 'record_entry' THEN -- Only handles ON UPDATE OR DELETE
-
-        IF TG_OP = 'DELETE' THEN -- Shouldn't get here, normally
-            DELETE FROM asset.copy_vis_attr_cache WHERE record = OLD.id;
-            RETURN OLD;
-        ELSIF OLD.source <> NEW.source THEN
-            NEW.vis_attr_vector := biblio.calculate_bib_visibility_attribute_set(NEW.id);
-        END IF;
-
-    END IF;
-
-    RETURN NEW;
-END;
-$func$ LANGUAGE PLPGSQL;
-
-
--- Helper functions for use in constructing searches --
-
-CREATE OR REPLACE FUNCTION asset.all_visible_flags () RETURNS TEXT AS $f$
-    SELECT  '(' || ARRAY_TO_STRING(ARRAY_AGG(search.calculate_visibility_attribute(1 << x, 'copy_flags')),'&') || ')'
-      FROM  GENERATE_SERIES(0,0) AS x; -- increment as new flags are added.
-$f$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION asset.visible_orgs (otype TEXT) RETURNS TEXT AS $f$
-    SELECT  '(' || ARRAY_TO_STRING(ARRAY_AGG(search.calculate_visibility_attribute(id, $1)),'|') || ')'
-      FROM  actor.org_unit
-      WHERE opac_visible;
-$f$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION asset.invisible_orgs (otype TEXT) RETURNS TEXT AS $f$
-    SELECT  '!(' || ARRAY_TO_STRING(ARRAY_AGG(search.calculate_visibility_attribute(id, $1)),'|') || ')'
-      FROM  actor.org_unit
-      WHERE NOT opac_visible;
-$f$ LANGUAGE SQL STABLE;
-
--- Bib-oriented defaults for search
-CREATE OR REPLACE FUNCTION asset.bib_source_default () RETURNS TEXT AS $f$
-    SELECT  '(' || ARRAY_TO_STRING(ARRAY_AGG(search.calculate_visibility_attribute(id, 'bib_source')),'|') || ')'
-      FROM  config.bib_source
-      WHERE transcendant;
-$f$ LANGUAGE SQL IMMUTABLE;
-
-CREATE OR REPLACE FUNCTION asset.luri_org_default () RETURNS TEXT AS $f$
-    SELECT  * FROM asset.invisible_orgs('luri_org');
-$f$ LANGUAGE SQL STABLE;
-
--- Copy-oriented defaults for search
-CREATE OR REPLACE FUNCTION asset.location_group_default () RETURNS TEXT AS $f$
-    SELECT  '!(' || ARRAY_TO_STRING(ARRAY_AGG(search.calculate_visibility_attribute(id, 'location_group')),'|') || ')'
-      FROM  asset.copy_location_group
-      WHERE NOT opac_visible;
-$f$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION asset.location_default () RETURNS TEXT AS $f$
-    SELECT  '!(' || ARRAY_TO_STRING(ARRAY_AGG(search.calculate_visibility_attribute(id, 'location')),'|') || ')'
-      FROM  asset.copy_location
-      WHERE NOT opac_visible;
-$f$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION asset.status_default () RETURNS TEXT AS $f$
-    SELECT  '!(' || ARRAY_TO_STRING(ARRAY_AGG(search.calculate_visibility_attribute(id, 'status')),'|') || ')'
-      FROM  config.copy_status
-      WHERE NOT opac_visible;
-$f$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION asset.owning_lib_default () RETURNS TEXT AS $f$
-    SELECT  * FROM asset.invisible_orgs('owning_lib');
-$f$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION asset.circ_lib_default () RETURNS TEXT AS $f$
-    SELECT  * FROM asset.invisible_orgs('circ_lib');
-$f$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION asset.patron_default_visibility_mask () RETURNS TABLE (b_attrs TEXT, c_attrs TEXT)  AS $f$
-DECLARE
-    copy_flags      TEXT; -- "c" attr
-
-    owning_lib      TEXT; -- "c" attr
-    circ_lib        TEXT; -- "c" attr
-    status          TEXT; -- "c" attr
-    location        TEXT; -- "c" attr
-    location_group  TEXT; -- "c" attr
-
-    luri_org        TEXT; -- "b" attr
-    bib_sources     TEXT; -- "b" attr
-BEGIN
-    copy_flags      := asset.all_visible_flags(); -- Will always have at least one
-
-    owning_lib      := NULLIF(asset.owning_lib_default(),'!()');
-    
-    circ_lib        := NULLIF(asset.circ_lib_default(),'!()');
-    status          := NULLIF(asset.status_default(),'!()');
-    location        := NULLIF(asset.location_default(),'!()');
-    location_group  := NULLIF(asset.location_group_default(),'!()');
-
-    luri_org        := NULLIF(asset.luri_org_default(),'!()');
-    bib_sources     := NULLIF(asset.bib_source_default(),'()');
-
-    RETURN QUERY SELECT
-        '('||ARRAY_TO_STRING(
-            ARRAY[luri_org,bib_sources],
-            '|'
-        )||')',
-        '('||ARRAY_TO_STRING(
-            ARRAY[copy_flags,owning_lib,circ_lib,status,location,location_group]::TEXT[],
-            '&'
-        )||')';
-END;
-$f$ LANGUAGE PLPGSQL STABLE ROWS 1;
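-
--- For orientation (this mirrors how the copy-count and browse functions below
--- consume the mask; it is not a new interface): fetch the mask once, then
--- test it against the cached attribute vectors with query_int, e.g.
---   SELECT DISTINCT av.record
---     FROM asset.copy_vis_attr_cache av,
---          asset.patron_default_visibility_mask() m
---    WHERE av.vis_attr_vector @@ m.c_attrs::query_int;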
-
-CREATE OR REPLACE FUNCTION metabib.suggest_browse_entries(raw_query_text text, search_class text, headline_opts text, visibility_org integer, query_limit integer, normalization integer)
- RETURNS TABLE(value text, field integer, buoyant_and_class_match boolean, field_match boolean, field_weight integer, rank real, buoyant boolean, match text)
-AS $f$
-DECLARE
-    prepared_query_texts    TEXT[];
-    query                   TSQUERY;
-    plain_query             TSQUERY;
-    opac_visibility_join    TEXT;
-    search_class_join       TEXT;
-    r_fields                RECORD;
-BEGIN
-    prepared_query_texts := metabib.autosuggest_prepare_tsquery(raw_query_text);
-
-    query := TO_TSQUERY('keyword', prepared_query_texts[1]);
-    plain_query := TO_TSQUERY('keyword', prepared_query_texts[2]);
-
-    visibility_org := NULLIF(visibility_org,-1);
-    IF visibility_org IS NOT NULL THEN
-        PERFORM FROM actor.org_unit WHERE id = visibility_org AND parent_ou IS NULL;
-        IF FOUND THEN
-            opac_visibility_join := '';
-        ELSE
-            opac_visibility_join := '
-    JOIN asset.copy_vis_attr_cache acvac ON (acvac.record = x.source)
-    JOIN vm ON (acvac.vis_attr_vector @@
-            (vm.c_attrs || $$&$$ ||
-                search.calculate_visibility_attribute_test(
-                    $$circ_lib$$,
-                    (SELECT ARRAY_AGG(id) FROM actor.org_unit_descendants($4))
-                )
-            )::query_int
-         )
-';
-        END IF;
-    ELSE
-        opac_visibility_join := '';
-    END IF;
-
-    -- The following determines whether we only provide suggestions matching
-    -- the user's selected search_class, or whether we show other suggestions
-    -- too. The reason for MIN() is that for search_classes like
-    -- 'title|proper|uniform' you would otherwise get multiple rows.  The
-    -- implication is that if title as a class doesn't have restrict,
-    -- nor does the proper field, but the uniform field does, you're going
-    -- to get 'false' for your overall evaluation of 'should we restrict?'
-    -- To invert that, change from MIN() to MAX().
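-    -- A tiny worked example: for 'title|proper|uniform', suppose the proper
-    -- field has restrict = false (0) and the uniform field has restrict =
-    -- true (1); MIN(0,1) = 0 reads as "do not restrict", while MAX(0,1) = 1
-    -- would read as "restrict".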
-
-    SELECT
-        INTO r_fields
-            MIN(cmc.restrict::INT) AS restrict_class,
-            MIN(cmf.restrict::INT) AS restrict_field
-        FROM metabib.search_class_to_registered_components(search_class)
-            AS _registered (field_class TEXT, field INT)
-        JOIN
-            config.metabib_class cmc ON (cmc.name = _registered.field_class)
-        LEFT JOIN
-            config.metabib_field cmf ON (cmf.id = _registered.field);
-
-    -- evaluate 'should we restrict?'
-    IF r_fields.restrict_field::BOOL OR r_fields.restrict_class::BOOL THEN
-        search_class_join := '
-    JOIN
-        metabib.search_class_to_registered_components($2)
-        AS _registered (field_class TEXT, field INT) ON (
-            (_registered.field IS NULL AND
-                _registered.field_class = cmf.field_class) OR
-            (_registered.field = cmf.id)
-        )
-    ';
-    ELSE
-        search_class_join := '
-    LEFT JOIN
-        metabib.search_class_to_registered_components($2)
-        AS _registered (field_class TEXT, field INT) ON (
-            _registered.field_class = cmc.name
-        )
-    ';
-    END IF;
-
-    RETURN QUERY EXECUTE '
-WITH vm AS ( SELECT * FROM asset.patron_default_visibility_mask() ),
-     mbe AS (SELECT * FROM metabib.browse_entry WHERE index_vector @@ $1 LIMIT 10000)
-SELECT  DISTINCT
-        x.value,
-        x.id,
-        x.push,
-        x.restrict,
-        x.weight,
-        x.ts_rank_cd,
-        x.buoyant,
-        TS_HEADLINE(value, $7, $3)
-  FROM  (SELECT DISTINCT
-                mbe.value,
-                cmf.id,
-                cmc.buoyant AND _registered.field_class IS NOT NULL AS push,
-                _registered.field = cmf.id AS restrict,
-                cmf.weight,
-                TS_RANK_CD(mbe.index_vector, $1, $6),
-                cmc.buoyant,
-                mbedm.source
-          FROM  metabib.browse_entry_def_map mbedm
-                JOIN mbe ON (mbe.id = mbedm.entry)
-                JOIN config.metabib_field cmf ON (cmf.id = mbedm.def)
-                JOIN config.metabib_class cmc ON (cmf.field_class = cmc.name)
-                '  || search_class_join || '
-          ORDER BY 3 DESC, 4 DESC NULLS LAST, 5 DESC, 6 DESC, 7 DESC, 1 ASC
-          LIMIT 1000) AS x
-        ' || opac_visibility_join || '
-  ORDER BY 3 DESC, 4 DESC NULLS LAST, 5 DESC, 6 DESC, 7 DESC, 1 ASC
-  LIMIT $5
-'   -- sic, repeat the order by clause in the outer select too
-    USING
-        query, search_class, headline_opts,
-        visibility_org, query_limit, normalization, plain_query
-        ;
-
-    -- sort order:
-    --  buoyant AND chosen class = match class
-    --  chosen field = match field
-    --  field weight
-    --  rank
-    --  buoyancy
-    --  value itself
-
-END;
-$f$ LANGUAGE plpgsql ROWS 10;
-
-CREATE OR REPLACE FUNCTION metabib.browse(search_field integer[], browse_term text, context_org integer DEFAULT NULL::integer, context_loc_group integer DEFAULT NULL::integer, staff boolean DEFAULT false, pivot_id bigint DEFAULT NULL::bigint, result_limit integer DEFAULT 10)
- RETURNS SETOF metabib.flat_browse_entry_appearance
-AS $f$
-DECLARE
-    core_query              TEXT;
-    back_query              TEXT;
-    forward_query           TEXT;
-    pivot_sort_value        TEXT;
-    pivot_sort_fallback     TEXT;
-    context_locations       INT[];
-    browse_superpage_size   INT;
-    results_skipped         INT := 0;
-    back_limit              INT;
-    back_to_pivot           INT;
-    forward_limit           INT;
-    forward_to_pivot        INT;
-BEGIN
-    -- First, find the pivot if we were given a browse term but not a pivot.
-    IF pivot_id IS NULL THEN
-        pivot_id := metabib.browse_pivot(search_field, browse_term);
-    END IF;
-
-    SELECT INTO pivot_sort_value, pivot_sort_fallback
-        sort_value, value FROM metabib.browse_entry WHERE id = pivot_id;
-
-    -- Bail if we couldn't find a pivot.
-    IF pivot_sort_value IS NULL THEN
-        RETURN;
-    END IF;
-
-    -- Transform the context_loc_group argument (if any) (logc at the
-    -- TPAC layer) into a form we'll be able to use.
-    IF context_loc_group IS NOT NULL THEN
-        SELECT INTO context_locations ARRAY_AGG(location)
-            FROM asset.copy_location_group_map
-            WHERE lgroup = context_loc_group;
-    END IF;
-
-    -- Get the configured size of browse superpages.
-    SELECT INTO browse_superpage_size COALESCE(value::INT,100)     -- NULL ok
-        FROM config.global_flag
-        WHERE enabled AND name = 'opac.browse.holdings_visibility_test_limit';
-
-    -- First we're going to search backward from the pivot, then we're going
-    -- to search forward.  In each direction, we need two limits.  At the
-    -- lesser of the two limits, we delineate the edge of the result set
-    -- we're going to return.  At the greater of the two limits, we find the
-    -- pivot value that would represent an offset from the current pivot
-    -- at a distance of one "page" in either direction, where a "page" is a
-    -- result set of the size specified in the "result_limit" argument.
-    --
-    -- The two limits in each direction make four derived values in total,
-    -- and we calculate them now.
-    back_limit := CEIL(result_limit::FLOAT / 2);
-    back_to_pivot := result_limit;
-    forward_limit := result_limit / 2;
-    forward_to_pivot := result_limit - 1;
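-    -- Worked example: with the default result_limit of 10, back_limit =
-    -- CEIL(10.0 / 2) = 5, back_to_pivot = 10, forward_limit = 10 / 2 = 5
-    -- (integer division), and forward_to_pivot = 9.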
-
-    -- This is the meat of the SQL query that finds browse entries.  We'll
-    -- pass this to a function which uses it with a cursor, so that individual
-    -- rows may be fetched in a loop until some condition is satisfied, without
-    -- waiting for a result set of fixed size to be collected all at once.
-    core_query := '
-SELECT  mbe.id,
-        mbe.value,
-        mbe.sort_value
-  FROM  metabib.browse_entry mbe
-  WHERE (
-            EXISTS ( -- are there any bibs using this mbe via the requested fields?
-                SELECT  1
-                  FROM  metabib.browse_entry_def_map mbedm
-                  WHERE mbedm.entry = mbe.id AND mbedm.def = ANY(' || quote_literal(search_field) || ')
-            ) OR EXISTS ( -- are there any authorities using this mbe via the requested fields?
-                SELECT  1
-                  FROM  metabib.browse_entry_simple_heading_map mbeshm
-                        JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
-                        JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
-                            ash.atag = map.authority_field
-                            AND map.metabib_field = ANY(' || quote_literal(search_field) || ')
-                        )
-                  WHERE mbeshm.entry = mbe.id
-            )
-        ) AND ';
-
-    -- This is the variant of the query for browsing backward.
-    back_query := core_query ||
-        ' mbe.sort_value <= ' || quote_literal(pivot_sort_value) ||
-    ' ORDER BY mbe.sort_value DESC, mbe.value DESC LIMIT 1000';
-
-    -- This variant browses forward.
-    forward_query := core_query ||
-        ' mbe.sort_value > ' || quote_literal(pivot_sort_value) ||
-    ' ORDER BY mbe.sort_value, mbe.value LIMIT 1000';
-
-    -- We now call the function which applies a cursor to the provided
-    -- queries, stopping at the appropriate limits and also giving us
-    -- the next page's pivot.
-    RETURN QUERY
-        SELECT * FROM metabib.staged_browse(
-            back_query, search_field, context_org, context_locations,
-            staff, browse_superpage_size, TRUE, back_limit, back_to_pivot
-        ) UNION
-        SELECT * FROM metabib.staged_browse(
-            forward_query, search_field, context_org, context_locations,
-            staff, browse_superpage_size, FALSE, forward_limit, forward_to_pivot
-        ) ORDER BY row_number DESC;
-
-END;
-$f$ LANGUAGE plpgsql ROWS 10;
-
-CREATE OR REPLACE FUNCTION metabib.staged_browse(query text, fields integer[], context_org integer, context_locations integer[], staff boolean, browse_superpage_size integer, count_up_from_zero boolean, result_limit integer, next_pivot_pos integer)
- RETURNS SETOF metabib.flat_browse_entry_appearance
-AS $f$
-DECLARE
-    curs                    REFCURSOR;
-    rec                     RECORD;
-    qpfts_query             TEXT;
-    aqpfts_query            TEXT;
-    afields                 INT[];
-    bfields                 INT[];
-    result_row              metabib.flat_browse_entry_appearance%ROWTYPE;
-    results_skipped         INT := 0;
-    row_counter             INT := 0;
-    row_number              INT;
-    slice_start             INT;
-    slice_end               INT;
-    full_end                INT;
-    all_records             BIGINT[];
-    all_brecords             BIGINT[];
-    all_arecords            BIGINT[];
-    superpage_of_records    BIGINT[];
-    superpage_size          INT;
-    c_tests                 TEXT := '';
-    b_tests                 TEXT := '';
-    c_orgs                  INT[];
-BEGIN
-    IF count_up_from_zero THEN
-        row_number := 0;
-    ELSE
-        row_number := -1;
-    END IF;
-
-    IF NOT staff THEN
-        SELECT x.c_attrs, x.b_attrs INTO c_tests, b_tests FROM asset.patron_default_visibility_mask() x;
-    END IF;
-
-    IF c_tests <> '' THEN c_tests := c_tests || '&'; END IF;
-    IF b_tests <> '' THEN b_tests := b_tests || '&'; END IF;
-
-    SELECT ARRAY_AGG(id) INTO c_orgs FROM actor.org_unit_descendants(context_org);
-    
-    c_tests := c_tests || search.calculate_visibility_attribute_test('circ_lib',c_orgs)
-               || '&' || search.calculate_visibility_attribute_test('owning_lib',c_orgs);
-    
-    PERFORM 1 FROM config.internal_flag WHERE enabled AND name = 'opac.located_uri.act_as_copy';
-    IF FOUND THEN
-        b_tests := b_tests || search.calculate_visibility_attribute_test(
-            'luri_org',
-            (SELECT ARRAY_AGG(id) FROM actor.org_unit_full_path(context_org) x)
-        );
-    ELSE
-        b_tests := b_tests || search.calculate_visibility_attribute_test(
-            'luri_org',
-            (SELECT ARRAY_AGG(id) FROM actor.org_unit_ancestors(context_org) x)
-        );
-    END IF;
-
-    IF context_locations IS NOT NULL THEN
-        IF c_tests <> '' THEN c_tests := c_tests || '&'; END IF;
-        c_tests := c_tests || search.calculate_visibility_attribute_test('location',context_locations);
-    END IF;
-
-    OPEN curs NO SCROLL FOR EXECUTE query;
-
-    LOOP
-        FETCH curs INTO rec;
-        IF NOT FOUND THEN
-            IF result_row.pivot_point IS NOT NULL THEN
-                RETURN NEXT result_row;
-            END IF;
-            RETURN;
-        END IF;
-
-        -- Gather aggregate data based on the MBE row we're looking at now, authority axis
-        SELECT INTO all_arecords, result_row.sees, afields
-                ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility
-                STRING_AGG(DISTINCT aal.source::TEXT, $$,$$), -- authority record ids
-                ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows
-
-          FROM  metabib.browse_entry_simple_heading_map mbeshm
-                JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
-                JOIN authority.authority_linking aal ON ( ash.record = aal.source )
-                JOIN authority.bib_linking abl ON ( aal.target = abl.authority )
-                JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
-                    ash.atag = map.authority_field
-                    AND map.metabib_field = ANY(fields)
-                )
-          WHERE mbeshm.entry = rec.id;
-
-        -- Gather aggregate data based on the MBE row we're looking at now, bib axis
-        SELECT INTO all_brecords, result_row.authorities, bfields
-                ARRAY_AGG(DISTINCT source),
-                STRING_AGG(DISTINCT authority::TEXT, $$,$$),
-                ARRAY_AGG(DISTINCT def)
-          FROM  metabib.browse_entry_def_map
-          WHERE entry = rec.id
-                AND def = ANY(fields);
-
-        SELECT INTO result_row.fields STRING_AGG(DISTINCT x::TEXT, $$,$$) FROM UNNEST(afields || bfields) x;
-
-        result_row.sources := 0;
-        result_row.asources := 0;
-
-        -- Bib-linked vis checking
-        IF ARRAY_UPPER(all_brecords,1) IS NOT NULL THEN
-
-            SELECT  INTO result_row.sources COUNT(DISTINCT b.id)
-              FROM  biblio.record_entry b
-                    JOIN asset.copy_vis_attr_cache acvac ON (acvac.record = b.id)
-              WHERE b.id = ANY(all_brecords[1:browse_superpage_size])
-                    AND (
-                        acvac.vis_attr_vector @@ c_tests::query_int
-                        OR b.vis_attr_vector @@ b_tests::query_int
-                    );
-
-            result_row.accurate := TRUE;
-
-        END IF;
-
-        -- Authority-linked vis checking
-        IF ARRAY_UPPER(all_arecords,1) IS NOT NULL THEN
-
-            SELECT  INTO result_row.asources COUNT(DISTINCT b.id)
-              FROM  biblio.record_entry b
-                    JOIN asset.copy_vis_attr_cache acvac ON (acvac.record = b.id)
-              WHERE b.id = ANY(all_arecords[1:browse_superpage_size])
-                    AND (
-                        acvac.vis_attr_vector @@ c_tests::query_int
-                        OR b.vis_attr_vector @@ b_tests::query_int
-                    );
-
-            result_row.aaccurate := TRUE;
-
-        END IF;
-
-        IF result_row.sources > 0 OR result_row.asources > 0 THEN
-
-            -- The function that calls this function needs row_number in order
-            -- to correctly order results from two different runs of this
-            -- function.
-            result_row.row_number := row_number;
-
-            -- Now, if row_counter is still less than limit, return a row.  If
-            -- not, but it is less than next_pivot_pos, continue on without
-            -- returning actual result rows until we find
-            -- that next pivot, and return it.
-
-            IF row_counter < result_limit THEN
-                result_row.browse_entry := rec.id;
-                result_row.value := rec.value;
-
-                RETURN NEXT result_row;
-            ELSE
-                result_row.browse_entry := NULL;
-                result_row.authorities := NULL;
-                result_row.fields := NULL;
-                result_row.value := NULL;
-                result_row.sources := NULL;
-                result_row.sees := NULL;
-                result_row.accurate := NULL;
-                result_row.aaccurate := NULL;
-                result_row.pivot_point := rec.id;
-
-                IF row_counter >= next_pivot_pos THEN
-                    RETURN NEXT result_row;
-                    RETURN;
-                END IF;
-            END IF;
-
-            IF count_up_from_zero THEN
-                row_number := row_number + 1;
-            ELSE
-                row_number := row_number - 1;
-            END IF;
-
-            -- row_counter is different from row_number.
-            -- It simply counts up from zero so that we know when
-            -- we've reached our limit.
-            row_counter := row_counter + 1;
-        END IF;
-    END LOOP;
-END;
-$f$ LANGUAGE plpgsql ROWS 10;
-
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON biblio.peer_bib_copy_map;
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON biblio.record_entry;
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON asset.copy;
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON asset.call_number;
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON asset.copy_location;
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON serial.unit;
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON config.copy_status;
-DROP TRIGGER IF EXISTS a_opac_vis_mat_view_tgr ON actor.org_unit;
-
--- Upgrade the data!
-INSERT INTO asset.copy_vis_attr_cache (target_copy, record, vis_attr_vector)
-    SELECT  cp.id,
-            cn.record,
-            asset.calculate_copy_visibility_attribute_set(cp.id)
-      FROM  asset.copy cp
-            JOIN asset.call_number cn ON (cp.call_number = cn.id);
-
-UPDATE biblio.record_entry SET vis_attr_vector = biblio.calculate_bib_visibility_attribute_set(id);
-
-CREATE TRIGGER z_opac_vis_mat_view_tgr BEFORE INSERT OR UPDATE ON biblio.record_entry FOR EACH ROW EXECUTE PROCEDURE asset.cache_copy_visibility();
-CREATE TRIGGER z_opac_vis_mat_view_tgr AFTER INSERT OR DELETE ON biblio.peer_bib_copy_map FOR EACH ROW EXECUTE PROCEDURE asset.cache_copy_visibility();
-CREATE TRIGGER z_opac_vis_mat_view_tgr AFTER UPDATE ON asset.call_number FOR EACH ROW EXECUTE PROCEDURE asset.cache_copy_visibility();
-CREATE TRIGGER z_opac_vis_mat_view_del_tgr BEFORE DELETE ON asset.copy FOR EACH ROW EXECUTE PROCEDURE asset.cache_copy_visibility();
-CREATE TRIGGER z_opac_vis_mat_view_del_tgr BEFORE DELETE ON serial.unit FOR EACH ROW EXECUTE PROCEDURE asset.cache_copy_visibility();
-CREATE TRIGGER z_opac_vis_mat_view_tgr AFTER INSERT OR UPDATE ON asset.copy FOR EACH ROW EXECUTE PROCEDURE asset.cache_copy_visibility();
-CREATE TRIGGER z_opac_vis_mat_view_tgr AFTER INSERT OR UPDATE ON serial.unit FOR EACH ROW EXECUTE PROCEDURE asset.cache_copy_visibility();
-
-CREATE OR REPLACE FUNCTION asset.opac_ou_record_copy_count (org INT, rid BIGINT) RETURNS TABLE (depth INT, org_unit INT, visible BIGINT, available BIGINT, unshadow BIGINT, transcendant INT) AS $f$
-DECLARE
-    ans RECORD;
-    trans INT;
-BEGIN
-    SELECT 1 INTO trans FROM biblio.record_entry b JOIN config.bib_source src ON (b.source = src.id) WHERE src.transcendant AND b.id = rid;
-
-    FOR ans IN SELECT u.id, t.depth FROM actor.org_unit_ancestors(org) AS u JOIN actor.org_unit_type t ON (u.ou_type = t.id) LOOP
-        RETURN QUERY
-        WITH org_list AS (SELECT ARRAY_AGG(id)::BIGINT[] AS orgs FROM actor.org_unit_descendants(ans.id) x),
-             available_statuses AS (SELECT ARRAY_AGG(id) AS ids FROM config.copy_status WHERE is_available),
-             mask AS (SELECT c_attrs FROM asset.patron_default_visibility_mask() x)
-        SELECT  ans.depth,
-                ans.id,
-                COUNT( av.id ),
-                SUM( (cp.status = ANY (available_statuses.ids))::INT ),
-                COUNT( av.id ),
-                trans
-          FROM  mask,
-                available_statuses,
-                org_list,
-                asset.copy_vis_attr_cache av
-                JOIN asset.copy cp ON (cp.id = av.target_copy AND av.record = rid)
-          WHERE cp.circ_lib = ANY (org_list.orgs) AND av.vis_attr_vector @@ mask.c_attrs::query_int
-          GROUP BY 1,2,6;
-
-        IF NOT FOUND THEN
-            RETURN QUERY SELECT ans.depth, ans.id, 0::BIGINT, 0::BIGINT, 0::BIGINT, trans;
-        END IF;
-
-    END LOOP;
-
-    RETURN;
-END;
-$f$ LANGUAGE PLPGSQL;
-
-CREATE OR REPLACE FUNCTION asset.opac_lasso_record_copy_count (i_lasso INT, rid BIGINT) RETURNS TABLE (depth INT, org_unit INT, visible BIGINT, available BIGINT, unshadow BIGINT, transcendant INT) AS $f$
-DECLARE
-    ans RECORD;
-    trans INT;
-BEGIN
-    SELECT 1 INTO trans FROM biblio.record_entry b JOIN config.bib_source src ON (b.source = src.id) WHERE src.transcendant AND b.id = rid;
-
-    FOR ans IN SELECT u.org_unit AS id FROM actor.org_lasso_map AS u WHERE lasso = i_lasso LOOP
-        RETURN QUERY
-        WITH org_list AS (SELECT ARRAY_AGG(id)::BIGINT[] AS orgs FROM actor.org_unit_descendants(ans.id) x),
-             available_statuses AS (SELECT ARRAY_AGG(id) AS ids FROM config.copy_status WHERE is_available),
-             mask AS (SELECT c_attrs FROM asset.patron_default_visibility_mask() x)
-        SELECT  -1,
-                ans.id,
-                COUNT( av.id ),
-                SUM( (cp.status = ANY (available_statuses.ids))::INT ),
-                COUNT( av.id ),
-                trans
-          FROM  mask,
-                org_list,
-                asset.copy_vis_attr_cache av
-                JOIN asset.copy cp ON (cp.id = av.target_copy AND av.record = rid)
-          WHERE cp.circ_lib = ANY (org_list.orgs) AND av.vis_attr_vector @@ mask.c_attrs::query_int
-          GROUP BY 1,2,6;
-
-        IF NOT FOUND THEN
-            RETURN QUERY SELECT -1, ans.id, 0::BIGINT, 0::BIGINT, 0::BIGINT, trans;
-        END IF;
-
-    END LOOP;
-
-    RETURN;
-END;
-$f$ LANGUAGE PLPGSQL;
-
-CREATE OR REPLACE FUNCTION asset.opac_ou_metarecord_copy_count (org INT, rid BIGINT) RETURNS TABLE (depth INT, org_unit INT, visible BIGINT, available BIGINT, unshadow BIGINT, transcendant INT) AS $f$
-DECLARE
-    ans RECORD;
-    trans INT;
-BEGIN
-    SELECT 1 INTO trans FROM biblio.record_entry b JOIN config.bib_source src ON (b.source = src.id) JOIN metabib.metarecord_source_map m ON (m.source = b.id) WHERE src.transcendant AND m.metarecord = rid;
-
-    FOR ans IN SELECT u.id, t.depth FROM actor.org_unit_ancestors(org) AS u JOIN actor.org_unit_type t ON (u.ou_type = t.id) LOOP
-        RETURN QUERY
-        WITH org_list AS (SELECT ARRAY_AGG(id)::BIGINT[] AS orgs FROM actor.org_unit_descendants(ans.id) x),
-             available_statuses AS (SELECT ARRAY_AGG(id) AS ids FROM config.copy_status WHERE is_available),
-             mask AS (SELECT c_attrs FROM asset.patron_default_visibility_mask() x)
-        SELECT  ans.depth,
-                ans.id,
-                COUNT( av.id ),
-                SUM( (cp.status = ANY (available_statuses.ids))::INT ),
-                COUNT( av.id ),
-                trans
-          FROM  mask,
-                org_list,
-                available_statuses,
-                asset.copy_vis_attr_cache av
-                JOIN asset.copy cp ON (cp.id = av.target_copy)
-                JOIN metabib.metarecord_source_map m ON (m.metarecord = rid AND m.source = av.record)
-          WHERE cp.circ_lib = ANY (org_list.orgs) AND av.vis_attr_vector @@ mask.c_attrs::query_int
-          GROUP BY 1,2,6;
-
-        IF NOT FOUND THEN
-            RETURN QUERY SELECT ans.depth, ans.id, 0::BIGINT, 0::BIGINT, 0::BIGINT, trans;
-        END IF;
-
-    END LOOP;
-
-    RETURN;
-END;
-$f$ LANGUAGE PLPGSQL;
-
-CREATE OR REPLACE FUNCTION asset.opac_lasso_metarecord_copy_count (i_lasso INT, rid BIGINT) RETURNS TABLE (depth INT, org_unit INT, visible BIGINT, available BIGINT, unshadow BIGINT, transcendant INT) AS $f$
-DECLARE
-    ans RECORD;
-    trans INT;
-BEGIN
-    SELECT 1 INTO trans FROM biblio.record_entry b JOIN config.bib_source src ON (b.source = src.id) JOIN metabib.metarecord_source_map m ON (m.source = b.id) WHERE src.transcendant AND m.metarecord = rid;
-
-    FOR ans IN SELECT u.org_unit AS id FROM actor.org_lasso_map AS u WHERE lasso = i_lasso LOOP
-        RETURN QUERY
-        WITH org_list AS (SELECT ARRAY_AGG(id)::BIGINT[] AS orgs FROM actor.org_unit_descendants(ans.id) x),
-             available_statuses AS (SELECT ARRAY_AGG(id) AS ids FROM config.copy_status WHERE is_available),
-             mask AS (SELECT c_attrs FROM asset.patron_default_visibility_mask() x)
-        SELECT  -1,
-                ans.id,
-                COUNT( av.id ),
-                SUM( (cp.status = ANY (available_statuses.ids))::INT ),
-                COUNT( av.id ),
-                trans
-          FROM  mask,
-                org_list,
-                available_statuses,
-                asset.copy_vis_attr_cache av
-                JOIN asset.copy cp ON (cp.id = av.target_copy)
-                JOIN metabib.metarecord_source_map m ON (m.metarecord = rid AND m.source = av.record)
-          WHERE cp.circ_lib = ANY (org_list.orgs) AND av.vis_attr_vector @@ mask.c_attrs::query_int
-          GROUP BY 1,2,6;
-
-        IF NOT FOUND THEN
-            RETURN QUERY SELECT -1, ans.id, 0::BIGINT, 0::BIGINT, 0::BIGINT, trans;
-        END IF;
-
-    END LOOP;
-
-    RETURN;
-END;
-$f$ LANGUAGE PLPGSQL;
-
-CREATE OR REPLACE FUNCTION unapi.mmr_mra (
-    obj_id BIGINT,
-    format TEXT,
-    ename TEXT,
-    includes TEXT[],
-    org TEXT,
-    depth INT DEFAULT NULL,
-    slimit HSTORE DEFAULT NULL,
-    soffset HSTORE DEFAULT NULL,
-    include_xmlns BOOL DEFAULT TRUE,
-    pref_lib INT DEFAULT NULL
-) RETURNS XML AS $F$
-    SELECT  XMLELEMENT(
-        name attributes,
-        XMLATTRIBUTES(
-            CASE WHEN $9 THEN 'http://open-ils.org/spec/indexing/v1' ELSE NULL END AS xmlns,
-            'tag:open-ils.org:U2@mmr/' || $1 AS metarecord
-        ),
-        (SELECT XMLAGG(foo.y)
-          FROM (
-            WITH sourcelist AS (
-                WITH aou AS (SELECT COALESCE(id, (evergreen.org_top()).id) AS id FROM actor.org_unit WHERE shortname = $5 LIMIT 1),
-                     basevm AS (SELECT c_attrs FROM  asset.patron_default_visibility_mask()),
-                     circvm AS (SELECT search.calculate_visibility_attribute_test('circ_lib', ARRAY_AGG(aoud.id)) AS mask
-                                  FROM aou, LATERAL actor.org_unit_descendants(aou.id, $6) aoud)
-                SELECT  source
-                  FROM  aou, circvm, basevm, metabib.metarecord_source_map mmsm
-                  WHERE mmsm.metarecord = $1 AND (
-                    EXISTS (
-                        SELECT  1
-                          FROM  circvm, basevm, asset.copy_vis_attr_cache acvac
-                          WHERE acvac.vis_attr_vector @@ (basevm.c_attrs || '&' || circvm.mask)::query_int
-                                AND acvac.record = mmsm.source
-                    )
-                    OR EXISTS (SELECT 1 FROM evergreen.located_uris(source, aou.id, $10) LIMIT 1)
-                    OR EXISTS (SELECT 1 FROM biblio.record_entry b JOIN config.bib_source src ON (b.source = src.id) WHERE src.transcendant AND b.id = mmsm.source)
-                )
-            )
-            SELECT  cmra.aid,
-                    XMLELEMENT(
-                        name field,
-                        XMLATTRIBUTES(
-                            cmra.attr AS name,
-                            cmra.value AS "coded-value",
-                            cmra.aid AS "cvmid",
-                            rad.composite,
-                            rad.multi,
-                            rad.filter,
-                            rad.sorter,
-                            cmra.source_list
-                        ),
-                        cmra.value
-                    )
-              FROM  (
-                SELECT DISTINCT aid, attr, value, STRING_AGG(x.id::TEXT, ',') AS source_list
-                  FROM (
-                    SELECT  v.source AS id,
-                            c.id AS aid,
-                            c.ctype AS attr,
-                            c.code AS value
-                      FROM  metabib.record_attr_vector_list v
-                            JOIN config.coded_value_map c ON ( c.id = ANY( v.vlist ) )
-                    ) AS x
-                    JOIN sourcelist ON (x.id = sourcelist.source)
-                    GROUP BY 1, 2, 3
-                ) AS cmra
-                JOIN config.record_attr_definition rad ON (cmra.attr = rad.name)
-                UNION ALL
-            SELECT  umra.aid,
-                    XMLELEMENT(
-                        name field,
-                        XMLATTRIBUTES(
-                            umra.attr AS name,
-                            rad.composite,
-                            rad.multi,
-                            rad.filter,
-                            rad.sorter
-                        ),
-                        umra.value
-                    )
-              FROM  (
-                SELECT DISTINCT aid, attr, value
-                  FROM (
-                    SELECT  v.source AS id,
-                            m.id AS aid,
-                            m.attr AS attr,
-                            m.value AS value
-                      FROM  metabib.record_attr_vector_list v
-                            JOIN metabib.uncontrolled_record_attr_value m ON ( m.id = ANY( v.vlist ) )
-                    ) AS x
-                    JOIN sourcelist ON (x.id = sourcelist.source)
-                ) AS umra
-                JOIN config.record_attr_definition rad ON (umra.attr = rad.name)
-                ORDER BY 1
-
-            )foo(id,y)
-        )
-    )
-$F$ LANGUAGE SQL STABLE;
-
-CREATE OR REPLACE FUNCTION evergreen.ranked_volumes(
-    bibid BIGINT[],
-    ouid INT,
-    depth INT DEFAULT NULL,
-    slimit HSTORE DEFAULT NULL,
-    soffset HSTORE DEFAULT NULL,
-    pref_lib INT DEFAULT NULL,
-    includes TEXT[] DEFAULT NULL::TEXT[]
-) RETURNS TABLE(id BIGINT, name TEXT, label_sortkey TEXT, rank BIGINT) AS $$
-    WITH RECURSIVE ou_depth AS (
-        SELECT COALESCE(
-            $3,
-            (
-                SELECT depth
-                FROM actor.org_unit_type aout
-                    INNER JOIN actor.org_unit ou ON ou_type = aout.id
-                WHERE ou.id = $2
-            )
-        ) AS depth
-    ), descendant_depth AS (
-        SELECT  ou.id,
-                ou.parent_ou,
-                out.depth
-        FROM  actor.org_unit ou
-                JOIN actor.org_unit_type out ON (out.id = ou.ou_type)
-                JOIN anscestor_depth ad ON (ad.id = ou.id),
-                ou_depth
-        WHERE ad.depth = ou_depth.depth
-            UNION ALL
-        SELECT  ou.id,
-                ou.parent_ou,
-                out.depth
-        FROM  actor.org_unit ou
-                JOIN actor.org_unit_type out ON (out.id = ou.ou_type)
-                JOIN descendant_depth ot ON (ot.id = ou.parent_ou)
-    ), anscestor_depth AS (
-        SELECT  ou.id,
-                ou.parent_ou,
-                out.depth
-        FROM  actor.org_unit ou
-                JOIN actor.org_unit_type out ON (out.id = ou.ou_type)
-        WHERE ou.id = $2
-            UNION ALL
-        SELECT  ou.id,
-                ou.parent_ou,
-                out.depth
-        FROM  actor.org_unit ou
-                JOIN actor.org_unit_type out ON (out.id = ou.ou_type)
-                JOIN anscestor_depth ot ON (ot.parent_ou = ou.id)
-    ), descendants as (
-        SELECT ou.* FROM actor.org_unit ou JOIN descendant_depth USING (id)
-    )
-
-    SELECT ua.id, ua.name, ua.label_sortkey, MIN(ua.rank) AS rank FROM (
-        SELECT acn.id, owning_lib.name, acn.label_sortkey,
-            evergreen.rank_cp(acp),
-            RANK() OVER w
-        FROM asset.call_number acn
-            JOIN asset.copy acp ON (acn.id = acp.call_number)
-            JOIN descendants AS aou ON (acp.circ_lib = aou.id)
-            JOIN actor.org_unit AS owning_lib ON (acn.owning_lib = owning_lib.id)
-        WHERE acn.record = ANY ($1)
-            AND acn.deleted IS FALSE
-            AND acp.deleted IS FALSE
-            AND CASE WHEN ('exclude_invisible_acn' = ANY($7)) THEN
-                EXISTS (
-                    WITH basevm AS (SELECT c_attrs FROM  asset.patron_default_visibility_mask()),
-                         circvm AS (SELECT search.calculate_visibility_attribute_test('circ_lib', ARRAY[acp.circ_lib]) AS mask)
-                    SELECT  1
-                      FROM  basevm, circvm, asset.copy_vis_attr_cache acvac
-                      WHERE acvac.vis_attr_vector @@ (basevm.c_attrs || '&' || circvm.mask)::query_int
-                            AND acvac.target_copy = acp.id
-                            AND acvac.record = acn.record
-                ) ELSE TRUE END
-        GROUP BY acn.id, evergreen.rank_cp(acp), owning_lib.name, acn.label_sortkey, aou.id
-        WINDOW w AS (
-            ORDER BY
-                COALESCE(
-                    CASE WHEN aou.id = $2 THEN -20000 END,
-                    CASE WHEN aou.id = $6 THEN -10000 END,
-                    (SELECT distance - 5000
-                        FROM actor.org_unit_descendants_distance($6) as x
-                        WHERE x.id = aou.id AND $6 IN (
-                            SELECT q.id FROM actor.org_unit_descendants($2) as q)),
-                    (SELECT e.distance FROM actor.org_unit_descendants_distance($2) as e WHERE e.id = aou.id),
-                    1000
-                ),
-                evergreen.rank_cp(acp)
-        )
-    ) AS ua
-    GROUP BY ua.id, ua.name, ua.label_sortkey
-    ORDER BY rank, ua.name, ua.label_sortkey
-    LIMIT ($4 -> 'acn')::INT
-    OFFSET ($5 -> 'acn')::INT;
-$$ LANGUAGE SQL STABLE ROWS 10;
-
-
--- Evergreen DB patch XXXX.schema.action-trigger.event_definition.sms_preminder.sql
---
--- New action trigger event definition: 3 Day Courtesy Notice by SMS
---
-
--- check whether patch can be applied
-SELECT evergreen.upgrade_deps_block_check('1058', :eg_version); -- mccanna/csharp/gmcharlt
-
-INSERT INTO action_trigger.event_definition (id, active, owner, name, hook,
-        validator, reactor, delay, max_delay, delay_field, group_field, template)
-    VALUES (54, FALSE, 1,
-        '3 Day Courtesy Notice by SMS',
-        'checkout.due',
-        'CircIsOpen', 'SendSMS', '-3 days', '-2 days', 'due_date', 'usr',
-$$
-[%- USE date -%]
-[%- user = target.0.usr -%]
-[%- homelib = user.home_ou -%]
-[%- sms_number = helpers.get_user_setting(user.id, 'opac.default_sms_notify') -%]
-[%- sms_carrier = helpers.get_user_setting(user.id, 'opac.default_sms_carrier') -%]
-From: [%- helpers.get_org_setting(homelib.id, 'org.bounced_emails') || homelib.email || params.sender_email || default_sender %]
-To: [%- helpers.get_sms_gateway_email(sms_carrier,sms_number) %]
-Subject: Library Materials Due Soon
-
-You have items due soon:
-
-[% FOR circ IN target %]
-[%- copy_details = helpers.get_copy_bib_basics(circ.target_copy.id) -%]
-[% copy_details.title FILTER ucfirst %] by [% copy_details.author FILTER ucfirst %] due on [% date.format(helpers.format_date(circ.due_date), '%m-%d-%Y') %]
-
-[% END %]
-
-$$);
-
-INSERT INTO action_trigger.environment (event_def, path) VALUES
-    (54, 'circ_lib.billing_address'),
-    (54, 'target_copy.call_number'),
-    (54, 'usr'),
-    (54, 'usr.home_ou');
-
-
--- check whether patch can be applied
-SELECT evergreen.upgrade_deps_block_check('1059', :eg_version); --Stompro/DPearl/kmlussier
-
-CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
-SELECT  r.id,
-    r.fingerprint,
-    r.quality,
-    r.tcn_source,
-    r.tcn_value,
-    CONCAT_WS(' ', FIRST(title.value),FIRST(title_np.val)) AS title,
-    FIRST(author.value) AS author,
-    STRING_AGG(DISTINCT publisher.value, ', ') AS publisher,
-    STRING_AGG(DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$), ', ') AS pubdate,
-    CASE WHEN ARRAY_AGG( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) = '{NULL}'
-        THEN NULL
-        ELSE ARRAY_AGG( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') )
-    END AS isbn,
-    CASE WHEN ARRAY_AGG( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) = '{NULL}'
-        THEN NULL
-        ELSE ARRAY_AGG( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') )
-    END AS issn
-  FROM  biblio.record_entry r
-    LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
-    LEFT JOIN ( -- Grab 245 N and P subfields in the order that they appear.
-        SELECT b.record, string_agg(val, ' ') AS val FROM (
-            SELECT  title_np.record, title_np.value AS val
-              FROM  metabib.full_rec title_np
-              WHERE title_np.tag = '245'
-                    AND title_np.subfield IN ('p','n')
-              ORDER BY title_np.id
-        ) b
-        GROUP BY 1
-    ) title_np ON (title_np.record = r.id)
-    LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
-    LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND (publisher.tag = '260' OR (publisher.tag = '264' AND publisher.ind2 = '1')) AND publisher.subfield = 'b')
-    LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND (pubdate.tag = '260' OR (pubdate.tag = '264' AND pubdate.ind2 = '1')) AND pubdate.subfield = 'c')
-    LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
-    LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
-  GROUP BY 1,2,3,4,5;
-
-  
-  -- Remove trigger on biblio.record_entry
-  SELECT reporter.disable_materialized_simple_record_trigger();
-  
-  -- Rebuild reporter.materialized_simple_record
-  SELECT reporter.enable_materialized_simple_record_trigger();
-  
-
-SELECT evergreen.upgrade_deps_block_check('1060', :eg_version);
-
-DROP VIEW IF EXISTS extend_reporter.copy_count_per_org;
-
-
-CREATE OR REPLACE VIEW extend_reporter.copy_count_per_org AS
- SELECT acn.record AS bibid,
-    ac.circ_lib,
-    acn.owning_lib,
-    max(ac.edit_date) AS last_edit_time,
-    min(ac.deleted::integer) AS has_only_deleted_copies,
-    count(
-        CASE
-            WHEN ac.deleted THEN ac.id
-            ELSE NULL::bigint
-        END) AS deleted_count,
-    count(
-        CASE
-            WHEN NOT ac.deleted THEN ac.id
-            ELSE NULL::bigint
-        END) AS visible_count,
-    count(*) AS total_count
-   FROM asset.call_number acn,
-    asset.copy ac
-  WHERE ac.call_number = acn.id
-  GROUP BY acn.record, acn.owning_lib, ac.circ_lib;
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1061', :eg_version);
-
-INSERT INTO config.org_unit_setting_type
-    (name, label, description, grp, datatype)
-VALUES (
-    'ui.staff.max_recent_patrons',
-    oils_i18n_gettext(
-        'ui.staff.max_recent_patrons',
-        'Number of Retrievable Recent Patrons',
-        'coust',
-        'label'
-    ),
-    oils_i18n_gettext(
-        'ui.staff.max_recent_patrons',
-        'Number of most recently accessed patrons that can be re-retrieved ' ||
-        'in the staff client.  A value of 0 or less disables the feature. Defaults to 1.',
-        'coust',
-        'description'
-    ),
-    'circ',
-    'integer'
-);
-
-
-SELECT evergreen.upgrade_deps_block_check('1062', :eg_version);
-
-CREATE TABLE acq.edi_attr (
-    key     TEXT PRIMARY KEY,
-    label   TEXT NOT NULL UNIQUE
-);
-
-CREATE TABLE acq.edi_attr_set (
-    id      SERIAL  PRIMARY KEY,
-    label   TEXT NOT NULL UNIQUE
-);
-
-CREATE TABLE acq.edi_attr_set_map (
-    id          SERIAL  PRIMARY KEY,
-    attr_set    INTEGER NOT NULL REFERENCES acq.edi_attr_set(id) 
-                ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
-    attr        TEXT NOT NULL REFERENCES acq.edi_attr(key) 
-                ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
-    CONSTRAINT edi_attr_set_map_attr_once UNIQUE (attr_set, attr)
-);
-
--- An attr_set is not strictly required, since some edi_accounts/vendors 
--- may not need to apply any attributes.
-ALTER TABLE acq.edi_account 
-    ADD COLUMN attr_set INTEGER REFERENCES acq.edi_attr_set(id),
-    ADD COLUMN use_attrs BOOLEAN NOT NULL DEFAULT FALSE;
-
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1063', :eg_version);
-
-DO $temp$
-DECLARE
-       r RECORD;
-BEGIN
-
-       FOR r IN SELECT t.table_schema AS sname,
-                       t.table_name AS tname,
-                       t.column_name AS colname,
-                       t.constraint_name
-                 FROM  information_schema.referential_constraints ref
-                       JOIN information_schema.key_column_usage t USING (constraint_schema,constraint_name)
-                 WHERE ref.unique_constraint_schema = 'asset'
-                       AND ref.unique_constraint_name = 'copy_pkey'
-       LOOP
-
-               EXECUTE 'ALTER TABLE '||r.sname||'.'||r.tname||' DROP CONSTRAINT '||r.constraint_name||';';
-
-               EXECUTE '
-                       CREATE OR REPLACE FUNCTION evergreen.'||r.sname||'_'||r.tname||'_'||r.colname||'_inh_fkey() RETURNS TRIGGER AS $f$
-                       BEGIN
-                               PERFORM 1 FROM asset.copy WHERE id = NEW.'||r.colname||';
-                               IF NOT FOUND THEN
-                                       RAISE foreign_key_violation USING MESSAGE = FORMAT(
-                                               $$Referenced asset.copy id not found, '||r.colname||':%s$$, NEW.'||r.colname||'
-                                       );
-                               END IF;
-                               RETURN NEW;
-                       END;
-                       $f$ LANGUAGE PLPGSQL VOLATILE COST 50;
-               ';
-
-               EXECUTE '
-                       CREATE CONSTRAINT TRIGGER inherit_'||r.constraint_name||'
-                               AFTER UPDATE OR INSERT OR DELETE ON '||r.sname||'.'||r.tname||'
-                               DEFERRABLE FOR EACH ROW EXECUTE PROCEDURE evergreen.'||r.sname||'_'||r.tname||'_'||r.colname||'_inh_fkey();
-               ';
-       END LOOP;
-END
-$temp$;
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1064', :eg_version);
-
-ALTER TABLE serial.issuance DROP CONSTRAINT IF EXISTS issuance_caption_and_pattern_fkey;
-
--- Using NOT VALID and VALIDATE CONSTRAINT limits the impact to concurrent work.
--- For details, see: https://www.postgresql.org/docs/current/static/sql-altertable.html
-
-ALTER TABLE serial.issuance ADD CONSTRAINT issuance_caption_and_pattern_fkey
-    FOREIGN KEY (caption_and_pattern)
-    REFERENCES serial.caption_and_pattern (id)
-    ON DELETE CASCADE
-    DEFERRABLE INITIALLY DEFERRED
-    NOT VALID;
-
-ALTER TABLE serial.issuance VALIDATE CONSTRAINT issuance_caption_and_pattern_fkey;
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1065', :eg_version);
-
-CREATE TABLE serial.pattern_template (
-    id            SERIAL PRIMARY KEY,
-    name          TEXT NOT NULL,
-    pattern_code  TEXT NOT NULL,
-    owning_lib    INTEGER REFERENCES actor.org_unit(id) DEFERRABLE INITIALLY DEFERRED,
-    share_depth   INTEGER NOT NULL DEFAULT 0
-);
-CREATE INDEX serial_pattern_template_name_idx ON serial.pattern_template (evergreen.lowercase(name));
-
-CREATE OR REPLACE FUNCTION serial.pattern_templates_visible_to(org_unit INT) RETURNS SETOF serial.pattern_template AS $func$
-BEGIN
-    RETURN QUERY SELECT *
-           FROM serial.pattern_template spt
-           WHERE (
-             SELECT ARRAY_AGG(id)
-             FROM actor.org_unit_descendants(spt.owning_lib, spt.share_depth)
-           ) @@ org_unit::TEXT::QUERY_INT;
-END;
-$func$ LANGUAGE PLPGSQL;
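A minimal usage sketch for the function just defined; the org unit id 4 is hypothetical.

-- Sketch only: list prediction pattern templates shared down to org unit 4.
SELECT id, name, owning_lib, share_depth
  FROM serial.pattern_templates_visible_to(4);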
-
-
-SELECT evergreen.upgrade_deps_block_check('1066', :eg_version);
-
-INSERT INTO permission.perm_list ( id, code, description ) VALUES
- ( 593, 'ADMIN_SERIAL_PATTERN_TEMPLATE', oils_i18n_gettext( 593,
-    'Administer serial prediction pattern templates', 'ppl', 'description' ))
-;
-
-INSERT INTO permission.grp_perm_map (grp, perm, depth, grantable)
-    SELECT
-        pgt.id, perm.id, aout.depth, FALSE
-    FROM
-        permission.grp_tree pgt,
-        permission.perm_list perm,
-        actor.org_unit_type aout
-    WHERE
-        pgt.name = 'Serials' AND
-        aout.name = 'System' AND
-        perm.code IN (
-            'ADMIN_SERIAL_PATTERN_TEMPLATE'
-        );
-
-
-SELECT evergreen.upgrade_deps_block_check('1067', :eg_version);
-
-INSERT INTO acq.edi_attr (key, label) VALUES
-    ('INCLUDE_PO_NAME', 
-        oils_i18n_gettext('INCLUDE_PO_NAME', 
-        'Orders Include PO Name', 'aea', 'label')),
-    ('INCLUDE_COPIES', 
-        oils_i18n_gettext('INCLUDE_COPIES', 
-        'Orders Include Copy Data', 'aea', 'label')),
-    ('INCLUDE_FUND', 
-        oils_i18n_gettext('INCLUDE_FUND', 
-        'Orders Include Copy Funds', 'aea', 'label')),
-    ('INCLUDE_CALL_NUMBER', 
-        oils_i18n_gettext('INCLUDE_CALL_NUMBER', 
-        'Orders Include Copy Call Numbers', 'aea', 'label')),
-    ('INCLUDE_ITEM_TYPE', 
-        oils_i18n_gettext('INCLUDE_ITEM_TYPE', 
-        'Orders Include Copy Item Types', 'aea', 'label')),
-    ('INCLUDE_ITEM_BARCODE',
-        oils_i18n_gettext('INCLUDE_ITEM_BARCODE',
-        'Orders Include Copy Barcodes', 'aea', 'label')),
-    ('INCLUDE_LOCATION', 
-        oils_i18n_gettext('INCLUDE_LOCATION', 
-        'Orders Include Copy Locations', 'aea', 'label')),
-    ('INCLUDE_COLLECTION_CODE', 
-        oils_i18n_gettext('INCLUDE_COLLECTION_CODE', 
-        'Orders Include Copy Collection Codes', 'aea', 'label')),
-    ('INCLUDE_OWNING_LIB', 
-        oils_i18n_gettext('INCLUDE_OWNING_LIB', 
-        'Orders Include Copy Owning Library', 'aea', 'label')),
-    ('USE_ID_FOR_OWNING_LIB',
-        oils_i18n_gettext('USE_ID_FOR_OWNING_LIB',
-        'Emit Owning Library ID Rather Than Short Name. Takes effect only if INCLUDE_OWNING_LIB is in use', 'aea', 'label')),
-    ('INCLUDE_QUANTITY', 
-        oils_i18n_gettext('INCLUDE_QUANTITY', 
-        'Orders Include Copy Quantities', 'aea', 'label')),
-    ('INCLUDE_COPY_ID', 
-        oils_i18n_gettext('INCLUDE_COPY_ID', 
-        'Orders Include Copy IDs', 'aea', 'label')),
-    ('BUYER_ID_INCLUDE_VENDCODE', 
-        oils_i18n_gettext('BUYER_ID_INCLUDE_VENDCODE', 
-        'Buyer ID Qualifier Includes Vendcode', 'aea', 'label')),
-    ('BUYER_ID_ONLY_VENDCODE', 
-        oils_i18n_gettext('BUYER_ID_ONLY_VENDCODE', 
-        'Buyer ID Qualifier Only Contains Vendcode', 'aea', 'label')),
-    ('INCLUDE_BIB_EDITION', 
-        oils_i18n_gettext('INCLUDE_BIB_EDITION', 
-        'Order Lineitems Include Edition Info', 'aea', 'label')),
-    ('INCLUDE_BIB_AUTHOR', 
-        oils_i18n_gettext('INCLUDE_BIB_AUTHOR', 
-        'Order Lineitems Include Author Info', 'aea', 'label')),
-    ('INCLUDE_BIB_PAGINATION', 
-        oils_i18n_gettext('INCLUDE_BIB_PAGINATION', 
-        'Order Lineitems Include Pagination Info', 'aea', 'label')),
-    ('COPY_SPEC_CODES', 
-        oils_i18n_gettext('COPY_SPEC_CODES', 
-        'Order Lineitem Notes Include Copy Spec Codes', 'aea', 'label')),
-    ('INCLUDE_EMPTY_IMD_VALUES', 
-        oils_i18n_gettext('INCLUDE_EMPTY_IMD_VALUES',
-        'Lineitem Title, Author, etc. Fields Are Present Even if Empty', 'aea', 'label')),
-    ('INCLUDE_EMPTY_LI_NOTE', 
-        oils_i18n_gettext('INCLUDE_EMPTY_LI_NOTE', 
-        'Order Lineitem Notes Always Present (Even if Empty)', 'aea', 'label')),
-    ('INCLUDE_EMPTY_CALL_NUMBER', 
-        oils_i18n_gettext('INCLUDE_EMPTY_CALL_NUMBER', 
-        'Order Copies Always Include Call Number (Even if Empty)', 'aea', 'label')),
-    ('INCLUDE_EMPTY_ITEM_TYPE', 
-        oils_i18n_gettext('INCLUDE_EMPTY_ITEM_TYPE', 
-        'Order Copies Always Include Item Type (Even if Empty)', 'aea', 'label')),
-    ('INCLUDE_EMPTY_LOCATION', 
-        oils_i18n_gettext('INCLUDE_EMPTY_LOCATION', 
-        'Order Copies Always Include Location (Even if Empty)', 'aea', 'label')),
-    ('INCLUDE_EMPTY_COLLECTION_CODE', 
-        oils_i18n_gettext('INCLUDE_EMPTY_COLLECTION_CODE', 
-        'Order Copies Always Include Collection Code (Even if Empty)', 'aea', 'label')),
-    ('LINEITEM_IDENT_VENDOR_NUMBER',
-        oils_i18n_gettext('LINEITEM_IDENT_VENDOR_NUMBER',
-        'Lineitem Identifier Fields (LIN/PIA) Use Vendor-Encoded ID Value When Available', 'aea', 'label')),
-    ('LINEITEM_REF_ID_ONLY',
-        oils_i18n_gettext('LINEITEM_REF_ID_ONLY',
-        'Lineitem Reference Field (RFF) Uses Lineitem ID Only', 'aea', 'label'))
-
-;
-
-INSERT INTO acq.edi_attr_set (id, label) VALUES (1, 'Ingram Default');
-INSERT INTO acq.edi_attr_set (id, label) VALUES (2, 'Baker & Taylor Default');
-INSERT INTO acq.edi_attr_set (id, label) VALUES (3, 'Brodart Default');
-INSERT INTO acq.edi_attr_set (id, label) VALUES (4, 'Midwest Tape Default');
-INSERT INTO acq.edi_attr_set (id, label) VALUES (5, 'ULS Default');
-INSERT INTO acq.edi_attr_set (id, label) VALUES (6, 'Recorded Books Default');
-INSERT INTO acq.edi_attr_set (id, label) VALUES (7, 'Midwest Library Service');
-
--- Reserve ids through 1000 for stock attribute set defaults; locally created sets get higher ids.
-SELECT SETVAL('acq.edi_attr_set_id_seq'::TEXT, 1000);
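To make the effect of the SETVAL concrete (a sketch, with a made-up vendor name): any attribute set created locally after this upgrade draws the next sequence value, so it lands above 1000 and cannot collide with stock sets added in later releases.

-- Sketch only: a locally defined set; SERIAL assigns id 1001 or higher.
INSERT INTO acq.edi_attr_set (label) VALUES ('Example Local Vendor Default');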
-
-INSERT INTO acq.edi_attr_set_map (attr_set, attr) VALUES
-
-    -- Ingram
-    (1, 'INCLUDE_PO_NAME'),
-    (1, 'INCLUDE_COPIES'),
-    (1, 'INCLUDE_ITEM_TYPE'),
-    (1, 'INCLUDE_COLLECTION_CODE'),
-    (1, 'INCLUDE_OWNING_LIB'),
-    (1, 'INCLUDE_QUANTITY'),
-    (1, 'INCLUDE_BIB_PAGINATION'),
-
-    -- B&T
-    (2, 'INCLUDE_COPIES'),
-    (2, 'INCLUDE_ITEM_TYPE'),
-    (2, 'INCLUDE_COLLECTION_CODE'),
-    (2, 'INCLUDE_CALL_NUMBER'),
-    (2, 'INCLUDE_OWNING_LIB'),
-    (2, 'INCLUDE_QUANTITY'),
-    (2, 'INCLUDE_BIB_PAGINATION'),
-    (2, 'BUYER_ID_INCLUDE_VENDCODE'),
-    (2, 'INCLUDE_EMPTY_LI_NOTE'),
-    (2, 'INCLUDE_EMPTY_CALL_NUMBER'),
-    (2, 'INCLUDE_EMPTY_ITEM_TYPE'),
-    (2, 'INCLUDE_EMPTY_COLLECTION_CODE'),
-    (2, 'INCLUDE_EMPTY_LOCATION'),
-    (2, 'LINEITEM_IDENT_VENDOR_NUMBER'),
-    (2, 'LINEITEM_REF_ID_ONLY'),
-
-    -- Brodart
-    (3, 'INCLUDE_COPIES'),
-    (3, 'INCLUDE_FUND'),
-    (3, 'INCLUDE_ITEM_TYPE'),
-    (3, 'INCLUDE_COLLECTION_CODE'),
-    (3, 'INCLUDE_OWNING_LIB'),
-    (3, 'INCLUDE_QUANTITY'),
-    (3, 'INCLUDE_BIB_PAGINATION'),
-    (3, 'COPY_SPEC_CODES'),
-
-    -- Midwest
-    (4, 'INCLUDE_COPIES'),
-    (4, 'INCLUDE_FUND'),
-    (4, 'INCLUDE_OWNING_LIB'),
-    (4, 'INCLUDE_QUANTITY'),
-    (4, 'INCLUDE_BIB_PAGINATION'),
-
-    -- ULS
-    (5, 'INCLUDE_COPIES'),
-    (5, 'INCLUDE_ITEM_TYPE'),
-    (5, 'INCLUDE_COLLECTION_CODE'),
-    (5, 'INCLUDE_OWNING_LIB'),
-    (5, 'INCLUDE_QUANTITY'),
-    (5, 'INCLUDE_BIB_AUTHOR'),
-    (5, 'INCLUDE_BIB_EDITION'),
-    (5, 'INCLUDE_EMPTY_LI_NOTE'),
-
-    -- Recorded Books
-    (6, 'INCLUDE_COPIES'),
-    (6, 'INCLUDE_ITEM_TYPE'),
-    (6, 'INCLUDE_COLLECTION_CODE'),
-    (6, 'INCLUDE_OWNING_LIB'),
-    (6, 'INCLUDE_QUANTITY'),
-    (6, 'INCLUDE_BIB_PAGINATION'),
-
-    -- Midwest Library Service
-    (7, 'INCLUDE_BIB_AUTHOR'),
-    (7, 'INCLUDE_BIB_EDITION'),
-    (7, 'BUYER_ID_ONLY_VENDCODE'),
-    (7, 'INCLUDE_EMPTY_IMD_VALUES')
-;
-
-
-
-
-
-SELECT evergreen.upgrade_deps_block_check('1068', :eg_version); --miker/gmcharlt/kmlussier
-
-INSERT INTO config.xml_transform (name,namespace_uri,prefix,xslt) VALUES ('mads21','http://www.loc.gov/mads/v2','mads21',$XSLT$<?xml version="1.0" encoding="UTF-8"?>
-<xsl:stylesheet version="1.0" xmlns:mads="http://www.loc.gov/mads/v2"
-       xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:marc="http://www.loc.gov/MARC21/slim"
-       xmlns:xsl="http://www.w3.org/1999/XSL/Transform" exclude-result-prefixes="marc">
-       <xsl:output method="xml" indent="yes" encoding="UTF-8"/>
-       <xsl:strip-space elements="*"/>
-
-       <xsl:variable name="ascii">
-               <xsl:text> !"#$%&amp;'()*+,-./0123456789:;&lt;=&gt;?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~</xsl:text>
-       </xsl:variable>
-
-       <xsl:variable name="latin1">
-               <xsl:text> ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ</xsl:text>
-       </xsl:variable>
-       <!-- Characters that usually don't need to be escaped -->
-       <xsl:variable name="safe">
-               <xsl:text>!'()*-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~</xsl:text>
-       </xsl:variable>
-
-       <xsl:variable name="hex">0123456789ABCDEF</xsl:variable>
-
-
-       <xsl:template name="datafield">
-               <xsl:param name="tag"/>
-               <xsl:param name="ind1">
-                       <xsl:text> </xsl:text>
-               </xsl:param>
-               <xsl:param name="ind2">
-                       <xsl:text> </xsl:text>
-               </xsl:param>
-               <xsl:param name="subfields"/>
-               <xsl:element name="marc:datafield">
-                       <xsl:attribute name="tag">
-                               <xsl:value-of select="$tag"/>
-                       </xsl:attribute>
-                       <xsl:attribute name="ind1">
-                               <xsl:value-of select="$ind1"/>
-                       </xsl:attribute>
-                       <xsl:attribute name="ind2">
-                               <xsl:value-of select="$ind2"/>
-                       </xsl:attribute>
-                       <xsl:copy-of select="$subfields"/>
-               </xsl:element>
-       </xsl:template>
-
-       <xsl:template name="subfieldSelect">
-               <xsl:param name="codes">abcdefghijklmnopqrstuvwxyz</xsl:param>
-               <xsl:param name="delimeter">
-                       <xsl:text> </xsl:text>
-               </xsl:param>
-               <xsl:variable name="str">
-                       <xsl:for-each select="marc:subfield">
-                               <xsl:if test="contains($codes, @code)">
-                                       <xsl:value-of select="text()"/>
-                                       <xsl:value-of select="$delimeter"/>
-                               </xsl:if>
-                       </xsl:for-each>
-               </xsl:variable>
-               <xsl:value-of select="substring($str,1,string-length($str)-string-length($delimeter))"/>
-       </xsl:template>
-
-       <xsl:template name="buildSpaces">
-               <xsl:param name="spaces"/>
-               <xsl:param name="char">
-                       <xsl:text> </xsl:text>
-               </xsl:param>
-               <xsl:if test="$spaces>0">
-                       <xsl:value-of select="$char"/>
-                       <xsl:call-template name="buildSpaces">
-                               <xsl:with-param name="spaces" select="$spaces - 1"/>
-                               <xsl:with-param name="char" select="$char"/>
-                       </xsl:call-template>
-               </xsl:if>
-       </xsl:template>
-
-       <xsl:template name="chopPunctuation">
-               <xsl:param name="chopString"/>
-               <xsl:param name="punctuation">
-                       <xsl:text>.:,;/ </xsl:text>
-               </xsl:param>
-               <xsl:variable name="length" select="string-length($chopString)"/>
-               <xsl:choose>
-                       <xsl:when test="$length=0"/>
-                       <xsl:when test="contains($punctuation, substring($chopString,$length,1))">
-                               <xsl:call-template name="chopPunctuation">
-                                       <xsl:with-param name="chopString" select="substring($chopString,1,$length - 1)"/>
-                                       <xsl:with-param name="punctuation" select="$punctuation"/>
-                               </xsl:call-template>
-                       </xsl:when>
-                       <xsl:when test="not($chopString)"/>
-                       <xsl:otherwise>
-                               <xsl:value-of select="$chopString"/>
-                       </xsl:otherwise>
-               </xsl:choose>
-       </xsl:template>
-
-       <xsl:template name="chopPunctuationFront">
-               <xsl:param name="chopString"/>
-               <xsl:variable name="length" select="string-length($chopString)"/>
-               <xsl:choose>
-                       <xsl:when test="$length=0"/>
-                       <xsl:when test="contains('.:,;/[ ', substring($chopString,1,1))">
-                               <xsl:call-template name="chopPunctuationFront">
-                                       <xsl:with-param name="chopString" select="substring($chopString,2,$length - 1)"
-                                       />
-                               </xsl:call-template>
-                       </xsl:when>
-                       <xsl:when test="not($chopString)"/>
-                       <xsl:otherwise>
-                               <xsl:value-of select="$chopString"/>
-                       </xsl:otherwise>
-               </xsl:choose>
-       </xsl:template>
-
-       <xsl:template name="chopPunctuationBack">
-               <xsl:param name="chopString"/>
-               <xsl:param name="punctuation">
-                       <xsl:text>.:,;/] </xsl:text>
-               </xsl:param>
-               <xsl:variable name="length" select="string-length($chopString)"/>
-               <xsl:choose>
-                       <xsl:when test="$length=0"/>
-                       <xsl:when test="contains($punctuation, substring($chopString,$length,1))">
-                               <xsl:call-template name="chopPunctuation">
-                                       <xsl:with-param name="chopString" select="substring($chopString,1,$length - 1)"/>
-                                       <xsl:with-param name="punctuation" select="$punctuation"/>
-                               </xsl:call-template>
-                       </xsl:when>
-                       <xsl:when test="not($chopString)"/>
-                       <xsl:otherwise>
-                               <xsl:value-of select="$chopString"/>
-                       </xsl:otherwise>
-               </xsl:choose>
-       </xsl:template>
-
-       <!-- nate added 12/14/2007 for lccn.loc.gov: url encode ampersand, etc. -->
-       <xsl:template name="url-encode">
-
-               <xsl:param name="str"/>
-
-               <xsl:if test="$str">
-                       <xsl:variable name="first-char" select="substring($str,1,1)"/>
-                       <xsl:choose>
-                               <xsl:when test="contains($safe,$first-char)">
-                                       <xsl:value-of select="$first-char"/>
-                               </xsl:when>
-                               <xsl:otherwise>
-                                       <xsl:variable name="codepoint">
-                                               <xsl:choose>
-                                                       <xsl:when test="contains($ascii,$first-char)">
-                                                               <xsl:value-of
-                                                                       select="string-length(substring-before($ascii,$first-char)) + 32"
-                                                               />
-                                                       </xsl:when>
-                                                       <xsl:when test="contains($latin1,$first-char)">
-                                                               <xsl:value-of
-                                                                       select="string-length(substring-before($latin1,$first-char)) + 160"/>
-                                                               <!-- was 160 -->
-                                                       </xsl:when>
-                                                       <xsl:otherwise>
-                                                               <xsl:message terminate="no">Warning: string contains a character
-                                                                       that is out of range! Substituting "?".</xsl:message>
-                                                               <xsl:text>63</xsl:text>
-                                                       </xsl:otherwise>
-                                               </xsl:choose>
-                                       </xsl:variable>
-                                       <xsl:variable name="hex-digit1"
-                                               select="substring($hex,floor($codepoint div 16) + 1,1)"/>
-                                       <xsl:variable name="hex-digit2" select="substring($hex,$codepoint mod 16 + 1,1)"/>
-                                       <!-- <xsl:value-of select="concat('%',$hex-digit2)"/> -->
-                                       <xsl:value-of select="concat('%',$hex-digit1,$hex-digit2)"/>
-                               </xsl:otherwise>
-                       </xsl:choose>
-                       <xsl:if test="string-length($str) &gt; 1">
-                               <xsl:call-template name="url-encode">
-                                       <xsl:with-param name="str" select="substring($str,2)"/>
-                               </xsl:call-template>
-                       </xsl:if>
-               </xsl:if>
-       </xsl:template>
-
-
-<!--
-2.14  Fixed bug in mads:geographic attributes syntax  (ws, 05/04/2016)
-2.13  fixed repeating <geographic>  (tmee, 01/31/2014)
-2.12  added $2 authority for <classification>  (tmee, 09/18/2012)
-2.11  added delimiters between <classification> subfields  (tmee, 09/18/2012)
-2.10  fixed type="other" and type="otherType" for mads:related  (tmee, 09/16/2011)
-2.09  fixed professionTerm and genreTerm empty tag error  (tmee, 09/16/2011)
-2.08  fixed marc:subfield @code='i' matching error  (tmee, 09/16/2011)
-2.07  fixed 555 duplication error  (tmee, 08/10/2011)
-2.06  fixed topic subfield error  (tmee, 08/10/2011)
-2.05  fixed title subfield error  (tmee, 06/20/2011)
-2.04  fixed geographicSubdivision mapping for authority element  (tmee, 06/16/2011)
-2.03  added classification for 053, 055, 060, 065, 070, 080, 082, 083, 086, 087  (tmee, 06/03/2011)
-2.02  added descriptionStandard for 008/10  (tmee, 04/27/2011)
-2.01  added extensions for 046, 336, 370, 374, 375, 376  (tmee, 04/08/2011)
-2.00  redefined imported MODS elements in version 1.0 to MADS elements in version 2.0  (tmee, 02/08/2011)
-1.08  added 372 subfields $a $s $t for <fieldOfActivity>  (tmee, 06/24/2010)
-1.07  removed role/roleTerm 100, 110, 111, 400, 410, 411, 500, 510, 511, 700, 710, 711  (tmee, 06/24/2010)
-1.06  added strip-space  (tmee, 06/24/2010)
-1.05  added subfield $a for 130, 430, 530  (tmee, 06/21/2010)
-1.04  fixed 550 z omission  (ntra, 08/11/2008)
-1.03  removed duplication of 550 $a text  (tmee, 11/01/2006)
-1.02  fixed namespace references between mads and mods  (ntra, 10/06/2006)
-1.01  revised  (rgue/jrad, 11/29/05)
-1.00  adapted from MARC21Slim2MODS3.xsl  (ntra, 07/06/05)
--->
-
-       <!-- authority attribute defaults to 'naf' if not set using this authority parameter, for <authority> descriptors: name, titleInfo, geographic -->
-       <xsl:param name="authority"/>
-       <xsl:variable name="auth">
-               <xsl:choose>
-                       <xsl:when test="$authority">
-                               <xsl:value-of select="$authority"/>
-                       </xsl:when>
-                       <xsl:otherwise>naf</xsl:otherwise>
-               </xsl:choose>
-       </xsl:variable>
-       <xsl:variable name="controlField008" select="marc:controlfield[@tag='008']"/>
-       <xsl:variable name="controlField008-06"
-               select="substring(descendant-or-self::marc:controlfield[@tag=008],7,1)"/>
-       <xsl:variable name="controlField008-11"
-               select="substring(descendant-or-self::marc:controlfield[@tag=008],12,1)"/>
-       <xsl:variable name="controlField008-14"
-               select="substring(descendant-or-self::marc:controlfield[@tag=008],15,1)"/>
-       <xsl:template match="/">
-               <xsl:choose>
-                       <xsl:when test="descendant-or-self::marc:collection">
-                               <mads:madsCollection xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-                                       xsi:schemaLocation="http://www.loc.gov/mads/v2 http://www.loc.gov/standards/mads/v2/mads-2-0.xsd">
-                                       <xsl:for-each select="descendant-or-self::marc:collection/marc:record">
-                                               <mads:mads version="2.0">
-                                               <mads:mads version="2.0">