1 --Upgrade Script for 2.3 to 2.4.0
-- Define the target version as a psql variable; it is passed as :eg_version
-- to every evergreen.upgrade_deps_block_check() call below.
2 \set eg_version '''2.4.0'''
-- Record that this upgrade script has been applied.
-- NOTE(review): the embedded original line numbers in this extract are
-- non-contiguous -- interior lines are missing, so several statements below
-- are incomplete as shown. Review against the full script before running.
4 INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.4.0', :eg_version);
5 -- remove the Bypass hold capture during clear shelf process setting
-- Patch 0739: guard against re-application, then fully retire the
-- 'circ.holds.clear_shelf.no_capture_holds' org-unit setting.
7 SELECT evergreen.upgrade_deps_block_check('0739', :eg_version);
-- Delete in dependency order: setting values first, then log rows keyed by
-- field_name, and finally the setting type definition itself.
10 DELETE FROM actor.org_unit_setting WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
11 DELETE FROM config.org_unit_setting_type_log WHERE field_name = 'circ.holds.clear_shelf.no_capture_holds';
14 DELETE FROM config.org_unit_setting_type WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
17 SELECT evergreen.upgrade_deps_block_check('0741', :eg_version);
-- Patch 0741: add permissions for administering custom toolbars at the
-- org-unit, workstation, and user levels.
-- NOTE(review): the INSERT below is incomplete in this extract -- the id
-- values, row delimiters, and closing parentheses are on lines missing from
-- view. Verify the full statement (and the hard-coded perm IDs) elsewhere.
19 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
21 'ADMIN_TOOLBAR_FOR_ORG',
24 'Allows a user to create, edit, and delete custom toolbars for org units',
30 'ADMIN_TOOLBAR_FOR_WORKSTATION',
33 'Allows a user to create, edit, and delete custom toolbars for workstations',
39 'ADMIN_TOOLBAR_FOR_USER',
42 'Allows a user to create, edit, and delete custom toolbars for users',
49 -- Evergreen DB patch 0743.schema.remove_tsearch2.sql
51 -- Enable native full-text search to be used, and drop TSearch2 extension
54 -- check whether patch can be applied
55 SELECT evergreen.upgrade_deps_block_check('0743', :eg_version);
57 -- FIXME: add/check SQL statements to perform the upgrade
58 -- First up, these functions depend on metabib.full_rec. They have to go for now.
-- Both overloads must be dropped before the view they return rows of.
59 DROP FUNCTION IF EXISTS biblio.flatten_marc(bigint);
60 DROP FUNCTION IF EXISTS biblio.flatten_marc(text);
62 -- These views depend on metabib.full_rec as well. Bye-bye!
63 DROP VIEW IF EXISTS reporter.old_super_simple_record;
64 DROP VIEW IF EXISTS reporter.simple_record;
65 DROP VIEW IF EXISTS reporter.classic_item_list;
-- classic_item_list is intentionally NOT recreated below; sites that used it
-- must re-run the example reporter-extension script after the upgrade.
67 \echo WARNING: The reporter.classic_item_list view was dropped if it existed.
68 \echo If you use that view, please run the example.reporter-extension.sql script
69 \echo to recreate it after rest of the schema upgrade is complete.
71 -- Now we can drop metabib.full_rec.
-- Safe now that every dependent function/view above is gone.
72 DROP VIEW IF EXISTS metabib.full_rec;
74 -- These indexes have to go. BEFORE we alter the tables, otherwise things take extra time when we alter the tables.
-- Dropping first avoids index maintenance during the ALTER COLUMN ... TYPE
-- rewrites below; the indexes are recreated afterward.
75 DROP INDEX IF EXISTS metabib.metabib_author_field_entry_value_idx;
76 DROP INDEX IF EXISTS metabib.metabib_identifier_field_entry_value_idx;
77 DROP INDEX IF EXISTS metabib.metabib_keyword_field_entry_value_idx;
78 DROP INDEX IF EXISTS metabib.metabib_series_field_entry_value_idx;
79 DROP INDEX IF EXISTS metabib.metabib_subject_field_entry_value_idx;
80 DROP INDEX IF EXISTS metabib.metabib_title_field_entry_value_idx;
82 -- Now grab all of the tsvector-enabled columns and switch them to the non-wrapper version of the type.
-- Schema-qualifying pg_catalog.tsvector moves these columns off the tsearch2
-- compatibility type so the extension can be dropped below. Each ALTER may
-- rewrite its table; expect this to be slow on large databases.
83 ALTER TABLE authority.full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
84 ALTER TABLE authority.simple_heading ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
85 ALTER TABLE metabib.real_full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
86 ALTER TABLE metabib.author_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
87 ALTER TABLE metabib.browse_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
88 ALTER TABLE metabib.identifier_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
89 ALTER TABLE metabib.keyword_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
90 ALTER TABLE metabib.series_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
91 ALTER TABLE metabib.subject_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
92 ALTER TABLE metabib.title_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
94 -- Make sure that tsearch2 exists as an extension (for a sufficiently
95 -- old Evergreen database, it might still be an unpackaged contrib).
-- Packaging the loose contrib objects into an extension lets the single
-- DROP EXTENSION below remove them all at once.
-- NOTE(review): CREATE EXTENSION ... FROM unpackaged was removed in
-- PostgreSQL 13; this script targets the older PG versions of its era.
96 CREATE EXTENSION IF NOT EXISTS tsearch2 SCHEMA public FROM unpackaged;
97 -- Halfway there! Goodbye tsearch2 extension!
98 DROP EXTENSION tsearch2;
100 -- Next up, re-creating all of the stuff we just dropped.
102 -- Indexes! Note to whomever: Do we even need these anymore?
-- Partial indexes on the first 1024 chars of value, restricted to rows whose
-- index_vector is empty -- mirrors the definitions dropped above, now using
-- the native ''::TSVECTOR literal instead of the tsearch2 wrapper type.
103 CREATE INDEX metabib_author_field_entry_value_idx ON metabib.author_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
104 CREATE INDEX metabib_identifier_field_entry_value_idx ON metabib.identifier_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
105 CREATE INDEX metabib_keyword_field_entry_value_idx ON metabib.keyword_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
106 CREATE INDEX metabib_series_field_entry_value_idx ON metabib.series_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
107 CREATE INDEX metabib_subject_field_entry_value_idx ON metabib.subject_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
108 CREATE INDEX metabib_title_field_entry_value_idx ON metabib.title_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
110 -- metabib.full_rec, with insert/update/delete rules
-- Recreates metabib.full_rec as an updatable view over metabib.real_full_rec,
-- exposing value truncated to 1024 chars. The three rewrite rules route
-- INSERT/UPDATE/DELETE on the view to the backing table.
-- NOTE(review): the view's full column list, the rules' DO INSTEAD clauses,
-- and several SET/VALUES lines are on lines missing from this extract.
111 CREATE OR REPLACE VIEW metabib.full_rec AS
118 SUBSTRING(value,1,1024) AS value,
120 FROM metabib.real_full_rec;
122 CREATE OR REPLACE RULE metabib_full_rec_insert_rule
123 AS ON INSERT TO metabib.full_rec
-- id defaults from the view's sequence when the caller does not supply one.
125 INSERT INTO metabib.real_full_rec VALUES (
126 COALESCE(NEW.id, NEXTVAL('metabib.full_rec_id_seq'::REGCLASS)),
136 CREATE OR REPLACE RULE metabib_full_rec_update_rule
137 AS ON UPDATE TO metabib.full_rec
139 UPDATE metabib.real_full_rec SET
145 subfield = NEW.subfield,
147 index_vector = NEW.index_vector
150 CREATE OR REPLACE RULE metabib_full_rec_delete_rule
151 AS ON DELETE TO metabib.full_rec
153 DELETE FROM metabib.real_full_rec WHERE id = OLD.id;
155 -- reporter views that depended on metabib.full_rec are up next
-- reporter.simple_record: one row per bib record with commonly-reported MARC
-- fields flattened out of metabib.full_rec (title 245a, author 100a, etc.).
-- NOTE(review): the head of the SELECT list (including the grouped id/date
-- columns implied by GROUP BY 1..14) is on lines missing from this extract.
-- The positional GROUP BY is fragile but is left as-is to match the original.
156 CREATE OR REPLACE VIEW reporter.simple_record AS
163 title.value AS title,
164 uniform_title.value AS uniform_title,
165 author.value AS author,
166 publisher.value AS publisher,
-- pubdate: first run of digits from 260c.
167 SUBSTRING(pubdate.value FROM $$\d+$$) AS pubdate,
168 series_title.value AS series_title,
169 series_statement.value AS series_statement,
170 summary.value AS summary,
-- ISBNs: leading token of 020/024 a|z with hyphens stripped.
171 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
-- ISSNs normalized to 'NNNN NNNN' form.
172 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn,
173 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '650' AND subfield = 'a' AND record = r.id)) AS topic_subject,
174 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '651' AND subfield = 'a' AND record = r.id)) AS geographic_subject,
175 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '655' AND subfield = 'a' AND record = r.id)) AS genre,
176 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '600' AND subfield = 'a' AND record = r.id)) AS name_subject,
177 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '610' AND subfield = 'a' AND record = r.id)) AS corporate_subject,
-- 856 link subfields, ordered so labels (3,y) sort before URLs (u).
178 ARRAY((SELECT value FROM metabib.full_rec WHERE tag = '856' AND subfield IN ('3','y','u') AND record = r.id ORDER BY CASE WHEN subfield IN ('3','y') THEN 0 ELSE 1 END)) AS external_uri
179 FROM biblio.record_entry r
180 JOIN metabib.metarecord_source_map s ON (s.source = r.id)
181 LEFT JOIN metabib.full_rec uniform_title ON (r.id = uniform_title.record AND uniform_title.tag = '240' AND uniform_title.subfield = 'a')
182 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
183 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag = '100' AND author.subfield = 'a')
184 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
185 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
186 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
187 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
188 LEFT JOIN metabib.full_rec series_title ON (r.id = series_title.record AND series_title.tag IN ('830','440') AND series_title.subfield = 'a')
189 LEFT JOIN metabib.full_rec series_statement ON (r.id = series_statement.record AND series_statement.tag = '490' AND series_statement.subfield = 'a')
190 LEFT JOIN metabib.full_rec summary ON (r.id = summary.record AND summary.tag = '520' AND summary.subfield = 'a')
191 GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14;
-- reporter.old_super_simple_record: a lighter-weight per-record summary than
-- reporter.simple_record -- FIRST()/array aggregates collapse each record's
-- MARC fields to a single row.
-- NOTE(review): the SELECT list head and the trailing GROUP BY clause are on
-- lines missing from this extract.
193 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
199 FIRST(title.value) AS title,
200 FIRST(author.value) AS author,
201 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
202 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
203 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
204 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
205 FROM biblio.record_entry r
206 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
207 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
208 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
209 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
210 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
211 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
214 -- And finally, the biblio functions. NOTE: I can't find the original source of the second one, so I skipped it as old cruft that was in our production DB.
-- biblio.flatten_marc(rid): re-creation of the function dropped above.
-- Loads the bib record, flattens its MARC via vandelay.flatten_marc(), and
-- returns one metabib.full_rec row per field occurrence.
-- NOTE(review): the DECLARE/BEGIN framing, the RETURN NEXT, and the loop
-- close are on lines missing from this extract.
215 CREATE OR REPLACE FUNCTION biblio.flatten_marc ( rid BIGINT ) RETURNS SETOF metabib.full_rec AS $func$
217 bib biblio.record_entry%ROWTYPE;
218 output metabib.full_rec%ROWTYPE;
221 SELECT INTO bib * FROM biblio.record_entry WHERE id = rid;
223 FOR field IN SELECT * FROM vandelay.flatten_marc( bib.marc ) LOOP
224 output.record := rid;
225 output.ind1 := field.ind1;
226 output.ind2 := field.ind2;
227 output.tag := field.tag;
228 output.subfield := field.subfield;
229 output.value := field.value;
234 $func$ LANGUAGE PLPGSQL;
236 -- Evergreen DB patch 0745.data.prewarn_expire_setting.sql
238 -- Configuration setting to warn staff when an account is about to expire
241 -- check whether patch can be applied
242 SELECT evergreen.upgrade_deps_block_check('0745', :eg_version);
-- New org-unit setting 'circ.patron_expires_soon_warning': number of days
-- before patron expiry at which the staff client shows a warning.
-- NOTE(review): the VALUES framing (grp value, datatype, oils_i18n_gettext
-- wrappers, closing parens) is on lines missing from this extract.
244 INSERT INTO config.org_unit_setting_type
245 (name, grp, label, description, datatype)
247 'circ.patron_expires_soon_warning',
250 'circ.patron_expires_soon_warning',
251 'Warn when patron account is about to expire',
256 'circ.patron_expires_soon_warning',
257 'Warn when patron account is about to expire. If set, the staff client displays a warning this many days before the expiry of a patron account. Value is in number of days, for example: 3 for 3 days.',
264 -- LP1076399: Prevent reactivated holds from canceling immediately.
265 -- Set the expire_time to NULL on all frozen/suspended holds.
267 SELECT evergreen.upgrade_deps_block_check('0747', :eg_version);
-- NOTE(review): as shown this UPDATE has no WHERE clause, but the comment
-- above says it targets frozen/suspended holds only -- the WHERE clause is
-- almost certainly on a line missing from this extract. Confirm the full
-- statement before running; an unfiltered UPDATE here would be destructive.
269 UPDATE action.hold_request
270 SET expire_time = NULL
274 SELECT evergreen.upgrade_deps_block_check('0752', :eg_version);
-- Patch 0752: Link Checker (URL verification) feature. Creates a record
-- bucket type, a fresh url_verify schema, and filter-dialog config tables.
276 INSERT INTO container.biblio_record_entry_bucket_type (code, label) VALUES ('url_verify', 'URL Verification Queue');
-- Destructive by design: any prior url_verify schema is discarded.
278 DROP SCHEMA IF EXISTS url_verify CASCADE;
280 CREATE SCHEMA url_verify;
-- A verification session: who ran it, over which record bucket, with what
-- search. NOTE(review): the name column (referenced by uvs_name_once_per_lib)
-- and the closing ');' are on lines missing from this extract.
282 CREATE TABLE url_verify.session (
283 id SERIAL PRIMARY KEY,
285 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
286 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
287 container INT NOT NULL REFERENCES container.biblio_record_entry_bucket (id) DEFERRABLE INITIALLY DEFERRED,
288 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
289 search TEXT NOT NULL,
290 CONSTRAINT uvs_name_once_per_lib UNIQUE (name, owning_lib)
-- XPath selectors used to pull URLs out of MARC for a session.
-- NOTE(review): the xpath column and closing ');' are missing from view.
293 CREATE TABLE url_verify.url_selector (
294 id SERIAL PRIMARY KEY,
296 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
297 CONSTRAINT tag_once_per_sess UNIQUE (xpath, session)
-- Each URL found (or redirected to); redirect_from chains redirect targets
-- back to the originally extracted URL.
-- NOTE(review): tag/subfield/ord/etc. columns and parts of the CHECK
-- constraint are on lines missing from this extract.
300 CREATE TABLE url_verify.url (
301 id SERIAL PRIMARY KEY,
302 redirect_from INT REFERENCES url_verify.url(id) DEFERRABLE INITIALLY DEFERRED,
303 item INT REFERENCES container.biblio_record_entry_bucket_item (id) DEFERRABLE INITIALLY DEFERRED,
304 url_selector INT REFERENCES url_verify.url_selector (id) DEFERRABLE INITIALLY DEFERRED,
305 session INT REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
309 full_url TEXT NOT NULL,
-- A url row is either a redirect target or tied to a bucket item/selector.
321 CONSTRAINT redirect_or_from_item CHECK (
322 redirect_from IS NOT NULL OR (
324 url_selector IS NOT NULL AND
326 subfield IS NOT NULL AND
-- One batch run of checks within a session.
332 CREATE TABLE url_verify.verification_attempt (
333 id SERIAL PRIMARY KEY,
334 usr INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
335 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
336 start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
337 finish_time TIMESTAMP WITH TIME ZONE
-- One request/response record per URL per attempt.
340 CREATE TABLE url_verify.url_verification (
341 id SERIAL PRIMARY KEY,
342 url INT NOT NULL REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED,
343 attempt INT NOT NULL REFERENCES url_verify.verification_attempt (id) DEFERRABLE INITIALLY DEFERRED,
344 req_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
345 res_time TIMESTAMP WITH TIME ZONE,
346 res_code INT CHECK (res_code BETWEEN 100 AND 999), -- we know > 599 will never be valid HTTP code, but we use 9XX for other stuff
348 redirect_to INT REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED -- if redirected
-- Registry of staff interfaces that support saved filter sets.
351 CREATE TABLE config.filter_dialog_interface (
352 key TEXT PRIMARY KEY,
-- Saved filter sets per interface/lib; filters is validated as JSON text.
-- NOTE(review): the name column and closing ');' are missing from view.
356 CREATE TABLE config.filter_dialog_filter_set (
357 id SERIAL PRIMARY KEY,
359 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
360 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
361 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
362 interface TEXT NOT NULL REFERENCES config.filter_dialog_interface (key) DEFERRABLE INITIALLY DEFERRED,
363 filters TEXT NOT NULL CHECK (is_json(filters)),
364 CONSTRAINT cfdfs_name_once_per_lib UNIQUE (name, owning_lib)
368 SELECT evergreen.upgrade_deps_block_check('0753', :eg_version);
-- url_verify.parse_url: splits a URL string into a url_verify.url composite
-- (scheme, host, domain, tld, path, page, etc.). Body appears to be PL/Perl
-- using Rose::URI -- TODO confirm; the LANGUAGE clause and the return of
-- %parts are on lines missing from this extract.
370 CREATE OR REPLACE FUNCTION url_verify.parse_url (url_in TEXT) RETURNS url_verify.url AS $$
375 my $url = Rose::URI->new($url_in);
377 my %parts = map { $_ => $url->$_ } qw/scheme username password host port path query fragment/;
379 $parts{full_url} = $url_in;
-- domain = host minus its first label; tld = last label of domain;
-- page = final path component.
380 ($parts{domain} = $parts{host}) =~ s/^[^.]+\.//;
381 ($parts{tld} = $parts{domain}) =~ s/(?:[^.]+\.)+//;
382 ($parts{page} = $parts{path}) =~ s#(?:[^/]*/)+##;
-- Trigger function: before insert on url_verify.url, populate the parsed
-- component columns from full_url via parse_url().
-- NOTE(review): DECLARE/BEGIN framing and RETURN NEW are missing from view.
388 CREATE OR REPLACE FUNCTION url_verify.ingest_url () RETURNS TRIGGER AS $$
390 tmp_row url_verify.url%ROWTYPE;
392 SELECT * INTO tmp_row FROM url_verify.parse_url(NEW.full_url);
394 NEW.scheme := tmp_row.scheme;
395 NEW.username := tmp_row.username;
396 NEW.password := tmp_row.password;
397 NEW.host := tmp_row.host;
398 NEW.domain := tmp_row.domain;
399 NEW.tld := tmp_row.tld;
400 NEW.port := tmp_row.port;
401 NEW.path := tmp_row.path;
402 NEW.page := tmp_row.page;
403 NEW.query := tmp_row.query;
404 NEW.fragment := tmp_row.fragment;
-- Wire the ingest function to every insert on url_verify.url.
410 CREATE TRIGGER ingest_url_tgr
411 BEFORE INSERT ON url_verify.url
412 FOR EACH ROW EXECUTE PROCEDURE url_verify.ingest_url();
-- url_verify.extract_urls(session_id, item_id): for each url_selector in the
-- session, walks XPath matches in the bucket item's MARC, inserting one
-- url_verify.url row per matched URL. Returns the number of URLs inserted.
-- NOTE(review): DECLARE section (current_url, current_tag, current_sf,
-- counters), loop framing, and END are on lines missing from this extract.
414 CREATE OR REPLACE FUNCTION url_verify.extract_urls ( session_id INT, item_id INT ) RETURNS INT AS $$
422 current_selector url_verify.url_selector%ROWTYPE;
426 FOR current_selector IN SELECT * FROM url_verify.url_selector s WHERE s.session = session_id LOOP
427 current_url_pos := 1;
-- Pull the Nth text node matched by the selector's XPath.
429 SELECT (XPATH(current_selector.xpath || '/text()', b.marc::XML))[current_url_pos]::TEXT INTO current_url
430 FROM biblio.record_entry b
431 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
432 WHERE c.id = item_id;
-- No more matches for this selector: stop the inner walk.
434 EXIT WHEN current_url IS NULL;
-- Parent datafield's @tag for the matched node.
436 SELECT (XPATH(current_selector.xpath || '/../@tag', b.marc::XML))[current_url_pos]::TEXT INTO current_tag
437 FROM biblio.record_entry b
438 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
439 WHERE c.id = item_id;
-- XPATH may return fewer @tag matches than text() matches; reuse the last
-- tag seen when the positional lookup comes back NULL.
441 IF current_tag IS NULL THEN
442 current_tag := last_seen_tag;
444 last_seen_tag := current_tag;
-- Subfield @code for the matched node.
447 SELECT (XPATH(current_selector.xpath || '/@code', b.marc::XML))[current_url_pos]::TEXT INTO current_sf
448 FROM biblio.record_entry b
449 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
450 WHERE c.id = item_id;
452 INSERT INTO url_verify.url (session, item, url_selector, tag, subfield, ord, full_url)
453 VALUES ( session_id, item_id, current_selector.id, current_tag, current_sf, current_ord, current_url);
455 current_url_pos := current_url_pos + 1;
456 current_ord := current_ord + 1;
-- current_ord started at 1 (presumably -- declaration not visible), so the
-- count of inserted URLs is one less than its final value.
460 RETURN current_ord - 1;
466 -- NOTE: beware the use of bare perm IDs in the update_perm's below and in
467 -- the 950 seed data file. Update before merge to match current perm IDs! XXX
470 SELECT evergreen.upgrade_deps_block_check('0754', :eg_version);
-- Patch 0754: permissions, settings group, org-unit settings, and user
-- settings for the Link Checker feature.
-- NOTE(review): throughout this section the perm IDs, grp values,
-- oils_i18n_gettext wrappers, datatypes, and closing parens are on lines
-- missing from this extract; each INSERT is incomplete as shown.
472 INSERT INTO permission.perm_list (id, code, description)
-- (perm code line missing from view; 'ULs' phrasing below is the original's.)
478 'Allows a user to process and verify ULSs',
485 INSERT INTO permission.perm_list (id, code, description)
488 'URL_VERIFY_UPDATE_SETTINGS',
491 'Allows a user to configure URL verification org unit settings',
498 INSERT INTO permission.perm_list (id, code, description)
501 'SAVED_FILTER_DIALOG_FILTERS',
504 'Allows users to save and load sets of filters for filter dialogs, available in certain staff interfaces',
-- Settings group for the url_verify.* settings below.
511 INSERT INTO config.settings_group (name, label)
-- Throttle delay between URL tests (seconds).
522 INSERT INTO config.org_unit_setting_type
523 (name, grp, label, description, datatype, update_perm)
525 'url_verify.url_verification_delay',
528 'url_verify.url_verification_delay',
529 'Number of seconds to wait between URL test attempts.',
534 'url_verify.url_verification_delay',
535 'Throttling mechanism for batch URL verification runs. Each running process will wait this number of seconds after a URL test before performing the next.',
-- Cap on how many 3XX redirects to follow per URL.
543 INSERT INTO config.org_unit_setting_type
544 (name, grp, label, description, datatype, update_perm)
546 'url_verify.url_verification_max_redirects',
549 'url_verify.url_verification_max_redirects',
550 'Maximum redirect lookups',
555 'url_verify.url_verification_max_redirects',
556 'For URLs returning 3XX redirects, this is the maximum number of redirects we will follow before giving up.',
-- Per-URL lookup timeout (seconds).
564 INSERT INTO config.org_unit_setting_type
565 (name, grp, label, description, datatype, update_perm)
567 'url_verify.url_verification_max_wait',
570 'url_verify.url_verification_max_wait',
571 'Maximum wait time (in seconds) for a URL to lookup',
576 'url_verify.url_verification_max_wait',
577 'If we exceed the wait time, the URL is marked as a "timeout" and the system moves on to the next URL',
-- Parallelism: how many URLs are tested per batch.
586 INSERT INTO config.org_unit_setting_type
587 (name, grp, label, description, datatype, update_perm)
589 'url_verify.verification_batch_size',
592 'url_verify.verification_batch_size',
593 'Number of URLs to test in parallel',
598 'url_verify.verification_batch_size',
599 'URLs are tested in batches. This number defines the size of each batch and it directly relates to the number of back-end processes performing URL verification.',
-- Register the Link Checker filter dialogs interface (key line missing).
608 INSERT INTO config.filter_dialog_interface (key, description) VALUES (
612 'All Link Checker filter dialogs',
-- Saved-grid-column user settings for the two Link Checker interfaces.
619 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
620 'ui.grid_columns.url_verify.select_urls',
624 'ui.grid_columns.url_verify.select_urls',
625 'Link Checker''s URL Selection interface''s saved columns',
630 'ui.grid_columns.url_verify.select_urls',
631 'Link Checker''s URL Selection interface''s saved columns',
638 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
639 'ui.grid_columns.url_verify.review_attempt',
643 'ui.grid_columns.url_verify.review_attempt',
644 'Link Checker''s Review Attempt interface''s saved columns',
649 'ui.grid_columns.url_verify.review_attempt',
650 'Link Checker''s Review Attempt interface''s saved columns',
660 SELECT evergreen.upgrade_deps_block_check('0755', :eg_version);
-- Patch 0755: default-value org-unit settings for the ACQ (acquisitions)
-- MARC file upload interface. One multi-row INSERT; for each setting only
-- the name, label, and description lines are visible here.
-- NOTE(review): grp/datatype/fm_class values, oils_i18n_gettext wrappers,
-- row delimiters, and the closing ';' are on lines missing from this extract.
662 INSERT INTO config.org_unit_setting_type
663 (name, label, description, grp, datatype, fm_class) VALUES
-- Default "create purchase order" checkbox state.
665 'acq.upload.default.create_po',
667 'acq.upload.default.create_po',
673 'acq.upload.default.create_po',
674 'Create a purchase order by default during ACQ file upload',
-- Default "activate purchase order" checkbox state.
682 'acq.upload.default.activate_po',
684 'acq.upload.default.activate_po',
685 'Upload Activate PO',
690 'acq.upload.default.activate_po',
691 'Activate the purchase order by default during ACQ file upload',
-- Default provider selection (presumably fm_class-linked; not visible).
699 'acq.upload.default.provider',
701 'acq.upload.default.provider',
702 'Upload Default Provider',
707 'acq.upload.default.provider',
708 'Default provider to use during ACQ file upload',
-- Default Vandelay match set.
716 'acq.upload.default.vandelay.match_set',
718 'acq.upload.default.vandelay.match_set',
719 'Upload Default Match Set',
724 'acq.upload.default.vandelay.match_set',
725 'Default match set to use during ACQ file upload',
-- Default Vandelay merge profile.
733 'acq.upload.default.vandelay.merge_profile',
735 'acq.upload.default.vandelay.merge_profile',
736 'Upload Default Merge Profile',
741 'acq.upload.default.vandelay.merge_profile',
742 'Default merge profile to use during ACQ file upload',
-- Default for importing records that match nothing.
750 'acq.upload.default.vandelay.import_non_matching',
752 'acq.upload.default.vandelay.import_non_matching',
753 'Upload Import Non Matching by Default',
758 'acq.upload.default.vandelay.import_non_matching',
759 'Import non-matching records by default during ACQ file upload',
-- Merge-behavior defaults: exact match, best match, single match.
767 'acq.upload.default.vandelay.merge_on_exact',
769 'acq.upload.default.vandelay.merge_on_exact',
770 'Upload Merge on Exact Match by Default',
775 'acq.upload.default.vandelay.merge_on_exact',
776 'Merge records on exact match by default during ACQ file upload',
784 'acq.upload.default.vandelay.merge_on_best',
786 'acq.upload.default.vandelay.merge_on_best',
787 'Upload Merge on Best Match by Default',
792 'acq.upload.default.vandelay.merge_on_best',
793 'Merge records on best match by default during ACQ file upload',
801 'acq.upload.default.vandelay.merge_on_single',
803 'acq.upload.default.vandelay.merge_on_single',
804 'Upload Merge on Single Match by Default',
809 'acq.upload.default.vandelay.merge_on_single',
810 'Merge records on single match by default during ACQ file upload',
-- Minimum record quality ratio default.
818 'acq.upload.default.vandelay.quality_ratio',
820 'acq.upload.default.vandelay.quality_ratio',
821 'Upload Default Min. Quality Ratio',
826 'acq.upload.default.vandelay.quality_ratio',
827 'Default minimum quality ratio used during ACQ file upload',
-- Fall-through merge profile when quality is insufficient.
835 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
837 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
838 'Upload Default Insufficient Quality Fall-Thru Profile',
843 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
844 'Default low-quality fall through profile used during ACQ file upload',
-- Default for loading copies/items along with imported records.
852 'acq.upload.default.vandelay.load_item_for_imported',
854 'acq.upload.default.vandelay.load_item_for_imported',
855 'Upload Load Items for Imported Records by Default',
860 'acq.upload.default.vandelay.load_item_for_imported',
861 'Load items for imported records by default during ACQ file upload',
871 SELECT evergreen.upgrade_deps_block_check('0756', :eg_version);
873 -- Drop some lingering old functions in search schema
-- Obsolete staged-search plumbing; signatures must match exactly for the
-- DROP FUNCTION calls to find them.
874 DROP FUNCTION IF EXISTS search.staged_fts(INT,INT,TEXT,INT[],INT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT,TEXT,TEXT,TEXT[],TEXT,REAL,TEXT,BOOL,BOOL,BOOL,INT,INT,INT);
875 DROP FUNCTION IF EXISTS search.parse_search_args(TEXT);
876 DROP FUNCTION IF EXISTS search.explode_array(ANYARRAY);
877 DROP FUNCTION IF EXISTS search.pick_table(TEXT);
879 -- Now drop query_parser_fts and related
-- The function is dropped before the composite types it used.
880 DROP FUNCTION IF EXISTS search.query_parser_fts(INT,INT,TEXT,INT[],INT[],INT,INT,INT,BOOL,BOOL,INT);
881 DROP TYPE IF EXISTS search.search_result;
882 DROP TYPE IF EXISTS search.search_args;
885 SELECT evergreen.upgrade_deps_block_check('0757', :eg_version);
-- Build *_nostop text search dictionaries/configurations: for every built-in
-- snowball stemmer (english_stem, french_stem, ...), create a '<lang>_nostop'
-- dictionary with no stopword list and a matching configuration that stems
-- ASCII tokens while passing non-ASCII tokens through pg_catalog.simple.
-- NOTE(review): the DO/DECLARE framing of this loop and its END are on lines
-- missing from this extract.
887 SET search_path = public, pg_catalog;
893 FOR lang IN SELECT substring(pptsd.dictname from '(.*)_stem$') AS lang FROM pg_catalog.pg_ts_dict pptsd JOIN pg_catalog.pg_namespace ppn ON ppn.oid = pptsd.dictnamespace
894 WHERE ppn.nspname = 'pg_catalog' AND pptsd.dictname LIKE '%_stem' LOOP
895 RAISE NOTICE 'FOUND LANGUAGE %', lang;
-- Dynamic DDL: lang is interpolated into the statement text, so each
-- iteration creates lang-specific dictionary + configuration objects.
897 EXECUTE 'DROP TEXT SEARCH DICTIONARY IF EXISTS ' || lang || '_nostop CASCADE;
898 CREATE TEXT SEARCH DICTIONARY ' || lang || '_nostop (TEMPLATE=pg_catalog.snowball, language=''' || lang || ''');
899 COMMENT ON TEXT SEARCH DICTIONARY ' || lang || '_nostop IS ''' ||lang || ' snowball stemmer with no stopwords for ASCII words only.'';
900 CREATE TEXT SEARCH CONFIGURATION ' || lang || '_nostop ( COPY = pg_catalog.' || lang || ' );
901 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR word, hword, hword_part WITH pg_catalog.simple;
902 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR asciiword, asciihword, hword_asciipart WITH ' || lang || '_nostop;';
-- 'keyword' and the database default configuration both alias the
-- no-stopword English setup built above.
907 CREATE TEXT SEARCH CONFIGURATION keyword ( COPY = english_nostop );
908 CREATE TEXT SEARCH CONFIGURATION "default" ( COPY = english_nostop );
-- Restore the normal search path after the text-search work above.
910 SET search_path = evergreen, public, pg_catalog;
-- Per-class relevance weights for tsvector label buckets A-D; defaults match
-- PostgreSQL's standard ts_rank weight ordering (D lowest, A highest).
912 ALTER TABLE config.metabib_class
913 ADD COLUMN a_weight NUMERIC DEFAULT 1.0 NOT NULL,
914 ADD COLUMN b_weight NUMERIC DEFAULT 0.4 NOT NULL,
915 ADD COLUMN c_weight NUMERIC DEFAULT 0.2 NOT NULL,
916 ADD COLUMN d_weight NUMERIC DEFAULT 0.1 NOT NULL;
-- Registry of usable full-text-search configurations.
-- NOTE(review): the column list of this table (including the id referenced by
-- the ts_config FKs below, presumably TEXT) is on lines missing from this
-- extract, as is the closing of the COMMENT literal.
918 CREATE TABLE config.ts_config_list (
922 COMMENT ON TABLE config.ts_config_list IS $$
925 A list of full text configs with names and descriptions.
-- Maps a metabib search class to the TS configs used when building and
-- searching its index_vector, with a per-mapping weight label and optional
-- language restrictions. 'always' marks configs applied unconditionally.
928 CREATE TABLE config.metabib_class_ts_map (
929 id SERIAL PRIMARY KEY,
930 field_class TEXT NOT NULL REFERENCES config.metabib_class (name),
931 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
932 active BOOL NOT NULL DEFAULT TRUE,
933 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
934 index_lang TEXT NULL,
935 search_lang TEXT NULL,
936 always BOOL NOT NULL DEFAULT true
938 COMMENT ON TABLE config.metabib_class_ts_map IS $$
939 Text Search Configs for metabib class indexing
941 This table contains text search config definitions for
942 storing index_vector values.
-- Same mapping, but per individual metabib field (overrides/augments the
-- class-level mapping above; note there is no 'always' column here).
945 CREATE TABLE config.metabib_field_ts_map (
946 id SERIAL PRIMARY KEY,
947 metabib_field INT NOT NULL REFERENCES config.metabib_field (id),
948 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
949 active BOOL NOT NULL DEFAULT TRUE,
950 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
951 index_lang TEXT NULL,
952 search_lang TEXT NULL
954 COMMENT ON TABLE config.metabib_field_ts_map IS $$
955 Text Search Configs for metabib field indexing
957 This table contains text search config definitions for
958 storing index_vector values.
-- metabib.combined_<class>_field_entry: one identical table per search class
-- (identifier, title, author, subject, keyword, series). Each holds, per bib
-- record, one aggregated tsvector per metabib_field plus one class-wide row
-- where metabib_field IS NULL. Uniqueness is enforced by an expression index
-- (COALESCE of metabib_field as text) rather than a real PK, since NULL
-- metabib_field rows must also be unique per record.
-- NOTE(review): each table's closing ');' is on a line missing from this
-- extract.
961 CREATE TABLE metabib.combined_identifier_field_entry (
962 record BIGINT NOT NULL,
963 metabib_field INT NULL,
964 index_vector tsvector NOT NULL
966 CREATE UNIQUE INDEX metabib_combined_identifier_field_entry_fakepk_idx ON metabib.combined_identifier_field_entry (record, COALESCE(metabib_field::TEXT,''));
967 CREATE INDEX metabib_combined_identifier_field_entry_index_vector_idx ON metabib.combined_identifier_field_entry USING GIST (index_vector);
968 CREATE INDEX metabib_combined_identifier_field_source_idx ON metabib.combined_identifier_field_entry (metabib_field);
970 CREATE TABLE metabib.combined_title_field_entry (
971 record BIGINT NOT NULL,
972 metabib_field INT NULL,
973 index_vector tsvector NOT NULL
975 CREATE UNIQUE INDEX metabib_combined_title_field_entry_fakepk_idx ON metabib.combined_title_field_entry (record, COALESCE(metabib_field::TEXT,''));
976 CREATE INDEX metabib_combined_title_field_entry_index_vector_idx ON metabib.combined_title_field_entry USING GIST (index_vector);
977 CREATE INDEX metabib_combined_title_field_source_idx ON metabib.combined_title_field_entry (metabib_field);
979 CREATE TABLE metabib.combined_author_field_entry (
980 record BIGINT NOT NULL,
981 metabib_field INT NULL,
982 index_vector tsvector NOT NULL
984 CREATE UNIQUE INDEX metabib_combined_author_field_entry_fakepk_idx ON metabib.combined_author_field_entry (record, COALESCE(metabib_field::TEXT,''));
985 CREATE INDEX metabib_combined_author_field_entry_index_vector_idx ON metabib.combined_author_field_entry USING GIST (index_vector);
986 CREATE INDEX metabib_combined_author_field_source_idx ON metabib.combined_author_field_entry (metabib_field);
988 CREATE TABLE metabib.combined_subject_field_entry (
989 record BIGINT NOT NULL,
990 metabib_field INT NULL,
991 index_vector tsvector NOT NULL
993 CREATE UNIQUE INDEX metabib_combined_subject_field_entry_fakepk_idx ON metabib.combined_subject_field_entry (record, COALESCE(metabib_field::TEXT,''));
994 CREATE INDEX metabib_combined_subject_field_entry_index_vector_idx ON metabib.combined_subject_field_entry USING GIST (index_vector);
995 CREATE INDEX metabib_combined_subject_field_source_idx ON metabib.combined_subject_field_entry (metabib_field);
997 CREATE TABLE metabib.combined_keyword_field_entry (
998 record BIGINT NOT NULL,
999 metabib_field INT NULL,
1000 index_vector tsvector NOT NULL
1002 CREATE UNIQUE INDEX metabib_combined_keyword_field_entry_fakepk_idx ON metabib.combined_keyword_field_entry (record, COALESCE(metabib_field::TEXT,''));
1003 CREATE INDEX metabib_combined_keyword_field_entry_index_vector_idx ON metabib.combined_keyword_field_entry USING GIST (index_vector);
1004 CREATE INDEX metabib_combined_keyword_field_source_idx ON metabib.combined_keyword_field_entry (metabib_field);
1006 CREATE TABLE metabib.combined_series_field_entry (
1007 record BIGINT NOT NULL,
1008 metabib_field INT NULL,
1009 index_vector tsvector NOT NULL
1011 CREATE UNIQUE INDEX metabib_combined_series_field_entry_fakepk_idx ON metabib.combined_series_field_entry (record, COALESCE(metabib_field::TEXT,''));
1012 CREATE INDEX metabib_combined_series_field_entry_index_vector_idx ON metabib.combined_series_field_entry USING GIST (index_vector);
1013 CREATE INDEX metabib_combined_series_field_source_idx ON metabib.combined_series_field_entry (metabib_field);
-- Rebuild all metabib.combined_*_field_entry rows for one bib record.
-- For each search class (keyword/title/author/subject/series/identifier):
-- delete existing combined rows, then re-insert one per-field row
-- (GROUP BY field) and one class-wide row (metabib_field = NULL) whose
-- tsvector is the stripped concatenation of every per-field vector.
-- NOTE(review): interior lines (BEGIN/RETURN/END) are missing from this
-- excerpt per the line-number gaps.
1015 CREATE OR REPLACE FUNCTION metabib.update_combined_index_vectors(bib_id BIGINT) RETURNS VOID AS $func$
1017 DELETE FROM metabib.combined_keyword_field_entry WHERE record = bib_id;
1018 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
-- strip() drops positional/weight info: combined vectors are used only
-- for matching, not ranking, so the smaller representation suffices.
1019 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1020 FROM metabib.keyword_field_entry WHERE source = bib_id GROUP BY field;
1021 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
1022 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1023 FROM metabib.keyword_field_entry WHERE source = bib_id;
1025 DELETE FROM metabib.combined_title_field_entry WHERE record = bib_id;
1026 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1027 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1028 FROM metabib.title_field_entry WHERE source = bib_id GROUP BY field;
1029 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1030 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1031 FROM metabib.title_field_entry WHERE source = bib_id;
1033 DELETE FROM metabib.combined_author_field_entry WHERE record = bib_id;
1034 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1035 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1036 FROM metabib.author_field_entry WHERE source = bib_id GROUP BY field;
1037 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1038 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1039 FROM metabib.author_field_entry WHERE source = bib_id;
1041 DELETE FROM metabib.combined_subject_field_entry WHERE record = bib_id;
1042 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1043 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1044 FROM metabib.subject_field_entry WHERE source = bib_id GROUP BY field;
1045 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1046 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1047 FROM metabib.subject_field_entry WHERE source = bib_id;
1049 DELETE FROM metabib.combined_series_field_entry WHERE record = bib_id;
1050 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1051 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1052 FROM metabib.series_field_entry WHERE source = bib_id GROUP BY field;
1053 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1054 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1055 FROM metabib.series_field_entry WHERE source = bib_id;
1057 DELETE FROM metabib.combined_identifier_field_entry WHERE record = bib_id;
1058 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1059 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1060 FROM metabib.identifier_field_entry WHERE source = bib_id GROUP BY field;
1061 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1062 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1063 FROM metabib.identifier_field_entry WHERE source = bib_id;
1066 $func$ LANGUAGE PLPGSQL;
-- Re-extract search/facet/browse index entries for one bib record.
-- skip_* flags let callers suppress each index type. The search-class
-- tables are emptied first, then repopulated from
-- biblio.extract_metabib_field_entry(), and finally the combined vectors
-- are rebuilt via metabib.update_combined_index_vectors().
-- NOTE(review): loop/IF terminators and END are not visible in this
-- excerpt (line-number gaps); do not run as-is.
1068 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1071 ind_data metabib.field_entry_template%ROWTYPE;
1072 mbe_row metabib.browse_entry%ROWTYPE;
-- Detect the bulk-load optimization flag; presumably the FOUND result is
-- consumed on a missing line -- confirm against the canonical script.
1075 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
1077 IF NOT skip_search THEN
1078 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1079 -- RAISE NOTICE 'Emptying out %', fclass.name;
1080 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1083 IF NOT skip_facet THEN
1084 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1086 IF NOT skip_browse THEN
1087 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
1091 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
-- Negative field ids flag facet-only variants; normalize to positive.
1092 IF ind_data.field < 0 THEN
1093 ind_data.field = -1 * ind_data.field;
1096 IF ind_data.facet_field AND NOT skip_facet THEN
1097 INSERT INTO metabib.facet_entry (field, source, value)
1098 VALUES (ind_data.field, ind_data.source, ind_data.value);
1101 IF ind_data.browse_field AND NOT skip_browse THEN
1102 -- A caveat about this SELECT: this should take care of replacing
1103 -- old mbe rows when data changes, but not if normalization (by
1104 -- which I mean specifically the output of
1105 -- evergreen.oils_tsearch2()) changes. It may or may not be
1106 -- expensive to add a comparison of index_vector to index_vector
1107 -- to the WHERE clause below.
1108 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
1110 mbe_id := mbe_row.id;
1112 INSERT INTO metabib.browse_entry (value) VALUES
1113 (metabib.browse_normalize(ind_data.value, ind_data.field));
1114 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1117 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1118 VALUES (mbe_id, ind_data.field, ind_data.source);
1121 IF ind_data.search_field AND NOT skip_search THEN
-- Dynamic INSERT built with quote_literal; the EXECUTE wrapper lines
-- are among the missing lines of this excerpt.
1123 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1125 quote_literal(ind_data.field) || $$, $$ ||
1126 quote_literal(ind_data.source) || $$, $$ ||
1127 quote_literal(ind_data.value) ||
1133 IF NOT skip_search THEN
1134 PERFORM metabib.update_combined_index_vectors(bib_id);
1139 $func$ LANGUAGE PLPGSQL;
-- Native replacement for the TSearch2-extension trigger: computes
-- NEW.index_vector for *_field_entry / browse_entry / full_rec rows using
-- built-in to_tsvector(), per-class/per-field ts_config maps, and
-- configured index normalizers.
1141 DROP FUNCTION IF EXISTS evergreen.oils_tsearch2() CASCADE;
1142 DROP FUNCTION IF EXISTS public.oils_tsearch2() CASCADE;
1144 CREATE OR REPLACE FUNCTION public.oils_tsearch2 () RETURNS TRIGGER AS $$
1148 temp_vector TEXT := '';
1153 NEW.index_vector = ''::tsvector;
-- Field-entry tables: run each configured normalizer over the value
-- before vectorization.
1155 IF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
1157 SELECT n.func AS func,
1158 n.param_count AS param_count,
1160 FROM config.index_normalizer n
1161 JOIN config.metabib_field_index_norm_map m ON (m.norm = n.id)
1162 WHERE field = NEW.field
-- Normalizer params are stored as a JSON-ish bracketed list; this
-- REPLACE/BTRIM dance rewrites them into SQL literals.
1164 EXECUTE 'SELECT ' || normalizer.func || '(' ||
1165 quote_literal( value ) ||
1167 WHEN normalizer.param_count > 0
1168 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
-- Browse entries: collapse to word tokens, search_normalize, then a
-- single to_tsvector with the trigger-arg config.
1177 IF TG_TABLE_NAME::TEXT ~ 'browse_entry$' THEN
1178 value := ARRAY_TO_STRING(
1179 evergreen.regexp_split_to_array(value, E'\\W+'), ' '
1181 value := public.search_normalize(value);
1182 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1183 ELSIF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
1185 SELECT ts_config, index_weight
1186 FROM config.metabib_class_ts_map
1187 WHERE field_class = TG_ARGV[0]
-- NOTE(review): "a AND b OR c AND d" -- AND binds tighter than OR, so
-- this parses as (a AND b) OR (c AND d), not a AND (b OR c) AND d.
-- That may be intended, but parenthesizing would make it unambiguous;
-- confirm against the canonical script before changing.
1188 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
1189 AND always OR NOT EXISTS (SELECT 1 FROM config.metabib_field_ts_map WHERE metabib_field = NEW.field)
1191 SELECT ts_config, index_weight
1192 FROM config.metabib_field_ts_map
1193 WHERE metabib_field = NEW.field
1194 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
1195 ORDER BY index_weight ASC
-- Accumulate same-weight configs into one temp vector, flushing with
-- setweight() each time the weight bucket changes.
1197 IF cur_weight IS NOT NULL AND cur_weight != ts_rec.index_weight THEN
1198 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
1201 cur_weight = ts_rec.index_weight;
1202 SELECT INTO temp_vector temp_vector || ' ' || to_tsvector(ts_rec.ts_config::regconfig, value)::TEXT;
1204 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
-- Fallback (e.g. full_rec): single-config vectorization.
1206 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1211 $$ LANGUAGE PLPGSQL;
-- Re-attach the FTI trigger to every indexed table, passing the search
-- class name as the trigger argument (consumed as TG_ARGV[0] above).
1213 CREATE TRIGGER authority_full_rec_fti_trigger
1214 BEFORE UPDATE OR INSERT ON authority.full_rec
1215 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1217 CREATE TRIGGER authority_simple_heading_fti_trigger
1218 BEFORE UPDATE OR INSERT ON authority.simple_heading
1219 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1221 CREATE TRIGGER metabib_identifier_field_entry_fti_trigger
1222 BEFORE UPDATE OR INSERT ON metabib.identifier_field_entry
1223 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('identifier');
1225 CREATE TRIGGER metabib_title_field_entry_fti_trigger
1226 BEFORE UPDATE OR INSERT ON metabib.title_field_entry
1227 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('title');
1229 CREATE TRIGGER metabib_author_field_entry_fti_trigger
1230 BEFORE UPDATE OR INSERT ON metabib.author_field_entry
1231 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('author');
1233 CREATE TRIGGER metabib_subject_field_entry_fti_trigger
1234 BEFORE UPDATE OR INSERT ON metabib.subject_field_entry
1235 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('subject');
1237 CREATE TRIGGER metabib_keyword_field_entry_fti_trigger
1238 BEFORE UPDATE OR INSERT ON metabib.keyword_field_entry
1239 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1241 CREATE TRIGGER metabib_series_field_entry_fti_trigger
1242 BEFORE UPDATE OR INSERT ON metabib.series_field_entry
1243 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('series');
1245 CREATE TRIGGER metabib_browse_entry_fti_trigger
1246 BEFORE INSERT OR UPDATE ON metabib.browse_entry
1247 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1249 CREATE TRIGGER metabib_full_rec_fti_trigger
1250 BEFORE UPDATE OR INSERT ON metabib.real_full_rec
1251 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('default');
-- Seed the available text-search configurations ("_nostop" variants are
-- stemmed dictionaries with stopword removal disabled) ...
1253 INSERT INTO config.ts_config_list(id, name) VALUES
1254 ('simple','Non-Stemmed Simple'),
1255 ('danish_nostop','Danish Stemmed'),
1256 ('dutch_nostop','Dutch Stemmed'),
1257 ('english_nostop','English Stemmed'),
1258 ('finnish_nostop','Finnish Stemmed'),
1259 ('french_nostop','French Stemmed'),
1260 ('german_nostop','German Stemmed'),
1261 ('hungarian_nostop','Hungarian Stemmed'),
1262 ('italian_nostop','Italian Stemmed'),
1263 ('norwegian_nostop','Norwegian Stemmed'),
1264 ('portuguese_nostop','Portuguese Stemmed'),
1265 ('romanian_nostop','Romanian Stemmed'),
1266 ('russian_nostop','Russian Stemmed'),
1267 ('spanish_nostop','Spanish Stemmed'),
1268 ('swedish_nostop','Swedish Stemmed'),
1269 ('turkish_nostop','Turkish Stemmed');
-- ... and the default per-class mapping: exact-token 'simple' at weight A,
-- stemmed English at weight C (identifiers get exact matching only).
1271 INSERT INTO config.metabib_class_ts_map(field_class, ts_config, index_weight, always) VALUES
1272 ('keyword','simple','A',true),
1273 ('keyword','english_nostop','C',true),
1274 ('title','simple','A',true),
1275 ('title','english_nostop','C',true),
1276 ('author','simple','A',true),
1277 ('author','english_nostop','C',true),
1278 ('series','simple','A',true),
1279 ('series','english_nostop','C',true),
1280 ('subject','simple','A',true),
1281 ('subject','english_nostop','C',true),
1282 ('identifier','simple','A',true);
-- Relevance "bump" multiplier (plperlu): given the search terms, a field
-- value, and parallel arrays of bump names and multipliers, returns the
-- product of every multiplier whose named condition matches the value.
-- Supported bumps: first_word (value begins with first term), full_match
-- (value is exactly the joined terms), word_order (terms appear in order).
-- NOTE(review): $retval initialization and the closing brace/return are on
-- lines missing from this excerpt.
1284 CREATE OR REPLACE FUNCTION evergreen.rel_bump(terms TEXT[], value TEXT, bumps TEXT[], mults NUMERIC[]) RETURNS NUMERIC AS
1287 my ($terms,$value,$bumps,$mults) = @_;
1291 for (my $id = 0; $id < @$bumps; $id++) {
1292 if ($bumps->[$id] eq 'first_word') {
1293 $retval *= $mults->[$id] if ($value =~ /^$terms->[0]/);
1294 } elsif ($bumps->[$id] eq 'full_match') {
1295 my $fullmatch = join(' ', @$terms);
1296 $retval *= $mults->[$id] if ($value =~ /^$fullmatch$/);
1297 } elsif ($bumps->[$id] eq 'word_order') {
1298 my $wordorder = join('.*', @$terms);
1299 $retval *= $mults->[$id] if ($value =~ /$wordorder/);
1303 $BODY$ LANGUAGE plperlu IMMUTABLE STRICT COST 100;
-- Reingest statements intentionally deferred to the supplemental script
-- (they rewrite every index row and can run for hours on large datasets).
1305 /* ** This happens in the supplemental script **
1307 UPDATE metabib.identifier_field_entry set value = value;
1308 UPDATE metabib.title_field_entry set value = value;
1309 UPDATE metabib.author_field_entry set value = value;
1310 UPDATE metabib.subject_field_entry set value = value;
1311 UPDATE metabib.keyword_field_entry set value = value;
1312 UPDATE metabib.series_field_entry set value = value;
1314 SELECT metabib.update_combined_index_vectors(id)
1315 FROM biblio.record_entry
-- 0758: Vandelay settings group plus a default record match set org
-- setting ('vms' = vandelay.match_set fieldmapper class).
1320 SELECT evergreen.upgrade_deps_block_check('0758', :eg_version);
1322 INSERT INTO config.settings_group (name, label) VALUES
1323 ('vandelay', 'Vandelay');
1325 INSERT INTO config.org_unit_setting_type (name, grp, label, datatype, fm_class) VALUES
1326 ('vandelay.default_match_set', 'vandelay', 'Default Record Match Set', 'link', 'vms');
-- 0759: hold-targeting proximity adjustments. Each row matches holds/copies
-- on any combination of the nullable criteria columns and either replaces
-- (absolute_adjustment) or offsets the baseline org-unit proximity.
1329 SELECT evergreen.upgrade_deps_block_check('0759', :eg_version);
1331 CREATE TABLE actor.org_unit_proximity_adjustment (
1332 id SERIAL PRIMARY KEY,
1333 item_circ_lib INT REFERENCES actor.org_unit (id),
1334 item_owning_lib INT REFERENCES actor.org_unit (id),
1335 copy_location INT REFERENCES asset.copy_location (id),
1336 hold_pickup_lib INT REFERENCES actor.org_unit (id),
1337 hold_request_lib INT REFERENCES actor.org_unit (id),
1338 pos INT NOT NULL DEFAULT 0,
1339 absolute_adjustment BOOL NOT NULL DEFAULT FALSE,
1340 prox_adjustment NUMERIC,
-- FK added later in this script (see the deferred ALTER below) so the
-- statement ordering relative to config.circ_modifier doesn't matter.
1341 circ_mod TEXT, -- REFERENCES config.circ_modifier (code),
-- At least one matching criterion must be set or the row matches nothing.
1342 CONSTRAINT prox_adj_criterium CHECK (COALESCE(item_circ_lib::TEXT,item_owning_lib::TEXT,copy_location::TEXT,hold_pickup_lib::TEXT,hold_request_lib::TEXT,circ_mod) IS NOT NULL)
1344 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (item_circ_lib,item_owning_lib,copy_location,hold_pickup_lib,hold_request_lib,circ_mod);
1345 CREATE INDEX prox_adj_circ_lib_idx ON actor.org_unit_proximity_adjustment (item_circ_lib);
1346 CREATE INDEX prox_adj_owning_lib_idx ON actor.org_unit_proximity_adjustment (item_owning_lib);
1347 CREATE INDEX prox_adj_copy_location_idx ON actor.org_unit_proximity_adjustment (copy_location);
1348 CREATE INDEX prox_adj_pickup_lib_idx ON actor.org_unit_proximity_adjustment (hold_pickup_lib);
1349 CREATE INDEX prox_adj_request_lib_idx ON actor.org_unit_proximity_adjustment (hold_request_lib);
1350 CREATE INDEX prox_adj_circ_mod_idx ON actor.org_unit_proximity_adjustment (circ_mod);
-- Walk from an org unit up to the root, returning each ancestor's id with
-- its distance (0 = the unit itself). Recursive-CTE base case is on a
-- missing line of this excerpt.
1352 CREATE OR REPLACE FUNCTION actor.org_unit_ancestors_distance( INT ) RETURNS TABLE (id INT, distance INT) AS $$
1353 WITH RECURSIVE org_unit_ancestors_distance(id, distance) AS (
1356 SELECT ou.parent_ou, ouad.distance+1
1357 FROM actor.org_unit ou JOIN org_unit_ancestors_distance ouad ON (ou.id = ouad.id)
1358 WHERE ou.parent_ou IS NOT NULL
1360 SELECT * FROM org_unit_ancestors_distance;
-- ROWS 1 is just a planner estimate hint, not a limit.
1361 $$ LANGUAGE SQL STABLE ROWS 1;
-- Compute the adjusted proximity between a copy and a hold's pickup lib:
-- start from the raw actor.org_unit_proximity, replace it with the best
-- matching absolute adjustment (if any), then add every matching relative
-- adjustment. copy_context_ou defaults to the copy's circ lib.
1363 CREATE OR REPLACE FUNCTION action.hold_copy_calculated_proximity(
1366 copy_context_ou INT DEFAULT NULL
1367 -- TODO maybe? hold_context_ou INT DEFAULT NULL. This would optionally
1368 -- support an "ahprox" measurement: adjust prox between copy circ lib and
1369 -- hold request lib, but I'm unsure whether to use this theoretical
1370 -- argument only in the baseline calculation or later in the other
1371 -- queries in this function.
1372 ) RETURNS NUMERIC AS $f$
1374 aoupa actor.org_unit_proximity_adjustment%ROWTYPE;
1375 ahr action.hold_request%ROWTYPE;
1376 acp asset.copy%ROWTYPE;
1377 acn asset.call_number%ROWTYPE;
1378 acl asset.copy_location%ROWTYPE;
1379 baseline_prox NUMERIC;
1389 SELECT * INTO ahr FROM action.hold_request WHERE id = ahr_id;
1390 SELECT * INTO acp FROM asset.copy WHERE id = acp_id;
1391 SELECT * INTO acn FROM asset.call_number WHERE id = acp.call_number;
1392 SELECT * INTO acl FROM asset.copy_location WHERE id = acp.location;
1394 IF copy_context_ou IS NULL THEN
1395 copy_context_ou := acp.circ_lib;
1398 -- First, gather the baseline proximity of "here" to pickup lib
1399 SELECT prox INTO baseline_prox FROM actor.org_unit_proximity WHERE from_org = copy_context_ou AND to_org = ahr.pickup_lib;
1401 -- Find any absolute adjustments, and set the baseline prox to that
1402 SELECT adj.* INTO aoupa
1403 FROM actor.org_unit_proximity_adjustment adj
1404 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1405 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
-- NOTE(review): this join is suspect on two counts: the ON clause tests
-- acn_ol.id (the owning-lib join) instead of acl_ol.id, and it compares an
-- org-unit id to adj.copy_location (a copy_location id). Looks like a
-- copy/paste bug -- confirm against the canonical 0759 script / later fixes.
1406 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1407 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1408 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1409 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1410 absolute_adjustment AND
1411 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
-- Rank candidate adjustments by total ancestor distance; unmatched
-- criteria are penalized with 999 so the most specific row wins.
1413 COALESCE(acp_cl.distance,999)
1414 + COALESCE(acn_ol.distance,999)
1415 + COALESCE(acl_ol.distance,999)
1416 + COALESCE(ahr_pl.distance,999)
1417 + COALESCE(ahr_rl.distance,999),
1422 baseline_prox := aoupa.prox_adjustment;
1425 -- Now find any relative adjustments, and change the baseline prox based on them
1428 FROM actor.org_unit_proximity_adjustment adj
1429 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1430 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
-- NOTE(review): same acn_ol.id / adj.copy_location mismatch as above.
1431 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1432 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1433 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1434 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1435 NOT absolute_adjustment AND
1436 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
1438 baseline_prox := baseline_prox + aoupa.prox_adjustment;
1441 RETURN baseline_prox;
1443 $f$ LANGUAGE PLPGSQL;
-- Attach the circ_mod FK promised by the inline comment in the CREATE
-- TABLE above; DEFERRABLE so bulk loads can insert in any order.
1445 ALTER TABLE actor.org_unit_proximity_adjustment
1446 ADD CONSTRAINT actor_org_unit_proximity_adjustment_circ_mod_fkey
1447 FOREIGN KEY (circ_mod) REFERENCES config.circ_modifier (code)
1448 DEFERRABLE INITIALLY DEFERRED;
-- Cache the calculated proximity on each hold<->copy mapping.
1450 ALTER TABLE action.hold_copy_map ADD COLUMN proximity NUMERIC;
-- 0760: configurable best-hold selection sort orders. Each column holds a
-- sort position (1 = most significant) or NULL if that criterion is unused.
1453 SELECT evergreen.upgrade_deps_block_check('0760', :eg_version);
1455 CREATE TABLE config.best_hold_order(
1456 id SERIAL PRIMARY KEY, -- (metadata)
1457 name TEXT UNIQUE, -- i18n (metadata)
1458 pprox INT, -- copy capture <-> pickup lib prox
1459 hprox INT, -- copy circ lib <-> request lib prox
1460 aprox INT, -- copy circ lib <-> pickup lib ADJUSTED prox on ahcm
1461 approx INT, -- copy capture <-> pickup lib ADJUSTED prox from function
1462 priority INT, -- group hold priority
1463 cut INT, -- cut-in-line
1464 depth INT, -- selection depth
1465 htime INT, -- time since last home-lib circ exceeds org-unit setting
1466 rtime INT, -- request time
1467 shtime INT -- time since copy last trip home exceeds org-unit setting
1470 -- At least one of these columns must contain a non-null value
1471 ALTER TABLE config.best_hold_order ADD CHECK ((
1472 pprox IS NOT NULL OR
1473 hprox IS NOT NULL OR
1474 aprox IS NOT NULL OR
1475 priority IS NOT NULL OR
1477 depth IS NOT NULL OR
1478 htime IS NOT NULL OR
-- Stock sort orders; names for the first and fourth rows ("Traditional",
-- "FIFO") are on lines missing from this excerpt.
1482 INSERT INTO config.best_hold_order (
1484 pprox, aprox, priority, cut, depth, rtime, htime, hprox
1487 1, 2, 3, 4, 5, 6, 7, 8
1490 INSERT INTO config.best_hold_order (
1492 hprox, pprox, aprox, priority, cut, depth, rtime, htime
1494 'Traditional with Holds-always-go-home',
1495 1, 2, 3, 4, 5, 6, 7, 8
1498 INSERT INTO config.best_hold_order (
1500 htime, hprox, pprox, aprox, priority, cut, depth, rtime
1502 'Traditional with Holds-go-home',
1503 1, 2, 3, 4, 5, 6, 7, 8
1506 INSERT INTO config.best_hold_order (
1508 priority, cut, rtime, depth, pprox, hprox, aprox, htime
1511 1, 2, 3, 4, 5, 6, 7, 8
1514 INSERT INTO config.best_hold_order (
1516 hprox, priority, cut, rtime, depth, pprox, aprox, htime
1518 'FIFO with Holds-always-go-home',
1519 1, 2, 3, 4, 5, 6, 7, 8
1522 INSERT INTO config.best_hold_order (
1524 htime, priority, cut, rtime, depth, pprox, aprox, hprox
1526 'FIFO with Holds-go-home',
1527 1, 2, 3, 4, 5, 6, 7, 8
-- Permission + org-unit settings governing best-hold capture order and the
-- holds-go-home interval, then migrate legacy circ.holds_fifo settings.
1530 INSERT INTO permission.perm_list (
1531 id, code, description
1534 'ADMIN_HOLD_CAPTURE_SORT',
1537 'Allows a user to make changes to best-hold selection sort order',
1543 INSERT INTO config.org_unit_setting_type (
1544 name, label, description, datatype, fm_class, update_perm, grp
1546 'circ.hold_capture_order',
-- The repeated setting-name literals below are i18n oils_i18n_gettext()
-- keys; the wrapper calls are on lines missing from this excerpt.
1548 'circ.hold_capture_order',
1549 'Best-hold selection sort order',
1554 'circ.hold_capture_order',
1555 'Defines the sort order of holds when selecting a hold to fill using a given copy at capture time',
1565 INSERT INTO config.org_unit_setting_type (
1566 name, label, description, datatype, update_perm, grp
1568 'circ.hold_go_home_interval',
1570 'circ.hold_go_home_interval',
1571 'Max foreign-circulation time',
1576 'circ.hold_go_home_interval',
1577 'Time a copy can spend circulating away from its circ lib before returning there to fill a hold (if one exists there)',
-- Seed a system-wide default at the root org unit (value on missing line).
1586 INSERT INTO actor.org_unit_setting (
1587 org_unit, name, value
1589 (SELECT id FROM actor.org_unit WHERE parent_ou IS NULL),
1590 'circ.hold_go_home_interval',
-- Convert any enabled circ.holds_fifo setting into the new FIFO capture
-- order. NOTE(review): value is set to a bare best_hold_order id while
-- org_unit_setting values are normally JSON-encoded -- confirm the
-- canonical script doesn't wrap this in a cast/encoding step.
1594 UPDATE actor.org_unit_setting SET
1595 name = 'circ.hold_capture_order',
1596 value = (SELECT id FROM config.best_hold_order WHERE name = 'FIFO')
1598 name = 'circ.holds_fifo' AND value ILIKE '%true%';
-- 0762: add global skip-* internal flags and redefine the reingest
-- function so each skip_* parameter is OR-ed with its corresponding flag
-- (b_skip_*), letting admins disable index types system-wide.
1601 SELECT evergreen.upgrade_deps_block_check('0762', :eg_version);
1603 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_browse_indexing');
1604 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_search_indexing');
1605 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_facet_indexing');
1607 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1610 ind_data metabib.field_entry_template%ROWTYPE;
1611 mbe_row metabib.browse_entry%ROWTYPE;
-- NULLIF(param, FALSE) yields NULL when the caller passed FALSE, so
-- COALESCE falls through to the internal flag; an explicit TRUE wins.
1618 SELECT COALESCE(NULLIF(skip_facet, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_facet_indexing' AND enabled)) INTO b_skip_facet;
1619 SELECT COALESCE(NULLIF(skip_browse, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_browse_indexing' AND enabled)) INTO b_skip_browse;
1620 SELECT COALESCE(NULLIF(skip_search, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_search_indexing' AND enabled)) INTO b_skip_search;
1622 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
1624 IF NOT b_skip_search THEN
1625 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1626 -- RAISE NOTICE 'Emptying out %', fclass.name;
1627 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1630 IF NOT b_skip_facet THEN
1631 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1633 IF NOT b_skip_browse THEN
1634 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
1638 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
1639 IF ind_data.field < 0 THEN
1640 ind_data.field = -1 * ind_data.field;
1643 IF ind_data.facet_field AND NOT b_skip_facet THEN
1644 INSERT INTO metabib.facet_entry (field, source, value)
1645 VALUES (ind_data.field, ind_data.source, ind_data.value);
1648 IF ind_data.browse_field AND NOT b_skip_browse THEN
1649 -- A caveat about this SELECT: this should take care of replacing
1650 -- old mbe rows when data changes, but not if normalization (by
1651 -- which I mean specifically the output of
1652 -- evergreen.oils_tsearch2()) changes. It may or may not be
1653 -- expensive to add a comparison of index_vector to index_vector
1654 -- to the WHERE clause below.
1655 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
1657 mbe_id := mbe_row.id;
1659 INSERT INTO metabib.browse_entry (value) VALUES
1660 (metabib.browse_normalize(ind_data.value, ind_data.field));
1661 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1664 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1665 VALUES (mbe_id, ind_data.field, ind_data.source);
1668 IF ind_data.search_field AND NOT b_skip_search THEN
1670 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1672 quote_literal(ind_data.field) || $$, $$ ||
1673 quote_literal(ind_data.source) || $$, $$ ||
1674 quote_literal(ind_data.value) ||
1680 IF NOT b_skip_search THEN
1681 PERFORM metabib.update_combined_index_vectors(bib_id);
1686 $func$ LANGUAGE PLPGSQL;
-- 0763: org setting to cap accruing fines at the max fine amount.
1689 SELECT evergreen.upgrade_deps_block_check('0763', :eg_version);
1691 INSERT INTO config.org_unit_setting_type (
1692 name, label, grp, datatype
1694 'circ.fines.truncate_to_max_fine',
1695 'Truncate fines to max fine amount',
-- 0765: per-provider default copy count for acquisitions.
1702 SELECT evergreen.upgrade_deps_block_check('0765', :eg_version);
1704 ALTER TABLE acq.provider
1705 ADD COLUMN default_copy_count INTEGER NOT NULL DEFAULT 0;
-- 0768: rank an org unit for holdings-display sorting. Lower is better:
-- exact search-lib match (-20000) beats pref-lib match (-10000) beats
-- descendants of pref lib (distance - 5000) beats plain descendants of
-- search lib (distance); everything else gets the fallback (missing line).
1708 SELECT evergreen.upgrade_deps_block_check('0768', :eg_version);
1710 CREATE OR REPLACE FUNCTION evergreen.rank_ou(lib INT, search_lib INT, pref_lib INT DEFAULT NULL)
1711 RETURNS INTEGER AS $$
1714 -- lib matches search_lib
1715 (SELECT CASE WHEN $1 = $2 THEN -20000 END),
1717 -- lib matches pref_lib
1718 (SELECT CASE WHEN $1 = $3 THEN -10000 END),
1721 -- pref_lib is a child of search_lib and lib is a child of pref lib.
1722 (SELECT distance - 5000
1723 FROM actor.org_unit_descendants_distance($3)
1724 WHERE id = $1 AND $3 IN (
1725 SELECT id FROM actor.org_unit_descendants($2))),
1727 -- lib is a child of search_lib
1728 (SELECT distance FROM actor.org_unit_descendants_distance($2) WHERE id = $1),
1730 -- all others pay cash
1733 $$ LANGUAGE SQL STABLE;
-- 0769: replace ranked_volumes with a version taking an includes TEXT[]
-- parameter; signature change forces a DROP of the old 6-arg form first.
1738 SELECT evergreen.upgrade_deps_block_check('0769', :eg_version);
1740 DROP FUNCTION IF EXISTS
1741 evergreen.ranked_volumes(BIGINT, INT, INT, HSTORE, HSTORE, INT);
-- Returns call numbers for a bib ($1) under an org ($2), ranked by
-- rank_ou()/rank_cp_status(), optionally filtering to OPAC-visible copies
-- when 'exclude_invisible_acn' is requested in $7.
1743 CREATE OR REPLACE FUNCTION evergreen.ranked_volumes(
1746 depth INT DEFAULT NULL,
1747 slimit HSTORE DEFAULT NULL,
1748 soffset HSTORE DEFAULT NULL,
1749 pref_lib INT DEFAULT NULL,
1750 includes TEXT[] DEFAULT NULL::TEXT[]
1751 ) RETURNS TABLE (id BIGINT, name TEXT, label_sortkey TEXT, rank BIGINT) AS $$
1752 SELECT ua.id, ua.name, ua.label_sortkey, MIN(ua.rank) AS rank FROM (
1753 SELECT acn.id, aou.name, acn.label_sortkey,
1754 evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status),
1756 FROM asset.call_number acn
1757 JOIN asset.copy acp ON (acn.id = acp.call_number)
-- Depth defaults to the deepest org-unit-type depth under $2 (the
-- COALESCE fallback subquery is partially on missing lines).
1758 JOIN actor.org_unit_descendants( $2, COALESCE(
1761 FROM actor.org_unit_type aout
1762 INNER JOIN actor.org_unit ou ON ou_type = aout.id
1765 ) AS aou ON (acp.circ_lib = aou.id)
1766 WHERE acn.record = $1
1767 AND acn.deleted IS FALSE
1768 AND acp.deleted IS FALSE
1769 AND CASE WHEN ('exclude_invisible_acn' = ANY($7)) THEN
1772 FROM asset.opac_visible_copies
1773 WHERE copy_id = acp.id AND record = acn.record
1775 GROUP BY acn.id, acp.status, aou.name, acn.label_sortkey, aou.id
1777 ORDER BY evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status)
1780 GROUP BY ua.id, ua.name, ua.label_sortkey
1781 ORDER BY rank, ua.name, ua.label_sortkey
-- slimit/soffset are hstores keyed by class ('acn' here).
1782 LIMIT ($4 -> 'acn')::INT
1783 OFFSET ($5 -> 'acn')::INT;
1785 LANGUAGE SQL STABLE;
-- unapi XML holdings feed for a bib: copy counts (public/staff/pref-lib),
-- monograph parts, ranked volumes + located URIs, serial subscriptions,
-- and peer-bib ("foreign") copies, each gated by membership in the
-- includes array ($5). Heavily elided in this excerpt.
1787 CREATE OR REPLACE FUNCTION unapi.holdings_xml (
1791 depth INT DEFAULT NULL,
1792 includes TEXT[] DEFAULT NULL::TEXT[],
1793 slimit HSTORE DEFAULT NULL,
1794 soffset HSTORE DEFAULT NULL,
1795 include_xmlns BOOL DEFAULT TRUE,
1796 pref_lib INT DEFAULT NULL
1802 CASE WHEN $8 THEN 'http://open-ils.org/spec/holdings/v1' ELSE NULL END AS xmlns,
1803 CASE WHEN ('bre' = ANY ($5)) THEN 'tag:open-ils.org:U2@bre/' || $1 || '/' || $3 ELSE NULL END AS id,
1804 (SELECT record_has_holdable_copy FROM asset.record_has_holdable_copy($1)) AS has_holdable
-- Copy-count elements, one per org in the ancestor path, for each of the
-- three viewpoints.
1808 (SELECT XMLAGG(XMLELEMENT::XML) FROM (
1811 XMLATTRIBUTES('public' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1813 FROM asset.opac_ou_record_copy_count($2, $1)
1817 XMLATTRIBUTES('staff' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1819 FROM asset.staff_ou_record_copy_count($2, $1)
1823 XMLATTRIBUTES('pref_lib' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1825 FROM asset.opac_ou_record_copy_count($9, $1)
1830 WHEN ('bmp' = ANY ($5)) THEN
1832 name monograph_parts,
1833 (SELECT XMLAGG(bmp) FROM (
-- Strip 'bre'/'holdings_xml' from includes before recursing to prevent
-- infinite unapi expansion.
1834 SELECT unapi.bmp( id, 'xml', 'monograph_part', evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'bre'), 'holdings_xml'), $3, $4, $6, $7, FALSE)
1835 FROM biblio.monograph_part
1843 (SELECT XMLAGG(acn ORDER BY rank, name, label_sortkey) FROM (
1845 SELECT unapi.acn(y.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), y.rank, name, label_sortkey
1846 FROM evergreen.ranked_volumes($1, $2, $4, $6, $7, $9, $5) AS y
-- Located URIs are appended with rank 0 so they sort first.
1849 SELECT unapi.acn(uris.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), 0, name, label_sortkey
1850 FROM evergreen.located_uris($1, $2, $9) AS uris
1853 CASE WHEN ('ssub' = ANY ($5)) THEN
1856 (SELECT XMLAGG(ssub) FROM (
1857 SELECT unapi.ssub(id,'xml','subscription','{}'::TEXT[], $3, $4, $6, $7, FALSE)
1858 FROM serial.subscription
1859 WHERE record_entry = $1
1863 CASE WHEN ('acp' = ANY ($5)) THEN
1865 name foreign_copies,
1866 (SELECT XMLAGG(acp) FROM (
1867 SELECT unapi.acp(p.target_copy,'xml','copy',evergreen.array_remove_item_by_value($5,'acp'), $3, $4, $6, $7, FALSE)
1868 FROM biblio.peer_bib_copy_map p
1869 JOIN asset.copy c ON (p.target_copy = c.id)
1870 WHERE NOT c.deleted AND p.peer_record = $1
1871 LIMIT ($6 -> 'acp')::INT
1872 OFFSET ($7 -> 'acp')::INT
1877 $F$ LANGUAGE SQL STABLE;
-- 0771: action/trigger hooks and validators for user barred/un-barred
-- events (hook keys/ids are on lines missing from this excerpt).
1881 SELECT evergreen.upgrade_deps_block_check('0771', :eg_version);
1883 INSERT INTO action_trigger.hook (
1891 'A user was barred by staff',
1895 INSERT INTO action_trigger.hook (
1903 'A user was un-barred by staff',
1907 INSERT INTO action_trigger.validator (
1912 'Tests if a patron is currently marked as barred'
1915 INSERT INTO action_trigger.validator (
1920 'Tests if a patron is currently not marked as barred'
-- 0772: soft-delete bibs via a DO INSTEAD rule; an optional internal flag
-- preserves metarecord mappings on delete.
1924 SELECT evergreen.upgrade_deps_block_check('0772', :eg_version);
1926 INSERT INTO config.internal_flag (name) VALUES ('ingest.metarecord_mapping.preserve_on_delete'); -- defaults to false/off
1928 DROP RULE IF EXISTS protect_bib_rec_delete ON biblio.record_entry;
1929 CREATE RULE protect_bib_rec_delete AS
1930 ON DELETE TO biblio.record_entry DO INSTEAD (
1931 UPDATE biblio.record_entry
1933 WHERE OLD.id = biblio.record_entry.id
1938 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
1940 transformed_xml TEXT;
1943 xfrm config.xml_transform%ROWTYPE;
1945 new_attrs HSTORE := ''::HSTORE;
1946 attr_def config.record_attr_definition%ROWTYPE;
1949 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
1950 PERFORM * FROM config.internal_flag WHERE
1951 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
1953 -- One needs to keep these around to support searches
1954 -- with the #deleted modifier, so one should turn on the named
1955 -- internal flag for that functionality.
1956 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
1957 DELETE FROM metabib.record_attr WHERE id = NEW.id;
1960 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
1961 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
1962 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
1963 RETURN NEW; -- and we're done
1966 IF TG_OP = 'UPDATE' THEN -- re-ingest?
1967 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
1969 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
1974 -- Record authority linking
1975 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
1977 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
1980 -- Flatten and insert the mfr data
1981 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
1983 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
1985 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
1986 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
1988 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
1990 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
1991 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
1992 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
1993 WHERE record = NEW.id
1994 AND tag LIKE attr_def.tag
1996 WHEN attr_def.sf_list IS NOT NULL
1997 THEN POSITION(subfield IN attr_def.sf_list) > 0
2004 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
2005 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
2007 ELSIF attr_def.xpath IS NOT NULL THEN -- and xpath expression
2009 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
2011 -- See if we can skip the XSLT ... it's expensive
2012 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
2013 -- Can't skip the transform
2014 IF xfrm.xslt <> '---' THEN
2015 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
2017 transformed_xml := NEW.marc;
2020 prev_xfrm := xfrm.name;
2023 IF xfrm.name IS NULL THEN
2024 -- just grab the marcxml (empty) transform
2025 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
2026 prev_xfrm := xfrm.name;
2029 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
2031 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
2032 SELECT m.value INTO attr_value
2033 FROM biblio.marc21_physical_characteristics(NEW.id) v
2034 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
2035 WHERE v.subfield = attr_def.phys_char_sf
2036 LIMIT 1; -- Just in case ...
2040 -- apply index normalizers to attr_value
2042 SELECT n.func AS func,
2043 n.param_count AS param_count,
2045 FROM config.index_normalizer n
2046 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
2047 WHERE attr = attr_def.name
2049 EXECUTE 'SELECT ' || normalizer.func || '(' ||
2050 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
2052 WHEN normalizer.param_count > 0
2053 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
2056 ')' INTO attr_value;
2060 -- Add the new value to the hstore
2061 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
2065 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivication
2066 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
2068 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
2074 -- Gather and insert the field entry data
2075 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
2077 -- Located URI magic
2078 IF TG_OP = 'INSERT' THEN
2079 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2081 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2084 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2086 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2090 -- (re)map metarecord-bib linking
2091 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
2092 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
2094 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2096 ELSE -- we're doing an update, and we're not deleted, remap
2097 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
2099 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2105 $func$ LANGUAGE PLPGSQL;
2108 -- Evergreen DB patch xxxx.data.authority_thesaurus_update.sql
2111 -- check whether patch can be applied
2112 SELECT evergreen.upgrade_deps_block_check('0773', :eg_version);
-- Seed an authority thesaurus entry keyed by a single blank character
-- ("no attempt to code"); control_set is intentionally NULL.
2115 INSERT INTO authority.thesaurus (code, name, control_set) VALUES
2116 (' ', oils_i18n_gettext(' ','Alternate no attempt to code','at','name'), NULL);
2120 SELECT evergreen.upgrade_deps_block_check('0774', :eg_version);
-- Patch 0774: per-org-unit login credentials for Z39.50 sources.
-- NOTE(review): the username/password column definitions (original lines
-- 2127-2128) are not visible in this excerpt.
2122 CREATE TABLE config.z3950_source_credentials (
2123 id SERIAL PRIMARY KEY,
2124 owner INTEGER NOT NULL REFERENCES actor.org_unit(id),
2125 source TEXT NOT NULL REFERENCES config.z3950_source(name) ON DELETE CASCADE ON UPDATE CASCADE DEFERRABLE INITIALLY DEFERRED,
2126 -- do some Z servers require a username but no password or vice versa?
-- At most one credentials row per (source, owner) pair.
2129 CONSTRAINT czsc_source_once_per_lib UNIQUE (source, owner)
2132 -- find the most relevant set of credentials for the Z source and org
-- Walks the org ancestry of $2 and returns the single credentials row for
-- source $1 owned by the deepest matching ancestor org unit.
2133 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_lookup
2134 (source TEXT, owner INTEGER)
2135 RETURNS config.z3950_source_credentials AS $$
2138 FROM config.z3950_source_credentials creds
2139 JOIN actor.org_unit aou ON (aou.id = creds.owner)
2140 JOIN actor.org_unit_type aout ON (aout.id = aou.ou_type)
2141 WHERE creds.source = $1 AND creds.owner IN (
2142 SELECT id FROM actor.org_unit_ancestors($2)
-- Deepest (most specific) owning org unit wins.
2144 ORDER BY aout.depth DESC LIMIT 1;
2146 $$ LANGUAGE SQL STABLE;
2148 -- since we are not exposing config.z3950_source_credentials
2149 -- via the IDL, providing a stored proc gives us a way to
2150 -- set values in the table via cstore
-- Upsert/delete helper for the (src, org) credentials row: blank username
-- AND password deletes an existing row; otherwise an existing row is
-- updated, and a new row is inserted when at least one value is non-blank.
2151 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_apply
2152 (src TEXT, org INTEGER, uname TEXT, passwd TEXT)
-- Probe for an existing credentials row for this org + source.
2155 PERFORM 1 FROM config.z3950_source_credentials
2156 WHERE owner = org AND source = src;
2159 IF COALESCE(uname, '') = '' AND COALESCE(passwd, '') = '' THEN
2160 DELETE FROM config.z3950_source_credentials
2161 WHERE owner = org AND source = src;
2163 UPDATE config.z3950_source_credentials
2164 SET username = uname, password = passwd
2165 WHERE owner = org AND source = src;
2168 IF COALESCE(uname, '') <> '' OR COALESCE(passwd, '') <> '' THEN
2169 INSERT INTO config.z3950_source_credentials
2170 (source, owner, username, password)
2171 VALUES (src, org, uname, passwd);
2175 $$ LANGUAGE PLPGSQL;
2180 SELECT evergreen.upgrade_deps_block_check('0775', :eg_version);
-- Patch 0775: rebuild the z3950_attr -> z3950_source foreign key so renames
-- and deletes of a source cascade, with a deferrable check.
2182 ALTER TABLE config.z3950_attr
2183 DROP CONSTRAINT IF EXISTS z3950_attr_source_fkey,
2184 ADD CONSTRAINT z3950_attr_source_fkey
2185 FOREIGN KEY (source)
2186 REFERENCES config.z3950_source(name)
2189 DEFERRABLE INITIALLY DEFERRED;
2192 SELECT evergreen.upgrade_deps_block_check('0776', :eg_version);
-- Patch 0776: flag a lineitem attribute as the ordering identifier, plus
-- the two permissions controlling who may add / change that identifier.
2194 ALTER TABLE acq.lineitem_attr
2195 ADD COLUMN order_ident BOOLEAN NOT NULL DEFAULT FALSE;
2197 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2199 'ACQ_ADD_LINEITEM_IDENTIFIER',
2202 'When granted, newly added lineitem identifiers will propagate to linked bib records',
2208 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2210 'ACQ_SET_LINEITEM_IDENTIFIER',
2213 'Allows staff to change the lineitem identifier',
2220 SELECT evergreen.upgrade_deps_block_check('0777', :eg_version);
2222 -- Listed here for reference / ease of access. The update
2223 -- is not applied here (see the WHERE clause).
-- Patch 0777: refreshed JEDI (EDI ORDERS) A/T template for event
-- definition 23. Everything between "SET template =" and the WHERE clause
-- is one Template Toolkit string literal; do not edit it as SQL.
2224 UPDATE action_trigger.event_definition SET template =
2228 # extract some commonly used variables
2230 VENDOR_SAN = target.provider.san;
2231 VENDCODE = target.provider.edi_default.vendcode;
2232 VENDACCT = target.provider.edi_default.vendacct;
2233 ORG_UNIT_SAN = target.ordering_agency.mailing_address.san;
2235 # set the vendor / provider
2237 VENDOR_BT = 0; # Baker & Taylor
2240 VENDOR_MW_TAPE = 0; # Midwest Tape
2241 VENDOR_RB = 0; # Recorded Books
2242 VENDOR_ULS = 0; # ULS
2244 IF VENDOR_SAN == '1556150'; VENDOR_BT = 1;
2245 ELSIF VENDOR_SAN == '1697684'; VENDOR_BRODART = 1;
2246 ELSIF VENDOR_SAN == '1697978'; VENDOR_INGRAM = 1;
2247 ELSIF VENDOR_SAN == '2549913'; VENDOR_MW_TAPE = 1;
2248 ELSIF VENDOR_SAN == '1113984'; VENDOR_RB = 1;
2249 ELSIF VENDOR_SAN == '1699342'; VENDOR_ULS = 1;
2252 # if true, pass the PO name as a secondary identifier
2253 # RFF+LI:<name>/li_id
2259 # GIR configuration --------------------------------------
2261 INC_COPIES = 1; # copies on/off switch
2266 INC_COLLECTION_CODE = 1;
2281 INC_COLLECTION_CODE = 0;
2285 # END GIR configuration ---------------------------------
2288 [%- BLOCK big_block -%]
2290 "recipient":"[% VENDOR_SAN %]",
2291 "sender":"[% ORG_UNIT_SAN %]",
2293 "ORDERS":[ "order", {
2295 "po_number":[% target.id %],
2297 [% IF INC_PO_NAME %]
2298 "po_name":"[% target.name | replace('\/', ' ') | replace('"', '\"') %]",
2301 "date":"[% date.format(date.now, '%Y%m%d') %]",
2305 {"id-qualifier": 91, "id":"[% ORG_UNIT_SAN %] [% VENDCODE %]"}
2307 {"id":"[% ORG_UNIT_SAN %]"},
2308 {"id-qualifier": 91, "id":"[% VENDACCT %]"}
2314 {"id-qualifier": 92, "id":"[% target.provider.id %]"}
2317 "currency":"[% target.provider.currency_type %]",
2320 [%- FOR li IN target.lineitems %]
2322 "line_index":"[% li.id %]",
2326 idqual = 'EN'; # default ISBN/UPC/EAN-13
2327 ident_attr = helpers.get_li_order_ident(li.attributes);
2329 idname = ident_attr.attr_name;
2330 idval = ident_attr.attr_value;
2331 IF idname == 'isbn' AND idval.length != 13;
2333 ELSIF idname == 'issn';
2340 {"id-qualifier":"[% idqual %]","id":"[% idval %]"}
2342 "price":[% li.estimated_unit_price || '0.00' %],
2344 {"BTI":"[% helpers.get_li_attr_jedi('title', '', li.attributes) %]"},
2345 {"BPU":"[% helpers.get_li_attr_jedi('publisher', '', li.attributes) %]"},
2346 {"BPD":"[% helpers.get_li_attr_jedi('pubdate', '', li.attributes) %]"},
2347 [% IF VENDOR_ULS -%]
2348 {"BEN":"[% helpers.get_li_attr_jedi('edition', '', li.attributes) %]"},
2349 {"BAU":"[% helpers.get_li_attr_jedi('author', '', li.attributes) %]"}
2351 {"BPH":"[% helpers.get_li_attr_jedi('pagination','', li.attributes) %]"}
2355 FOR note IN li.lineitem_notes;
2356 NEXT UNLESS note.vendor_public == 't';
2357 ftx_vals.push(note.value);
2359 IF VENDOR_BRODART; # look for copy-level spec code
2360 FOR lid IN li.lineitem_details;
2362 spec_note = lid.note.match('spec code ([a-zA-Z0-9_])');
2363 IF spec_note.0; ftx_vals.push(spec_note.0); END;
2367 IF xtra_ftx; ftx_vals.unshift(xtra_ftx); END;
2369 # BT & ULS want FTX+LIN for every LI, even if empty
2370 IF ((VENDOR_BT OR VENDOR_ULS) AND ftx_vals.size == 0);
2371 ftx_vals.unshift('');
2376 [% FOR note IN ftx_vals -%] "[% note %]"[% UNLESS loop.last %], [% END %][% END %]
2379 "quantity":[% li.lineitem_details.size %],
2381 [%- IF INC_COPIES -%]
2383 [%- compressed_copies = [];
2384 FOR lid IN li.lineitem_details;
2385 fund = lid.fund.code;
2386 item_type = lid.circ_modifier;
2387 callnumber = lid.cn_label;
2388 owning_lib = lid.owning_lib.shortname;
2389 location = lid.location;
2390 collection_code = lid.collection_code;
2392 # when we have real copy data, treat it as authoritative for some fields
2393 acp = lid.eg_copy_id;
2395 item_type = acp.circ_modifier;
2396 callnumber = acp.call_number.label;
2397 location = acp.location.name;
2401 # collapse like copies into groups w/ quantity
2404 IF !INC_COPY_ID; # INC_COPY_ID implies 1 copy per GIR
2405 FOR copy IN compressed_copies;
2406 IF (fund == copy.fund OR (!fund AND !copy.fund)) AND
2407 (item_type == copy.item_type OR (!item_type AND !copy.item_type)) AND
2408 (callnumber == copy.callnumber OR (!callnumber AND !copy.callnumber)) AND
2409 (owning_lib == copy.owning_lib OR (!owning_lib AND !copy.owning_lib)) AND
2410 (location == copy.location OR (!location AND !copy.location)) AND
2411 (collection_code == copy.collection_code OR (!collection_code AND !copy.collection_code));
2413 copy.quantity = copy.quantity + 1;
2420 compressed_copies.push({
2422 item_type => item_type,
2423 callnumber => callnumber,
2424 owning_lib => owning_lib,
2425 location => location,
2426 collection_code => collection_code,
2427 copy_id => lid.id, # for INC_COPY_ID
2432 FOR copy IN compressed_copies;
2434 # If we assume owning_lib is required and set,
2435 # it is safe to prepend each following copy field w/ a ","
2437 # B&T EDI requires expected GIR fields to be
2438 # present regardless of whether a value exists.
2439 # some fields are required to have a value in ACQ,
2440 # though, so they are not forced into place below.
2442 %]{[%- IF INC_OWNING_LIB AND copy.owning_lib %] "owning_lib":"[% copy.owning_lib %]"[% END -%]
2443 [%- IF INC_FUND AND copy.fund %],"fund":"[% copy.fund %]"[% END -%]
2444 [%- IF INC_CALLNUMBER AND (VENDOR_BT OR copy.callnumber) %],"call_number":"[% copy.callnumber %]"[% END -%]
2445 [%- IF INC_ITEM_TYPE AND (VENDOR_BT OR copy.item_type) %],"item_type":"[% copy.item_type %]"[% END -%]
2446 [%- IF INC_LOCATION AND copy.location %],"copy_location":"[% copy.location %]"[% END -%]
2447 [%- IF INC_COLLECTION_CODE AND (VENDOR_BT OR copy.collection_code) %],"collection_code":"[% copy.collection_code %]"[% END -%]
2448 [%- IF INC_QUANTITY %],"quantity":"[% copy.quantity %]"[% END -%]
2449 [%- IF INC_COPY_ID %],"copy_id":"[% copy.copy_id %]" [% END %]}[% ',' UNLESS loop.last -%]
2450 [%- END -%] [%# FOR compressed_copies -%]
2452 [%- END -%] [%# IF INC_COPIES %]
2454 }[% UNLESS loop.last %],[% END -%]
2456 [% END %] [%# END lineitems %]
2458 "line_items":[% target.lineitems.size %]
2459 }] [%# close ORDERS array %]
2460 }] [%# close body array %]
2463 [% tempo = PROCESS big_block; helpers.escape_json(tempo) %]
2465 WHERE ID = 23 AND FALSE; -- remove 'AND FALSE' to apply this update
2468 -- lineitem worksheet
-- Patch 0777 (cont.): refreshed Purchase Order HTML worksheet template for
-- event definition 4. The body below is a single Template Toolkit string
-- literal; do not edit it as SQL. Unlike the JEDI update above, this one
-- IS applied (no 'AND FALSE' in the WHERE clause).
2469 UPDATE action_trigger.event_definition SET template =
2473 # find a lineitem attribute by name and optional type
2475 FOR attr IN li.attributes;
2476 IF attr.attr_name == attr_name;
2477 IF !attr_type OR attr_type == attr.attr_type;
2486 <h2>Purchase Order [% target.id %]</h2>
2488 date <b>[% date.format(date.now, '%Y%m%d') %]</b>
2492 table td { padding:5px; border:1px solid #aaa;}
2493 table { width:95%; border-collapse:collapse; }
2494 #vendor-notes { padding:5px; border:1px solid #aaa; }
2496 <table id='vendor-table'>
2498 <td valign='top'>Vendor</td>
2500 <div>[% target.provider.name %]</div>
2501 <div>[% target.provider.addresses.0.street1 %]</div>
2502 <div>[% target.provider.addresses.0.street2 %]</div>
2503 <div>[% target.provider.addresses.0.city %]</div>
2504 <div>[% target.provider.addresses.0.state %]</div>
2505 <div>[% target.provider.addresses.0.country %]</div>
2506 <div>[% target.provider.addresses.0.post_code %]</div>
2508 <td valign='top'>Ship to / Bill to</td>
2510 <div>[% target.ordering_agency.name %]</div>
2511 <div>[% target.ordering_agency.billing_address.street1 %]</div>
2512 <div>[% target.ordering_agency.billing_address.street2 %]</div>
2513 <div>[% target.ordering_agency.billing_address.city %]</div>
2514 <div>[% target.ordering_agency.billing_address.state %]</div>
2515 <div>[% target.ordering_agency.billing_address.country %]</div>
2516 <div>[% target.ordering_agency.billing_address.post_code %]</div>
2522 <fieldset id='vendor-notes'>
2523 <legend>Notes to the Vendor</legend>
2525 [% FOR note IN target.notes %]
2526 [% IF note.vendor_public == 't' %]
2527 <li>[% note.value %]</li>
2538 <th>ISBN or Item #</th>
2549 [% FOR li IN target.lineitems %]
2552 [% count = li.lineitem_details.size %]
2553 [% price = li.estimated_unit_price %]
2554 [% litotal = (price * count) %]
2555 [% subtotal = subtotal + litotal %]
2557 ident_attr = helpers.get_li_order_ident(li.attributes);
2558 SET ident_value = ident_attr.attr_value IF ident_attr;
2560 <td>[% target.id %]</td>
2561 <td>[% ident_value %]</td>
2562 <td>[% PROCESS get_li_attr attr_name = 'title' %]</td>
2563 <td>[% count %]</td>
2564 <td>[% price %]</td>
2565 <td>[% litotal %]</td>
2568 [% FOR note IN li.lineitem_notes %]
2569 [% IF note.vendor_public == 't' %]
2570 <li>[% note.value %]</li>
2578 <td/><td/><td/><td/>
2580 <td>[% subtotal %]</td>
2587 Total Line Item Count: [% target.lineitems.size %]
2589 WHERE ID = 4; -- PO HTML
2592 SELECT evergreen.upgrade_deps_block_check('0778', :eg_version);
-- Extract a set of MARC field values from table $1, row id $2, via XPath
-- $3; when regex $4 is non-NULL it is stripped from each extracted value.
2594 CREATE OR REPLACE FUNCTION extract_marc_field_set
2595 (TEXT, BIGINT, TEXT, TEXT) RETURNS SETOF TEXT AS $$
2603 FROM oils_xpath_table(
2604 'id', 'marc', $1, $3, 'id = ' || $2)
2605 AS t(id int, t text))x
2607 IF $4 IS NOT NULL THEN
2608 SELECT INTO output (SELECT regexp_replace(output, $4, '', 'g'));
2614 $$ LANGUAGE PLPGSQL IMMUTABLE;
-- Convenience wrapper bound to acq.lineitem rows.
2617 CREATE OR REPLACE FUNCTION
2618 public.extract_acq_marc_field_set ( BIGINT, TEXT, TEXT)
2619 RETURNS SETOF TEXT AS $$
2620 SELECT extract_marc_field_set('acq.lineitem', $1, $2, $3);
-- Trigger function: (re)generate acq.lineitem_attr rows for a lineitem
-- from its MARC, driven by the acq.lineitem_*_attr_definition tables.
2624 CREATE OR REPLACE FUNCTION public.ingest_acq_marc ( ) RETURNS TRIGGER AS $function$
-- tableoid identifies which concrete definition table each row came from.
2633 FOR adef IN SELECT *,tableoid FROM acq.lineitem_attr_definition LOOP
2635 SELECT relname::TEXT INTO atype FROM pg_class WHERE oid = adef.tableoid;
-- usr/local attribute definitions are managed by staff, not by MARC ingest.
2637 IF (atype NOT IN ('lineitem_usr_attr_definition','lineitem_local_attr_definition')) THEN
2638 IF (atype = 'lineitem_provider_attr_definition') THEN
2639 SELECT provider INTO prov FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
-- Provider-specific definitions only apply to that provider's lineitems.
2640 CONTINUE WHEN NEW.provider IS NULL OR prov <> NEW.provider;
2643 IF (atype = 'lineitem_provider_attr_definition') THEN
2644 SELECT xpath INTO xpath_string FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
2645 ELSIF (atype = 'lineitem_marc_attr_definition') THEN
2646 SELECT xpath INTO xpath_string FROM acq.lineitem_marc_attr_definition WHERE id = adef.id;
2647 ELSIF (atype = 'lineitem_generated_attr_definition') THEN
2648 SELECT xpath INTO xpath_string FROM acq.lineitem_generated_attr_definition WHERE id = adef.id;
-- Strip a trailing /text() (or //text()) step so the xpath can be
-- suffixed with a positional predicate below.
2651 xpath_string := REGEXP_REPLACE(xpath_string,$re$//?text\(\)$$re$,'');
2653 IF (adef.code = 'title' OR adef.code = 'author') THEN
2654 -- title and author should not be split
2655 -- FIXME: once oils_xpath can grok XPATH 2.0 functions, we can use
2656 -- string-join in the xpath and remove this special case
2657 SELECT extract_acq_marc_field(id, xpath_string, adef.remove) INTO value FROM acq.lineitem WHERE id = NEW.id;
2658 IF (value IS NOT NULL AND value <> '') THEN
2659 INSERT INTO acq.lineitem_attr (lineitem, definition, attr_type, attr_name, attr_value)
2660 VALUES (NEW.id, adef.id, atype, adef.code, value);
2665 -- each application of the regex may produce multiple values
2667 SELECT * FROM extract_acq_marc_field_set(
2668 NEW.id, xpath_string || '[' || pos || ']', adef.remove)
2671 IF (value IS NOT NULL AND value <> '') THEN
2672 INSERT INTO acq.lineitem_attr
2673 (lineitem, definition, attr_type, attr_name, attr_value)
2674 VALUES (NEW.id, adef.id, atype, adef.code, value);
2692 $function$ LANGUAGE PLPGSQL;
2695 SELECT evergreen.upgrade_deps_block_check('0779', :eg_version);
-- Patch 0779: group Vandelay bib "trash field" specs so a whole group can
-- be applied at import time; group labels are unique per owning org unit.
2697 CREATE TABLE vandelay.import_bib_trash_group(
2698 id SERIAL PRIMARY KEY,
2699 owner INT NOT NULL REFERENCES actor.org_unit(id),
2700 label TEXT NOT NULL, --i18n
2701 always_apply BOOLEAN NOT NULL DEFAULT FALSE,
2702 CONSTRAINT vand_import_bib_trash_grp_owner_label UNIQUE (owner, label)
2705 -- otherwise, the ALTER TABLE statement below
2706 -- will fail with pending trigger events.
2707 SET CONSTRAINTS ALL IMMEDIATE;
2709 ALTER TABLE vandelay.import_bib_trash_fields
2710 -- allow null-able for now..
2711 ADD COLUMN grp INTEGER REFERENCES vandelay.import_bib_trash_group;
2713 -- add any existing trash_fields to "Legacy" groups (one per unique field
2714 -- owner) as part of the upgrade, since grp is now required.
2715 -- note that vandelay.import_bib_trash_fields was never used before,
2716 -- so in most cases this should be a no-op.
-- Back-fill grouping data for any pre-existing trash fields, since grp is
-- about to become NOT NULL. vandelay.import_bib_trash_fields was never
-- used before this upgrade, so in most installations this is a no-op.

-- One "Legacy" group per distinct owner of an existing trash field.
-- (DISTINCT applies to the whole select list; the parenthesized
-- DISTINCT(owner) form of the original was misleading.)
INSERT INTO vandelay.import_bib_trash_group (owner, label)
    SELECT DISTINCT owner, 'Legacy' FROM vandelay.import_bib_trash_fields;

-- Point each existing trash field at its owner's "Legacy" group.  Matching
-- on the label as well as the owner keeps the join deterministic even if
-- an owner already has other trash-field groups.
UPDATE vandelay.import_bib_trash_fields field SET grp = tgroup.id
    FROM vandelay.import_bib_trash_group tgroup
    WHERE tgroup.owner = field.owner
      AND tgroup.label = 'Legacy';
2725 ALTER TABLE vandelay.import_bib_trash_fields
2726 -- now that have values, we can make this non-null
2727 ALTER COLUMN grp SET NOT NULL,
2728 -- drop outdated constraint
2729 DROP CONSTRAINT IF EXISTS vand_import_bib_trash_fields_idx,
2730 -- owner is implied by the grp
-- NOTE(review): the clause acting on the owner column (original line 2731,
-- presumably DROP COLUMN owner) is not visible in this excerpt.
2732 -- make grp+field unique
2733 ADD CONSTRAINT vand_import_bib_trash_fields_once_per UNIQUE (grp, field);
2736 SELECT evergreen.upgrade_deps_block_check('0780', :eg_version);
-- Patch 0780: let a distribution formula entry carry a fund, circulation
-- modifier, and collection code alongside its existing fields.
2738 ALTER TABLE acq.distribution_formula_entry
2739 ADD COLUMN fund INT REFERENCES acq.fund (id),
2740 ADD COLUMN circ_modifier TEXT REFERENCES config.circ_modifier (code),
2741 ADD COLUMN collection_code TEXT ;
2744 -- support option to roll distribution formula funds
-- acq.rollover_funds_by_org_tree: close out one fiscal year's funds and
-- propagate them into the next year for an org unit (optionally including
-- its descendants), optionally moving money (vs. encumbrances only), and
-- optionally re-pointing distribution formula entries at the new funds.
-- Raises exceptions for NULL/out-of-range year, NULL user, NULL/invalid
-- org unit, and for encumbrance-only rollover where the org setting
-- 'acq.fund.allow_rollover_without_money' is not enabled.
2745 CREATE OR REPLACE FUNCTION acq.rollover_funds_by_org_tree(
2748 org_unit_id INTEGER,
2749 encumb_only BOOL DEFAULT FALSE,
2750 include_desc BOOL DEFAULT TRUE
2751 ) RETURNS VOID AS $$
2755 new_year INT := old_year + 1;
2758 xfer_amount NUMERIC := 0;
2762 roll_distrib_forms BOOL;
2768 IF old_year IS NULL THEN
2769 RAISE EXCEPTION 'Input year argument is NULL';
2770 ELSIF old_year NOT BETWEEN 2008 and 2200 THEN
2771 RAISE EXCEPTION 'Input year is out of range';
2774 IF user_id IS NULL THEN
2775 RAISE EXCEPTION 'Input user id argument is NULL';
2778 IF org_unit_id IS NULL THEN
2779 RAISE EXCEPTION 'Org unit id argument is NULL';
2782 -- Validate the org unit
2787 WHERE id = org_unit_id;
2789 IF org_found IS NULL THEN
2790 RAISE EXCEPTION 'Org unit id % is invalid', org_unit_id;
2791 ELSIF encumb_only THEN
2792 SELECT INTO perm_ous value::BOOL FROM
2793 actor.org_unit_ancestor_setting(
2794 'acq.fund.allow_rollover_without_money', org_unit_id
2796 IF NOT FOUND OR NOT perm_ous THEN
2797 RAISE EXCEPTION 'Encumbrance-only rollover not permitted at org %', org_unit_id;
2802 -- Loop over the propagable funds to identify the details
2803 -- from the old fund plus the id of the new one, if it exists.
2807 oldf.id AS old_fund,
2813 newf.id AS new_fund_id
2816 LEFT JOIN acq.fund AS newf
2817 ON ( oldf.code = newf.code )
2819 oldf.year = old_year
2821 AND newf.year = new_year
2822 AND ( ( include_desc AND oldf.org IN ( SELECT id FROM actor.org_unit_descendants( org_unit_id ) ) )
2823 OR (NOT include_desc AND oldf.org = org_unit_id ) )
2825 --RAISE NOTICE 'Processing fund %', roll_fund.old_fund;
2827 IF roll_fund.new_fund_id IS NULL THEN
2829 -- The old fund hasn't been propagated yet. Propagate it now.
2831 INSERT INTO acq.fund (
2839 balance_warning_percent,
2840 balance_stop_percent
2845 roll_fund.currency_type,
2849 roll_fund.balance_warning_percent,
2850 roll_fund.balance_stop_percent
2852 RETURNING id INTO new_fund;
2854 new_fund = roll_fund.new_fund_id;
2857 -- Determine the amount to transfer
2861 FROM acq.fund_spent_balance
2862 WHERE fund = roll_fund.old_fund;
2864 IF xfer_amount <> 0 THEN
2865 IF NOT encumb_only AND roll_fund.rollover THEN
2867 -- Transfer balance from old fund to new
2869 --RAISE NOTICE 'Transferring % from fund % to %', xfer_amount, roll_fund.old_fund, new_fund;
2871 PERFORM acq.transfer_fund(
2881 -- Transfer balance from old fund to the void
2883 -- RAISE NOTICE 'Transferring % from fund % to the void', xfer_amount, roll_fund.old_fund;
2885 PERFORM acq.transfer_fund(
2891 'Rollover into the void'
2896 IF roll_fund.rollover THEN
2898 -- Move any lineitems from the old fund to the new one
2899 -- where the associated debit is an encumbrance.
2901 -- Any other tables tying expenditure details to funds should
2902 -- receive similar treatment. At this writing there are none.
2904 UPDATE acq.lineitem_detail
2907 fund = roll_fund.old_fund -- this condition may be redundant
2913 fund = roll_fund.old_fund
2917 -- Move encumbrance debits from the old fund to the new fund
2919 UPDATE acq.fund_debit
2922 fund = roll_fund.old_fund
2926 -- Rollover distribution formulae funds
2927 SELECT INTO roll_distrib_forms value::BOOL FROM
2928 actor.org_unit_ancestor_setting(
2929 'acq.fund.rollover_distrib_forms', org_unit_id
2932 IF roll_distrib_forms THEN
2933 UPDATE acq.distribution_formula_entry
-- BUGFIX: use new_fund, not roll_fund.new_fund_id. When the old fund was
-- just propagated above, roll_fund.new_fund_id is NULL (the new id lives
-- in new_fund via RETURNING), so the original would null out the formula
-- entry's fund. new_fund holds the correct id in both branches.
2934 SET fund = new_fund
2935 WHERE fund = roll_fund.old_fund;
2939 -- Mark old fund as inactive, now that we've closed it
2943 WHERE id = roll_fund.old_fund;
2946 $$ LANGUAGE plpgsql;
2950 SELECT evergreen.upgrade_deps_block_check('0781', :eg_version);
-- Patch 0781: org-unit setting controlling whether fiscal rollover also
-- re-points distribution formula entries at the new year's funds (see
-- acq.rollover_funds_by_org_tree).
2952 INSERT INTO config.org_unit_setting_type
2953 (name, label, description, grp, datatype)
2955 'acq.fund.rollover_distrib_forms',
2957 'acq.fund.rollover_distrib_forms',
2958 'Rollover Distribution Formulae Funds',
2963 'acq.fund.rollover_distrib_forms',
-- Fix user-facing typo in the description: "formalae" -> "formulae".
2964 'During fiscal rollover, update distribution formulae to use new funds',
2973 -- No transaction needed. This can be run on a live, production server.
2974 SELECT evergreen.upgrade_deps_block_check('0782', :eg_version);
2976 /* ** Handled by the supplemental script ** */
-- Patch 0782 is advisory only: the CREATE INDEX CONCURRENTLY statements
-- below stay commented out here (CONCURRENTLY cannot run inside a
-- transaction block) and are applied by the supplemental script instead.
2977 -- On a heavily used system, user activity lookup is painful. This is used
2978 -- on the patron display in the staff client.
2980 -- Measured speed increase: ~2s -> .01s
2981 -- CREATE INDEX CONCURRENTLY usr_activity_usr_idx on actor.usr_activity (usr);
2983 -- Finding open holds, often as a subquery within larger hold-related logic,
2984 -- can be sped up with the following.
2986 -- Measured speed increase: ~3s -> .02s
2987 -- CREATE INDEX CONCURRENTLY hold_request_open_idx on action.hold_request (id) where cancel_time IS NULL AND fulfillment_time IS NULL;
2989 -- Hold queue position is a particularly difficult thing to calculate
2990 -- efficiently. Recent changes in the query structure now allow some
2991 -- optimization via indexing. These do that.
2993 -- Measured speed increase: ~6s -> ~0.4s
2994 -- CREATE INDEX CONCURRENTLY cp_available_by_circ_lib_idx on asset.copy (circ_lib) where status IN (0,7);
2995 -- CREATE INDEX CONCURRENTLY hold_request_current_copy_before_cap_idx on action.hold_request (current_copy) where capture_time IS NULL AND cancel_time IS NULL;
2997 -- After heavy use, fetching EDI messages becomes time consuming. The following
2998 -- index addresses that for large-data environments.
3000 -- Measured speed increase: ~3s -> .1s
3001 -- CREATE INDEX CONCURRENTLY edi_message_account_status_idx on acq.edi_message (account,status);
3003 -- After heavy use, fetching POs becomes time consuming. The following
3004 -- index addresses that for large-data environments.
3006 -- Measured speed increase: ~1.5s -> .1s
3007 -- CREATE INDEX CONCURRENTLY edi_message_po_idx on acq.edi_message (purchase_order);
3009 -- Related to EDI messages, fetching of certain A/T events benefit from specific
3010 -- indexing. This index is more general than necessary for the observed query
3011 -- but ends up speeding several other (already relatively fast) queries.
3013 -- Measured speed increase: ~2s -> .06s
3014 -- CREATE INDEX CONCURRENTLY atev_def_state on action_trigger.event (event_def,state);
3016 -- Retrieval of hold transit by hold id (for transit completion or cancelation)
3017 -- is slow in some query formulations.
3019 -- Measured speed increase: ~.5s -> .1s
3020 -- CREATE INDEX CONCURRENTLY hold_transit_copy_hold_idx on action.hold_transit_copy (hold);
3023 SELECT evergreen.upgrade_deps_block_check('0785', :eg_version);
-- Patch 0785: rebuild the proximity-adjustment uniqueness index as an
-- expression index. COALESCE maps NULL columns to sentinel values (-1 /
-- '') so rows differing only by NULLs still collide, since a plain UNIQUE
-- index treats NULLs as distinct from each other.
3025 DROP INDEX IF EXISTS actor.prox_adj_once_idx;
3027 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (
3028 COALESCE(item_circ_lib, -1),
3029 COALESCE(item_owning_lib, -1),
3030 COALESCE(copy_location, -1),
3031 COALESCE(hold_pickup_lib, -1),
3032 COALESCE(hold_request_lib, -1),
3033 COALESCE(circ_mod, ''),
3038 --Check if we can apply the upgrade.
3039 SELECT evergreen.upgrade_deps_block_check('0786', :eg_version);
-- Composite types used by the staged-search stored procedures that follow.
3042 CREATE TYPE search.search_result AS ( id BIGINT, rel NUMERIC, record INT, total INT, checked INT, visible INT, deleted INT, excluded INT );
3043 CREATE TYPE search.search_args AS ( id INT, field_class TEXT, field_name TEXT, table_alias TEXT, term TEXT, term_type TEXT );
3045 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3047 param_search_ou INT,
3050 param_statuses INT[],
3051 param_locations INT[],
3057 param_pref_ou INT DEFAULT NULL
3058 ) RETURNS SETOF search.search_result AS $func$
3061 current_res search.search_result%ROWTYPE;
3062 search_org_list INT[];
3063 luri_org_list INT[];
3072 core_cursor REFCURSOR;
3073 core_rel_query TEXT;
3075 total_count INT := 0;
3076 check_count INT := 0;
3077 deleted_count INT := 0;
3078 visible_count INT := 0;
3079 excluded_count INT := 0;
3083 check_limit := COALESCE( param_check, 1000 );
3084 core_limit := COALESCE( param_limit, 25000 );
3085 core_offset := COALESCE( param_offset, 0 );
3087 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
3089 IF param_search_ou > 0 THEN
3090 IF param_depth IS NOT NULL THEN
3091 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3093 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
3096 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
3098 ELSIF param_search_ou < 0 THEN
3099 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3101 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3102 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3103 luri_org_list := luri_org_list || tmp_int_list;
3106 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3108 ELSIF param_search_ou = 0 THEN
3109 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
3112 IF param_pref_ou IS NOT NULL THEN
3113 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3114 luri_org_list := luri_org_list || tmp_int_list;
3117 OPEN core_cursor FOR EXECUTE param_query;
3121 FETCH core_cursor INTO core_result;
3122 EXIT WHEN NOT FOUND;
3123 EXIT WHEN total_count >= core_limit;
3125 total_count := total_count + 1;
3127 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3129 check_count := check_count + 1;
3131 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3133 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3134 deleted_count := deleted_count + 1;
3139 FROM biblio.record_entry b
3140 JOIN config.bib_source s ON (b.source = s.id)
3141 WHERE s.transcendant
3142 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3145 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3146 visible_count := visible_count + 1;
3148 current_res.id = core_result.id;
3149 current_res.rel = core_result.rel;
3153 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3157 current_res.record = core_result.records[1];
3159 current_res.record = NULL;
3162 RETURN NEXT current_res;
3168 FROM asset.call_number cn
3169 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3170 JOIN asset.uri uri ON (map.uri = uri.id)
3171 WHERE NOT cn.deleted
3172 AND cn.label = '##URI##'
3174 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3175 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3176 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3180 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3181 visible_count := visible_count + 1;
3183 current_res.id = core_result.id;
3184 current_res.rel = core_result.rel;
3188 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3192 current_res.record = core_result.records[1];
3194 current_res.record = NULL;
3197 RETURN NEXT current_res;
3202 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3205 FROM asset.call_number cn
3206 JOIN asset.copy cp ON (cp.call_number = cn.id)
3207 WHERE NOT cn.deleted
3209 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3210 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3211 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3216 FROM biblio.peer_bib_copy_map pr
3217 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3218 WHERE NOT cp.deleted
3219 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3220 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3221 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3225 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3226 excluded_count := excluded_count + 1;
3233 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3236 FROM asset.call_number cn
3237 JOIN asset.copy cp ON (cp.call_number = cn.id)
3238 WHERE NOT cn.deleted
3240 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3241 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3242 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3247 FROM biblio.peer_bib_copy_map pr
3248 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3249 WHERE NOT cp.deleted
3250 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3251 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3252 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3256 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3257 excluded_count := excluded_count + 1;
3264 IF staff IS NULL OR NOT staff THEN
3267 FROM asset.opac_visible_copies
3268 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3269 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3274 FROM biblio.peer_bib_copy_map pr
3275 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3276 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3277 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3282 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3283 excluded_count := excluded_count + 1;
3291 FROM asset.call_number cn
3292 JOIN asset.copy cp ON (cp.call_number = cn.id)
3293 WHERE NOT cn.deleted
3295 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3296 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3302 FROM biblio.peer_bib_copy_map pr
3303 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3304 WHERE NOT cp.deleted
3305 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3306 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3312 FROM asset.call_number cn
3313 JOIN asset.copy cp ON (cp.call_number = cn.id)
3314 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3319 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3320 excluded_count := excluded_count + 1;
3329 visible_count := visible_count + 1;
3331 current_res.id = core_result.id;
3332 current_res.rel = core_result.rel;
3336 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3340 current_res.record = core_result.records[1];
3342 current_res.record = NULL;
3345 RETURN NEXT current_res;
3347 IF visible_count % 1000 = 0 THEN
3348 -- RAISE NOTICE ' % visible so far ... ', visible_count;
3353 current_res.id = NULL;
3354 current_res.rel = NULL;
3355 current_res.record = NULL;
3356 current_res.total = total_count;
3357 current_res.checked = check_count;
3358 current_res.deleted = deleted_count;
3359 current_res.visible = visible_count;
3360 current_res.excluded = excluded_count;
3364 RETURN NEXT current_res;
3367 $func$ LANGUAGE PLPGSQL;
-- Block checks for 0789 and 0790 (native full-text-search work). The actual
-- 0789 DDL (text search configurations) is deferred to the end of this script,
-- where it runs outside a transaction -- see the note near the CREATE TEXT
-- SEARCH CONFIGURATION statements below.
3370 SELECT evergreen.upgrade_deps_block_check('0789', :eg_version);
3371 SELECT evergreen.upgrade_deps_block_check('0790', :eg_version);
-- 0790: add a 'combined' flag to metabib search classes, enabled only for
-- 'subject' (presumably marking classes whose fields share one combined
-- index -- confirm against the 0790 changeset).
3373 ALTER TABLE config.metabib_class ADD COLUMN combined BOOL NOT NULL DEFAULT FALSE;
3374 UPDATE config.metabib_class SET combined = TRUE WHERE name = 'subject';
3377 --Check if we can apply the upgrade.
3378 SELECT evergreen.upgrade_deps_block_check('0791', :eg_version);
-- 0791: recreate search.query_parser_fts. Compared with the version replaced
-- above, the visible signature adds param_pref_ou INT DEFAULT NULL, whose
-- org-unit ancestors are folded into luri_org_list (the located-URI org list),
-- keeping the function backward-compatible for existing callers.
--
-- NOTE(review): the left-hand source line numbers are non-contiguous, so this
-- listing is ELIDED -- statements between the visible lines (including some
-- ELSE branches, IF NOT FOUND guards, and END terminators) are not shown.
-- Work from the full upgrade script; do not hand-edit from this view.
3382 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3384 param_search_ou INT,
3387 param_statuses INT[],
3388 param_locations INT[],
3394 deleted_search BOOL,
3395 param_pref_ou INT DEFAULT NULL
3396 ) RETURNS SETOF search.search_result AS $func$
3399 current_res search.search_result%ROWTYPE;
3400 search_org_list INT[];
3401 luri_org_list INT[];
3410 core_cursor REFCURSOR;
3411 core_rel_query TEXT;
-- Running tallies reported in the final summary row.
3413 total_count INT := 0;
3414 check_count INT := 0;
3415 deleted_count INT := 0;
3416 visible_count INT := 0;
3417 excluded_count INT := 0;
-- Caller limits, with stock defaults.
3421 check_limit := COALESCE( param_check, 1000 );
3422 core_limit := COALESCE( param_limit, 25000 );
3423 core_offset := COALESCE( param_offset, 0 );
3425 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
-- Positive OU: search its descendants (to param_depth when given); its
-- ancestors seed the located-URI org list.
3427 IF param_search_ou > 0 THEN
3428 IF param_depth IS NOT NULL THEN
3429 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3431 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
3434 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
-- Negative OU: treat -param_search_ou as an org lasso id; collect ancestors
-- of every lassoed org for the located-URI list.
3436 ELSIF param_search_ou < 0 THEN
3437 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3439 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3440 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3441 luri_org_list := luri_org_list || tmp_int_list;
3444 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3446 ELSIF param_search_ou = 0 THEN
3447 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
-- New in 0791: preferred OU's ancestors also contribute located URIs.
3450 IF param_pref_ou IS NOT NULL THEN
3451 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3452 luri_org_list := luri_org_list || tmp_int_list;
-- Drive the caller-supplied core query through a cursor and post-filter each
-- candidate (meta)record for visibility.
3455 OPEN core_cursor FOR EXECUTE param_query;
3459 FETCH core_cursor INTO core_result;
3460 EXIT WHEN NOT FOUND;
3461 EXIT WHEN total_count >= core_limit;
3463 total_count := total_count + 1;
-- Skip rows outside the offset/check window without counting them as checked.
3465 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3467 check_count := check_count + 1;
-- Unless searching deleted records, drop hits whose bibs are all deleted.
3469 IF NOT deleted_search THEN
3471 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3473 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3474 deleted_count := deleted_count + 1;
-- Transcendant bib sources are always visible, bypassing copy checks.
3479 FROM biblio.record_entry b
3480 JOIN config.bib_source s ON (b.source = s.id)
3481 WHERE s.transcendant
3482 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3485 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3486 visible_count := visible_count + 1;
3488 current_res.id = core_result.id;
3489 current_res.rel = core_result.rel;
-- Metarecords mapping a single source report that source as the record.
3493 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3497 current_res.record = core_result.records[1];
3499 current_res.record = NULL;
3502 RETURN NEXT current_res;
-- Located-URI check: a '##URI##' call number owned within luri_org_list makes
-- the record visible (only when no status/location filters apply).
3508 FROM asset.call_number cn
3509 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3510 JOIN asset.uri uri ON (map.uri = uri.id)
3511 WHERE NOT cn.deleted
3512 AND cn.label = '##URI##'
3514 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3515 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3516 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3520 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3521 visible_count := visible_count + 1;
3523 current_res.id = core_result.id;
3524 current_res.rel = core_result.rel;
3528 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3532 current_res.record = core_result.records[1];
3534 current_res.record = NULL;
3537 RETURN NEXT current_res;
-- Copy-status filter: the record (or a multi-homed peer) must have a copy in
-- one of the requested statuses within the search scope.
3542 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3545 FROM asset.call_number cn
3546 JOIN asset.copy cp ON (cp.call_number = cn.id)
3547 WHERE NOT cn.deleted
3549 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3550 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3551 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3556 FROM biblio.peer_bib_copy_map pr
3557 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3558 WHERE NOT cp.deleted
3559 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3560 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3561 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3565 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3566 excluded_count := excluded_count + 1;
-- Shelving-location filter, same shape as the status filter above.
3573 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3576 FROM asset.call_number cn
3577 JOIN asset.copy cp ON (cp.call_number = cn.id)
3578 WHERE NOT cn.deleted
3580 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3581 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3582 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3587 FROM biblio.peer_bib_copy_map pr
3588 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3589 WHERE NOT cp.deleted
3590 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3591 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3592 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3596 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3597 excluded_count := excluded_count + 1;
-- OPAC (non-staff) visibility: require an opac-visible copy, directly or via
-- a multi-homed peer record.
3604 IF staff IS NULL OR NOT staff THEN
3607 FROM asset.opac_visible_copies
3608 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3609 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3614 FROM biblio.peer_bib_copy_map pr
3615 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3616 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3617 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3622 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3623 excluded_count := excluded_count + 1;
-- Staff path: in-scope copies (or peers), with a fallback check against any
-- call number on the record (elided lines decide the outcome).
3631 FROM asset.call_number cn
3632 JOIN asset.copy cp ON (cp.call_number = cn.id)
3633 WHERE NOT cn.deleted
3635 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3636 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3642 FROM biblio.peer_bib_copy_map pr
3643 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3644 WHERE NOT cp.deleted
3645 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3646 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3652 FROM asset.call_number cn
3653 JOIN asset.copy cp ON (cp.call_number = cn.id)
3654 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3659 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3660 excluded_count := excluded_count + 1;
-- Survived every filter: emit the row.
3671 visible_count := visible_count + 1;
3673 current_res.id = core_result.id;
3674 current_res.rel = core_result.rel;
3678 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3682 current_res.record = core_result.records[1];
3684 current_res.record = NULL;
3687 RETURN NEXT current_res;
3689 IF visible_count % 1000 = 0 THEN
3690 -- RAISE NOTICE ' % visible so far ... ', visible_count;
-- Trailing summary row: NULL id/rel/record, with the counters filled in.
3695 current_res.id = NULL;
3696 current_res.rel = NULL;
3697 current_res.record = NULL;
3698 current_res.total = total_count;
3699 current_res.checked = check_count;
3700 current_res.deleted = deleted_count;
3701 current_res.visible = visible_count;
3702 current_res.excluded = excluded_count;
3706 RETURN NEXT current_res;
3709 $func$ LANGUAGE PLPGSQL;
3712 -- AFTER UPDATE OR INSERT trigger for biblio.record_entry
-- On delete: unlink the bib from search/browse structures. Otherwise:
-- (re)ingest the MARC into authority links, metabib.full_rec, record
-- attributes, field entries, located URIs, and metarecord maps. Individual
-- steps are toggled by config.internal_flag rows (PERFORM ... / IF NOT FOUND
-- pairs; several of the IF NOT FOUND guards fall in elided lines).
-- NOTE(review): the left-hand source line numbers are non-contiguous, so this
-- listing is ELIDED; ELSE branches and END terminators between visible lines
-- are not shown. Work from the full upgrade script.
3713 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
3715 transformed_xml TEXT;
3718 xfrm config.xml_transform%ROWTYPE;
3720 new_attrs HSTORE := ''::HSTORE;
3721 attr_def config.record_attr_definition%ROWTYPE;
3724 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
3725 PERFORM * FROM config.internal_flag WHERE
3726 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
3728 -- One needs to keep these around to support searches
3729 -- with the #deleted modifier, so one should turn on the named
3730 -- internal flag for that functionality.
3731 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
3732 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3735 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
3736 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
3737 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
3738 RETURN NEW; -- and we're done
3741 IF TG_OP = 'UPDATE' THEN -- re-ingest?
3742 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
3744 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
3749 -- Record authority linking
3750 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
3752 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
3755 -- Flatten and insert the mfr data
3756 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
3758 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
3760 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
3761 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
-- One attr_value is computed per definition via exactly one of four sources:
-- tag/subfield, fixed field, XPath over (optionally) transformed XML, or a
-- physical-characteristic subfield.
3763 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
3765 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
3766 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
3767 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
3768 WHERE record = NEW.id
3769 AND tag LIKE attr_def.tag
3771 WHEN attr_def.sf_list IS NOT NULL
3772 THEN POSITION(subfield IN attr_def.sf_list) > 0
3779 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
3780 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
3782 ELSIF attr_def.xpath IS NOT NULL THEN -- and xpath expression
3784 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
3786 -- See if we can skip the XSLT ... it's expensive
3787 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
3788 -- Can't skip the transform
3789 IF xfrm.xslt <> '---' THEN
3790 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
3792 transformed_xml := NEW.marc;
3795 prev_xfrm := xfrm.name;
3798 IF xfrm.name IS NULL THEN
3799 -- just grab the marcxml (empty) transform
3800 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
3801 prev_xfrm := xfrm.name;
3804 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
3806 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
3807 SELECT m.value INTO attr_value
3808 FROM biblio.marc21_physical_characteristics(NEW.id) v
3809 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
3810 WHERE v.subfield = attr_def.phys_char_sf
3811 LIMIT 1; -- Just in case ...
3815 -- apply index normalizers to attr_value
3817 SELECT n.func AS func,
3818 n.param_count AS param_count,
3820 FROM config.index_normalizer n
3821 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
3822 WHERE attr = attr_def.name
-- Dynamic call: normalizer params are stored JSON-ish; quoting is rewritten
-- so they can be spliced into the EXECUTE string.
3824 EXECUTE 'SELECT ' || normalizer.func || '(' ||
3825 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
3827 WHEN normalizer.param_count > 0
3828 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
3831 ')' INTO attr_value;
3835 -- Add the new value to the hstore
3836 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
3840 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivication
3841 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3842 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
3844 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
3850 -- Gather and insert the field entry data
3851 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
3853 -- Located URI magic
3854 IF TG_OP = 'INSERT' THEN
3855 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3857 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3860 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3862 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3866 -- (re)map metarecord-bib linking
3867 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
3868 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
3870 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3872 ELSE -- we're doing an update, and we're not deleted, remap
3873 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
3875 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3881 $func$ LANGUAGE PLPGSQL;
3883 SELECT evergreen.upgrade_deps_block_check('0792', :eg_version);
-- 0792: give permission 544 its symbolic code. The extra code = '544' guard
-- makes this a no-op on systems where the code was already corrected locally.
3885 UPDATE permission.perm_list SET code = 'URL_VERIFY_UPDATE_SETTINGS' WHERE id = 544 AND code = '544';
3888 SELECT evergreen.upgrade_deps_block_check('0793', :eg_version);
-- 0793: adjust the three stock config.best_hold_order rows. The SET lists and
-- the remainder of each WHERE clause fall in elided lines of this listing;
-- each WHERE visibly keys on the stock row name plus further (hidden)
-- conditions -- presumably matching the stock column values so locally
-- customized rows are left untouched (confirm against the full script).
3890 UPDATE config.best_hold_order
3901 WHERE name = 'Traditional' AND
3909 UPDATE config.best_hold_order
3920 WHERE name = 'Traditional with Holds-always-go-home' AND
3930 UPDATE config.best_hold_order
3941 WHERE name = 'Traditional with Holds-go-home' AND
-- Operator notices (psql \qecho writes to the query output stream): the 0788
-- statements below may already be present on systems that applied 2.3 point
-- releases, in which case the block check deliberately fails fast.
3954 \qecho 'These are from 0788, and should be run in their own transaction'
3955 \qecho 'If these fail due to 0788 already having been applied during'
3956 \qecho '2.3 upgrades, that is good; you have just saved yourself a lot of time.'
3960 SELECT evergreen.upgrade_deps_block_check('0788', :eg_version);
3962 -- New view including 264 as a potential tag for publisher and pubdate
-- RDA records carry publication data in 264 (ind2 = '1' for publication)
-- rather than 260, so publisher/pubdate now accept either tag.
-- NOTE(review): elided listing -- the SELECT's leading columns (between
-- source lines 3963 and 3969) and some closing lines are not shown here.
3963 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
3969 FIRST(title.value) AS title,
3970 FIRST(author.value) AS author,
3971 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
3972 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
3973 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
3974 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
3975 FROM biblio.record_entry r
3976 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
3977 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
3978 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND (publisher.tag = '260' OR (publisher.tag = '264' AND publisher.ind2 = '1')) AND publisher.subfield = 'b')
3979 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND (pubdate.tag = '260' OR (pubdate.tag = '264' AND pubdate.ind2 = '1')) AND pubdate.subfield = 'c')
3980 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
3981 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
3984 -- Update reporter.materialized_simple_record with new 264-based values for publisher and pubdate
-- Delete-then-reinsert limited to records that actually carry 264 $b/$c, so
-- only affected rows are rebuilt from the new view definition.
3985 DELETE FROM reporter.materialized_simple_record WHERE id IN (
3986 SELECT DISTINCT record FROM metabib.full_rec WHERE tag = '264' AND subfield IN ('b', 'c')
3989 INSERT INTO reporter.materialized_simple_record
3990 SELECT DISTINCT rossr.* FROM reporter.old_super_simple_record rossr INNER JOIN metabib.full_rec mfr ON mfr.record = rossr.id
3991 WHERE mfr.tag = '264' AND mfr.subfield IN ('b', 'c')
3995 -- These are from 0789, and can and should be run outside of a transaction
-- Each configuration copies the english_nostop base (stopwords retained);
-- the names mirror the metabib search classes -- presumably one config per
-- class so per-class parsing can diverge later (confirm against 0789).
3996 CREATE TEXT SEARCH CONFIGURATION title ( COPY = english_nostop );
3997 CREATE TEXT SEARCH CONFIGURATION author ( COPY = english_nostop );
3998 CREATE TEXT SEARCH CONFIGURATION subject ( COPY = english_nostop );
3999 CREATE TEXT SEARCH CONFIGURATION series ( COPY = english_nostop );
4000 CREATE TEXT SEARCH CONFIGURATION identifier ( COPY = english_nostop );