1 --Upgrade Script for 2.3 to 2.4.0RC
-- psql variable used by every upgrade_deps_block_check() call below.
2 \set eg_version '''2.4.0RC'''
4 INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.4.0RC', :eg_version);
5 -- remove the Bypass hold capture during clear shelf process setting
-- Guard: raises if patch 0739 was already applied or prerequisites are missing.
7 SELECT evergreen.upgrade_deps_block_check('0739', :eg_version);
-- Delete dependent rows (actual settings, then the change log) before the
-- setting-type row itself -- presumably ordered this way to satisfy FKs; confirm.
10 DELETE FROM actor.org_unit_setting WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
11 DELETE FROM config.org_unit_setting_type_log WHERE field_name = 'circ.holds.clear_shelf.no_capture_holds';
14 DELETE FROM config.org_unit_setting_type WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
17 SELECT evergreen.upgrade_deps_block_check('0741', :eg_version);
-- Patch 0741: three new permissions for custom toolbar administration
-- (org unit / workstation / user scope). NOTE(review): the id values and
-- closing parentheses of these INSERTs are not visible in this excerpt.
19 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
21 'ADMIN_TOOLBAR_FOR_ORG',
24 'Allows a user to create, edit, and delete custom toolbars for org units',
30 'ADMIN_TOOLBAR_FOR_WORKSTATION',
33 'Allows a user to create, edit, and delete custom toolbars for workstations',
39 'ADMIN_TOOLBAR_FOR_USER',
42 'Allows a user to create, edit, and delete custom toolbars for users',
49 -- Evergreen DB patch 0743.schema.remove_tsearch2.sql
51 -- Enable native full-text search to be used, and drop TSearch2 extension
54 -- check whether patch can be applied
55 SELECT evergreen.upgrade_deps_block_check('0743', :eg_version);
57 -- FIXME: add/check SQL statements to perform the upgrade
58 -- First up, these functions depend on metabib.full_rec. They have to go for now.
-- Both overloads are recreated further down once the view exists again.
59 DROP FUNCTION IF EXISTS biblio.flatten_marc(bigint);
60 DROP FUNCTION IF EXISTS biblio.flatten_marc(text);
62 -- These views depend on metabib.full_rec as well. Bye-bye!
63 DROP VIEW IF EXISTS reporter.old_super_simple_record;
64 DROP VIEW IF EXISTS reporter.simple_record;
-- NOTE(review): reporter.classic_item_list is dropped here but not recreated
-- anywhere in this excerpt -- verify it is restored elsewhere in the upgrade.
65 DROP VIEW IF EXISTS reporter.classic_item_list;
67 -- Now we can drop metabib.full_rec.
68 DROP VIEW IF EXISTS metabib.full_rec;
70 -- These indexes have to go. BEFORE we alter the tables, otherwise things take extra time when we alter the tables.
-- IF EXISTS makes this section idempotent (safe on re-run / partially-applied
-- upgrade), matching the DROP ... IF EXISTS style used elsewhere in this script.
-- A bare DROP INDEX would abort the whole upgrade transaction if an index is
-- already gone.
71 DROP INDEX IF EXISTS metabib.metabib_author_field_entry_value_idx;
72 DROP INDEX IF EXISTS metabib.metabib_identifier_field_entry_value_idx;
73 DROP INDEX IF EXISTS metabib.metabib_keyword_field_entry_value_idx;
74 DROP INDEX IF EXISTS metabib.metabib_series_field_entry_value_idx;
75 DROP INDEX IF EXISTS metabib.metabib_subject_field_entry_value_idx;
76 DROP INDEX IF EXISTS metabib.metabib_title_field_entry_value_idx;
78 -- Now grab all of the tsvector-enabled columns and switch them to the non-wrapper version of the type.
-- The columns previously used the tsearch2 wrapper type; pointing them at
-- pg_catalog.tsvector lets the extension be dropped below.
79 ALTER TABLE authority.full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
80 ALTER TABLE authority.simple_heading ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
81 ALTER TABLE metabib.real_full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
82 ALTER TABLE metabib.author_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
83 ALTER TABLE metabib.browse_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
84 ALTER TABLE metabib.identifier_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
85 ALTER TABLE metabib.keyword_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
86 ALTER TABLE metabib.series_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
87 ALTER TABLE metabib.subject_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
88 ALTER TABLE metabib.title_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
90 -- Halfway there! Goodbye tsearch2 extension!
91 DROP EXTENSION tsearch2;
93 -- Next up, re-creating all of the stuff we just dropped.
95 -- Indexes! Note to whoever: Do we even need these anymore?
-- Partial btree indexes on the first 1KB of value, only for rows whose
-- index_vector is empty.
96 CREATE INDEX metabib_author_field_entry_value_idx ON metabib.author_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
97 CREATE INDEX metabib_identifier_field_entry_value_idx ON metabib.identifier_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
98 CREATE INDEX metabib_keyword_field_entry_value_idx ON metabib.keyword_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
99 CREATE INDEX metabib_series_field_entry_value_idx ON metabib.series_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
100 CREATE INDEX metabib_subject_field_entry_value_idx ON metabib.subject_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
101 CREATE INDEX metabib_title_field_entry_value_idx ON metabib.title_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
103 -- metabib.full_rec, with insert/update/delete rules
-- The view exposes metabib.real_full_rec with value truncated to 1024 chars;
-- rules below route DML on the view back to the real table.
-- NOTE(review): several lines of the view/rule bodies are not visible in this
-- excerpt (column list, DO INSTEAD clauses, remaining SET columns).
104 CREATE OR REPLACE VIEW metabib.full_rec AS
111 SUBSTRING(value,1,1024) AS value,
113 FROM metabib.real_full_rec;
115 CREATE OR REPLACE RULE metabib_full_rec_insert_rule
116 AS ON INSERT TO metabib.full_rec
-- Falls back to the sequence when no id is supplied by the caller.
118 INSERT INTO metabib.real_full_rec VALUES (
119 COALESCE(NEW.id, NEXTVAL('metabib.full_rec_id_seq'::REGCLASS)),
129 CREATE OR REPLACE RULE metabib_full_rec_update_rule
130 AS ON UPDATE TO metabib.full_rec
132 UPDATE metabib.real_full_rec SET
138 subfield = NEW.subfield,
140 index_vector = NEW.index_vector
143 CREATE OR REPLACE RULE metabib_full_rec_delete_rule
144 AS ON DELETE TO metabib.full_rec
146 DELETE FROM metabib.real_full_rec WHERE id = OLD.id;
148 -- reporter views that depended on metabib.full_rec are up next
-- One row per bib record: flattens common MARC fields (title 245a, author
-- 100a, publisher 260b, etc.) via LEFT JOINs against the recreated
-- metabib.full_rec view, plus ARRAY-valued subject/URI columns.
149 CREATE OR REPLACE VIEW reporter.simple_record AS
156 title.value AS title,
157 uniform_title.value AS uniform_title,
158 author.value AS author,
159 publisher.value AS publisher,
-- First run of digits from 260$c, e.g. '1999' out of 'c1999.'
160 SUBSTRING(pubdate.value FROM $$\d+$$) AS pubdate,
161 series_title.value AS series_title,
162 series_statement.value AS series_statement,
163 summary.value AS summary,
-- Normalize ISBNs: first token only, hyphens stripped, deduplicated.
164 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
-- Normalize ISSNs to 'NNNN NNNN' form (ARRAY_ACCUM is an Evergreen aggregate).
165 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn,
166 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '650' AND subfield = 'a' AND record = r.id)) AS topic_subject,
167 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '651' AND subfield = 'a' AND record = r.id)) AS geographic_subject,
168 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '655' AND subfield = 'a' AND record = r.id)) AS genre,
169 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '600' AND subfield = 'a' AND record = r.id)) AS name_subject,
170 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '610' AND subfield = 'a' AND record = r.id)) AS corporate_subject,
-- 856 link subfields, labels ($3/$y) sorted before URLs ($u).
171 ARRAY((SELECT value FROM metabib.full_rec WHERE tag = '856' AND subfield IN ('3','y','u') AND record = r.id ORDER BY CASE WHEN subfield IN ('3','y') THEN 0 ELSE 1 END)) AS external_uri
172 FROM biblio.record_entry r
173 JOIN metabib.metarecord_source_map s ON (s.source = r.id)
174 LEFT JOIN metabib.full_rec uniform_title ON (r.id = uniform_title.record AND uniform_title.tag = '240' AND uniform_title.subfield = 'a')
175 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
176 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag = '100' AND author.subfield = 'a')
177 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
178 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
179 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
180 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
181 LEFT JOIN metabib.full_rec series_title ON (r.id = series_title.record AND series_title.tag IN ('830','440') AND series_title.subfield = 'a')
182 LEFT JOIN metabib.full_rec series_statement ON (r.id = series_statement.record AND series_statement.tag = '490' AND series_statement.subfield = 'a')
183 LEFT JOIN metabib.full_rec summary ON (r.id = summary.record AND summary.tag = '520' AND summary.subfield = 'a')
-- NOTE(review): positional GROUP BY is fragile if the column list changes;
-- kept as-is to match upstream (full SELECT list is not visible here).
184 GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14;
-- Lightweight variant of reporter.simple_record: one row per bib record with
-- scalar title/author (FIRST is an Evergreen aggregate) and aggregated
-- publisher/pubdate/isbn/issn. NOTE(review): SELECT-list head and trailing
-- GROUP BY are not visible in this excerpt.
186 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
192 FIRST(title.value) AS title,
193 FIRST(author.value) AS author,
194 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
195 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
196 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
197 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
198 FROM biblio.record_entry r
199 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
-- Unlike simple_record, author also considers corporate/meeting names (110/111).
200 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
201 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
202 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
203 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
204 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
207 -- And finally, the biblio functions. NOTE: I can't find the original source of the second one, so I skipped it as old cruft that was in our production DB.
-- Flattens a bib record's MARCXML into metabib.full_rec rows by delegating to
-- vandelay.flatten_marc(). NOTE(review): DECLARE/BEGIN, the loop's RETURN NEXT
-- and the closing END are not visible in this excerpt.
208 CREATE OR REPLACE FUNCTION biblio.flatten_marc ( rid BIGINT ) RETURNS SETOF metabib.full_rec AS $func$
210 bib biblio.record_entry%ROWTYPE;
211 output metabib.full_rec%ROWTYPE;
214 SELECT INTO bib * FROM biblio.record_entry WHERE id = rid;
216 FOR field IN SELECT * FROM vandelay.flatten_marc( bib.marc ) LOOP
217 output.record := rid;
218 output.ind1 := field.ind1;
219 output.ind2 := field.ind2;
220 output.tag := field.tag;
221 output.subfield := field.subfield;
222 output.value := field.value;
227 $func$ LANGUAGE PLPGSQL;
229 -- Evergreen DB patch 0745.data.prewarn_expire_setting.sql
231 -- Configuration setting to warn staff when an account is about to expire
234 -- check whether patch can be applied
235 SELECT evergreen.upgrade_deps_block_check('0745', :eg_version);
-- New org-unit setting; the name appears twice more below inside the
-- oils_i18n_gettext() wrappers for label and description (wrappers not
-- visible in this excerpt).
237 INSERT INTO config.org_unit_setting_type
238 (name, grp, label, description, datatype)
240 'circ.patron_expires_soon_warning',
243 'circ.patron_expires_soon_warning',
244 'Warn when patron account is about to expire',
249 'circ.patron_expires_soon_warning',
250 'Warn when patron account is about to expire. If set, the staff client displays a warning this many days before the expiry of a patron account. Value is in number of days, for example: 3 for 3 days.',
257 -- LP1076399: Prevent reactivated holds from canceling immediately.
258 -- Set the expire_time to NULL on all frozen/suspended holds.
260 SELECT evergreen.upgrade_deps_block_check('0747', :eg_version);
-- NOTE(review): the WHERE clause restricting this UPDATE to frozen holds is
-- not visible in this excerpt -- confirm it exists; an unfiltered UPDATE here
-- would clear expire_time on every hold.
262 UPDATE action.hold_request
263 SET expire_time = NULL
267 SELECT evergreen.upgrade_deps_block_check('0752', :eg_version);
-- Patch 0752: Link Checker (URL verification). New bucket type plus a fresh
-- url_verify schema. NOTE(review): several column definitions and closing
-- parens of the CREATE TABLEs are not visible in this excerpt.
269 INSERT INTO container.biblio_record_entry_bucket_type (code, label) VALUES ('url_verify', 'URL Verification Queue');
-- Destructive: discards any pre-existing url_verify schema and its contents.
271 DROP SCHEMA IF EXISTS url_verify CASCADE;
273 CREATE SCHEMA url_verify;
-- A verification session: who runs it, for which org unit, over which bucket.
275 CREATE TABLE url_verify.session (
276 id SERIAL PRIMARY KEY,
278 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
279 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
280 container INT NOT NULL REFERENCES container.biblio_record_entry_bucket (id) DEFERRABLE INITIALLY DEFERRED,
281 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
282 search TEXT NOT NULL,
283 CONSTRAINT uvs_name_once_per_lib UNIQUE (name, owning_lib)
-- XPath expressions used to pull candidate URLs out of records for a session.
286 CREATE TABLE url_verify.url_selector (
287 id SERIAL PRIMARY KEY,
289 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
290 CONSTRAINT tag_once_per_sess UNIQUE (xpath, session)
-- An extracted (or redirect-discovered) URL; self-reference tracks redirects.
293 CREATE TABLE url_verify.url (
294 id SERIAL PRIMARY KEY,
295 redirect_from INT REFERENCES url_verify.url(id) DEFERRABLE INITIALLY DEFERRED,
296 item INT REFERENCES container.biblio_record_entry_bucket_item (id) DEFERRABLE INITIALLY DEFERRED,
297 url_selector INT REFERENCES url_verify.url_selector (id) DEFERRABLE INITIALLY DEFERRED,
298 session INT REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
302 full_url TEXT NOT NULL,
-- A url row is either a redirect target or was extracted from an item.
314 CONSTRAINT redirect_or_from_item CHECK (
315 redirect_from IS NOT NULL OR (
317 url_selector IS NOT NULL AND
319 subfield IS NOT NULL AND
-- One batch run of checks within a session.
325 CREATE TABLE url_verify.verification_attempt (
326 id SERIAL PRIMARY KEY,
327 usr INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
328 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
329 start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
330 finish_time TIMESTAMP WITH TIME ZONE
-- One HTTP check of one URL within an attempt.
333 CREATE TABLE url_verify.url_verification (
334 id SERIAL PRIMARY KEY,
335 url INT NOT NULL REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED,
336 attempt INT NOT NULL REFERENCES url_verify.verification_attempt (id) DEFERRABLE INITIALLY DEFERRED,
337 req_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
338 res_time TIMESTAMP WITH TIME ZONE,
339 res_code INT CHECK (res_code BETWEEN 100 AND 999), -- we know > 599 will never be valid HTTP code, but we use 9XX for other stuff
341 redirect_to INT REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED -- if redirected
-- Generic saved-filter support for staff-client filter dialogs.
344 CREATE TABLE config.filter_dialog_interface (
345 key TEXT PRIMARY KEY,
349 CREATE TABLE config.filter_dialog_filter_set (
350 id SERIAL PRIMARY KEY,
352 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
353 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
354 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
355 interface TEXT NOT NULL REFERENCES config.filter_dialog_interface (key) DEFERRABLE INITIALLY DEFERRED,
356 filters TEXT NOT NULL CHECK (is_json(filters)),
357 CONSTRAINT cfdfs_name_once_per_lib UNIQUE (name, owning_lib)
361 SELECT evergreen.upgrade_deps_block_check('0753', :eg_version);
-- Splits a URL string into a url_verify.url composite (scheme, host, domain,
-- tld, path, page, ...). Body appears to be Perl (presumably LANGUAGE
-- PLPERLU via Rose::URI -- the language clause is not visible here).
363 CREATE OR REPLACE FUNCTION url_verify.parse_url (url_in TEXT) RETURNS url_verify.url AS $$
368 my $url = Rose::URI->new($url_in);
370 my %parts = map { $_ => $url->$_ } qw/scheme username password host port path query fragment/;
372 $parts{full_url} = $url_in;
-- Derived pieces: domain = host minus first label, tld = last label,
-- page = final path component.
373 ($parts{domain} = $parts{host}) =~ s/^[^.]+\.//;
374 ($parts{tld} = $parts{domain}) =~ s/(?:[^.]+\.)+//;
375 ($parts{page} = $parts{path}) =~ s#(?:[^/]*/)+##;
-- Trigger function: populate the parsed columns of url_verify.url from
-- NEW.full_url before insert.
381 CREATE OR REPLACE FUNCTION url_verify.ingest_url () RETURNS TRIGGER AS $$
383 tmp_row url_verify.url%ROWTYPE;
385 SELECT * INTO tmp_row FROM url_verify.parse_url(NEW.full_url);
387 NEW.scheme := tmp_row.scheme;
388 NEW.username := tmp_row.username;
389 NEW.password := tmp_row.password;
390 NEW.host := tmp_row.host;
391 NEW.domain := tmp_row.domain;
392 NEW.tld := tmp_row.tld;
393 NEW.port := tmp_row.port;
394 NEW.path := tmp_row.path;
395 NEW.page := tmp_row.page;
396 NEW.query := tmp_row.query;
397 NEW.fragment := tmp_row.fragment;
403 CREATE TRIGGER ingest_url_tgr
404 BEFORE INSERT ON url_verify.url
405 FOR EACH ROW EXECUTE PROCEDURE url_verify.ingest_url();
-- Walks every url_selector XPath of a session over one bucket item's MARCXML,
-- inserting a url_verify.url row per match; returns the number of URLs found.
-- NOTE(review): DECLARE/BEGIN, loop-position increments within the XPATH
-- node-set, and the inner-loop END are not fully visible in this excerpt.
407 CREATE OR REPLACE FUNCTION url_verify.extract_urls ( session_id INT, item_id INT ) RETURNS INT AS $$
415 current_selector url_verify.url_selector%ROWTYPE;
419 FOR current_selector IN SELECT * FROM url_verify.url_selector s WHERE s.session = session_id LOOP
420 current_url_pos := 1;
-- Nth text() match of the selector XPath in the item's bib record.
422 SELECT (XPATH(current_selector.xpath || '/text()', b.marc::XML))[current_url_pos]::TEXT INTO current_url
423 FROM biblio.record_entry b
424 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
425 WHERE c.id = item_id;
427 EXIT WHEN current_url IS NULL;
-- MARC tag of the datafield containing the matched node.
429 SELECT (XPATH(current_selector.xpath || '/../@tag', b.marc::XML))[current_url_pos]::TEXT INTO current_tag
430 FROM biblio.record_entry b
431 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
432 WHERE c.id = item_id;
-- Carry the last seen tag forward when the XPath lookup yields none.
434 IF current_tag IS NULL THEN
435 current_tag := last_seen_tag;
437 last_seen_tag := current_tag;
440 SELECT (XPATH(current_selector.xpath || '/@code', b.marc::XML))[current_url_pos]::TEXT INTO current_sf
441 FROM biblio.record_entry b
442 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
443 WHERE c.id = item_id;
445 INSERT INTO url_verify.url (session, item, url_selector, tag, subfield, ord, full_url)
446 VALUES ( session_id, item_id, current_selector.id, current_tag, current_sf, current_ord, current_url);
448 current_url_pos := current_url_pos + 1;
449 current_ord := current_ord + 1;
-- current_ord started at 1, so the count of inserted URLs is current_ord - 1.
453 RETURN current_ord - 1;
459 -- NOTE: beware the use of bare perm IDs in the update_perm's below and in
460 -- the 950 seed data file. Update before merge to match current perm IDs! XXX
463 SELECT evergreen.upgrade_deps_block_check('0754', :eg_version);
-- Patch 0754: Link Checker permissions, settings group, org-unit settings and
-- saved-column user settings. NOTE(review): perm id values, VALUES
-- punctuation and oils_i18n_gettext wrappers are not visible in this excerpt.
465 INSERT INTO permission.perm_list (id, code, description)
471 'Allows a user to process and verify ULSs',
478 INSERT INTO permission.perm_list (id, code, description)
481 'URL_VERIFY_UPDATE_SETTINGS',
484 'Allows a user to configure URL verification org unit settings',
491 INSERT INTO permission.perm_list (id, code, description)
494 'SAVED_FILTER_DIALOG_FILTERS',
497 'Allows users to save and load sets of filters for filter dialogs, available in certain staff interfaces',
504 INSERT INTO config.settings_group (name, label)
-- Throttle delay between URL tests per worker process.
515 INSERT INTO config.org_unit_setting_type
516 (name, grp, label, description, datatype, update_perm)
518 'url_verify.url_verification_delay',
521 'url_verify.url_verification_delay',
522 'Number of seconds to wait between URL test attempts.',
527 'url_verify.url_verification_delay',
528 'Throttling mechanism for batch URL verification runs. Each running process will wait this number of seconds after a URL test before performing the next.',
-- Cap on how many 3XX redirects are followed for one URL.
536 INSERT INTO config.org_unit_setting_type
537 (name, grp, label, description, datatype, update_perm)
539 'url_verify.url_verification_max_redirects',
542 'url_verify.url_verification_max_redirects',
543 'Maximum redirect lookups',
548 'url_verify.url_verification_max_redirects',
549 'For URLs returning 3XX redirects, this is the maximum number of redirects we will follow before giving up.',
-- Per-URL timeout before marking the check as "timeout".
557 INSERT INTO config.org_unit_setting_type
558 (name, grp, label, description, datatype, update_perm)
560 'url_verify.url_verification_max_wait',
563 'url_verify.url_verification_max_wait',
564 'Maximum wait time (in seconds) for a URL to lookup',
569 'url_verify.url_verification_max_wait',
570 'If we exceed the wait time, the URL is marked as a "timeout" and the system moves on to the next URL',
-- Parallelism: how many URLs each verification batch tests at once.
579 INSERT INTO config.org_unit_setting_type
580 (name, grp, label, description, datatype, update_perm)
582 'url_verify.verification_batch_size',
585 'url_verify.verification_batch_size',
586 'Number of URLs to test in parallel',
591 'url_verify.verification_batch_size',
592 'URLs are tested in batches. This number defines the size of each batch and it directly relates to the number of back-end processes performing URL verification.',
601 INSERT INTO config.filter_dialog_interface (key, description) VALUES (
605 'All Link Checker filter dialogs',
-- Saved grid-column layouts for the two Link Checker interfaces.
612 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
613 'ui.grid_columns.url_verify.select_urls',
617 'ui.grid_columns.url_verify.select_urls',
618 'Link Checker''s URL Selection interface''s saved columns',
623 'ui.grid_columns.url_verify.select_urls',
624 'Link Checker''s URL Selection interface''s saved columns',
631 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
632 'ui.grid_columns.url_verify.review_attempt',
636 'ui.grid_columns.url_verify.review_attempt',
637 'Link Checker''s Review Attempt interface''s saved columns',
642 'ui.grid_columns.url_verify.review_attempt',
643 'Link Checker''s Review Attempt interface''s saved columns',
653 SELECT evergreen.upgrade_deps_block_check('0755', :eg_version);
-- Patch 0755: default settings for the ACQ MARC file upload interface.
-- One multi-row INSERT; each setting name recurs inside its
-- oils_i18n_gettext label/description wrappers (wrappers and datatype/fm_class
-- values are not visible in this excerpt).
655 INSERT INTO config.org_unit_setting_type
656 (name, label, description, grp, datatype, fm_class) VALUES
658 'acq.upload.default.create_po',
660 'acq.upload.default.create_po',
666 'acq.upload.default.create_po',
667 'Create a purchase order by default during ACQ file upload',
675 'acq.upload.default.activate_po',
677 'acq.upload.default.activate_po',
678 'Upload Activate PO',
683 'acq.upload.default.activate_po',
684 'Activate the purchase order by default during ACQ file upload',
692 'acq.upload.default.provider',
694 'acq.upload.default.provider',
695 'Upload Default Provider',
700 'acq.upload.default.provider',
701 'Default provider to use during ACQ file upload',
709 'acq.upload.default.vandelay.match_set',
711 'acq.upload.default.vandelay.match_set',
712 'Upload Default Match Set',
717 'acq.upload.default.vandelay.match_set',
718 'Default match set to use during ACQ file upload',
726 'acq.upload.default.vandelay.merge_profile',
728 'acq.upload.default.vandelay.merge_profile',
729 'Upload Default Merge Profile',
734 'acq.upload.default.vandelay.merge_profile',
735 'Default merge profile to use during ACQ file upload',
743 'acq.upload.default.vandelay.import_non_matching',
745 'acq.upload.default.vandelay.import_non_matching',
746 'Upload Import Non Matching by Default',
751 'acq.upload.default.vandelay.import_non_matching',
752 'Import non-matching records by default during ACQ file upload',
760 'acq.upload.default.vandelay.merge_on_exact',
762 'acq.upload.default.vandelay.merge_on_exact',
763 'Upload Merge on Exact Match by Default',
768 'acq.upload.default.vandelay.merge_on_exact',
769 'Merge records on exact match by default during ACQ file upload',
777 'acq.upload.default.vandelay.merge_on_best',
779 'acq.upload.default.vandelay.merge_on_best',
780 'Upload Merge on Best Match by Default',
785 'acq.upload.default.vandelay.merge_on_best',
786 'Merge records on best match by default during ACQ file upload',
794 'acq.upload.default.vandelay.merge_on_single',
796 'acq.upload.default.vandelay.merge_on_single',
797 'Upload Merge on Single Match by Default',
802 'acq.upload.default.vandelay.merge_on_single',
803 'Merge records on single match by default during ACQ file upload',
811 'acq.upload.default.vandelay.quality_ratio',
813 'acq.upload.default.vandelay.quality_ratio',
814 'Upload Default Min. Quality Ratio',
819 'acq.upload.default.vandelay.quality_ratio',
820 'Default minimum quality ratio used during ACQ file upload',
828 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
830 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
831 'Upload Default Insufficient Quality Fall-Thru Profile',
836 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
837 'Default low-quality fall through profile used during ACQ file upload',
845 'acq.upload.default.vandelay.load_item_for_imported',
847 'acq.upload.default.vandelay.load_item_for_imported',
848 'Upload Load Items for Imported Records by Default',
853 'acq.upload.default.vandelay.load_item_for_imported',
854 'Load items for imported records by default during ACQ file upload',
864 SELECT evergreen.upgrade_deps_block_check('0756', :eg_version);
866 -- Drop some lingering old functions in search schema
-- Signatures must match exactly for DROP FUNCTION to find them.
867 DROP FUNCTION IF EXISTS search.staged_fts(INT,INT,TEXT,INT[],INT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT,TEXT,TEXT,TEXT[],TEXT,REAL,TEXT,BOOL,BOOL,BOOL,INT,INT,INT);
868 DROP FUNCTION IF EXISTS search.parse_search_args(TEXT);
869 DROP FUNCTION IF EXISTS search.explode_array(ANYARRAY);
870 DROP FUNCTION IF EXISTS search.pick_table(TEXT);
872 -- Now drop query_parser_fts and related
873 DROP FUNCTION IF EXISTS search.query_parser_fts(INT,INT,TEXT,INT[],INT[],INT,INT,INT,BOOL,BOOL,INT);
-- The function must go before the composite types it returned/consumed.
874 DROP TYPE IF EXISTS search.search_result;
875 DROP TYPE IF EXISTS search.search_args;
878 SELECT evergreen.upgrade_deps_block_check('0757', :eg_version);
-- Text search dictionaries/configurations are created in the public schema.
880 SET search_path = public, pg_catalog;
-- For every built-in snowball stemmer (xxx_stem), build an xxx_nostop
-- dictionary and configuration: stemming without stopword removal, applied to
-- ASCII tokens only (non-ASCII tokens fall back to pg_catalog.simple).
-- NOTE(review): the enclosing DO/DECLARE and END LOOP lines are not visible
-- in this excerpt.
886 FOR lang IN SELECT substring(pptsd.dictname from '(.*)_stem$') AS lang FROM pg_catalog.pg_ts_dict pptsd JOIN pg_catalog.pg_namespace ppn ON ppn.oid = pptsd.dictnamespace
887 WHERE ppn.nspname = 'pg_catalog' AND pptsd.dictname LIKE '%_stem' LOOP
888 RAISE NOTICE 'FOUND LANGUAGE %', lang;
890 EXECUTE 'DROP TEXT SEARCH DICTIONARY IF EXISTS ' || lang || '_nostop CASCADE;
891 CREATE TEXT SEARCH DICTIONARY ' || lang || '_nostop (TEMPLATE=pg_catalog.snowball, language=''' || lang || ''');
892 COMMENT ON TEXT SEARCH DICTIONARY ' || lang || '_nostop IS ''' ||lang || ' snowball stemmer with no stopwords for ASCII words only.'';
893 CREATE TEXT SEARCH CONFIGURATION ' || lang || '_nostop ( COPY = pg_catalog.' || lang || ' );
894 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR word, hword, hword_part WITH pg_catalog.simple;
895 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR asciiword, asciihword, hword_asciipart WITH ' || lang || '_nostop;';
-- Aliases used by Evergreen indexing; "default" makes english_nostop the
-- database's implicit config.
900 CREATE TEXT SEARCH CONFIGURATION keyword ( COPY = english_nostop );
901 CREATE TEXT SEARCH CONFIGURATION "default" ( COPY = english_nostop );
903 SET search_path = evergreen, public, pg_catalog;
-- Per-class relevance weights for tsvector labels A-D.
905 ALTER TABLE config.metabib_class
906 ADD COLUMN a_weight NUMERIC DEFAULT 1.0 NOT NULL,
907 ADD COLUMN b_weight NUMERIC DEFAULT 0.4 NOT NULL,
908 ADD COLUMN c_weight NUMERIC DEFAULT 0.2 NOT NULL,
909 ADD COLUMN d_weight NUMERIC DEFAULT 0.1 NOT NULL;
-- Registry of usable text search configs (id referenced by the *_ts_map
-- tables below). NOTE(review): column list not visible in this excerpt.
911 CREATE TABLE config.ts_config_list (
915 COMMENT ON TABLE config.ts_config_list IS $$
918 A list of full text configs with names and descriptions.
-- Maps search classes (title, author, ...) to ts configs, with weight label
-- and optional index/search language restrictions.
921 CREATE TABLE config.metabib_class_ts_map (
922 id SERIAL PRIMARY KEY,
923 field_class TEXT NOT NULL REFERENCES config.metabib_class (name),
924 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
925 active BOOL NOT NULL DEFAULT TRUE,
926 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
927 index_lang TEXT NULL,
928 search_lang TEXT NULL,
929 always BOOL NOT NULL DEFAULT true
931 COMMENT ON TABLE config.metabib_class_ts_map IS $$
932 Text Search Configs for metabib class indexing
934 This table contains text search config definitions for
935 storing index_vector values.
-- Same idea, but per individual metabib field (overrides the class map).
938 CREATE TABLE config.metabib_field_ts_map (
939 id SERIAL PRIMARY KEY,
940 metabib_field INT NOT NULL REFERENCES config.metabib_field (id),
941 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
942 active BOOL NOT NULL DEFAULT TRUE,
943 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
944 index_lang TEXT NULL,
945 search_lang TEXT NULL
947 COMMENT ON TABLE config.metabib_field_ts_map IS $$
948 Text Search Configs for metabib field indexing
950 This table contains text search config definitions for
951 storing index_vector values.
-- Six identical combined-entry tables, one per search class. Each holds one
-- tsvector per (record, metabib_field) plus a NULL-field roll-up row; the
-- unique index COALESCEs the nullable column so the NULL row is unique too.
954 CREATE TABLE metabib.combined_identifier_field_entry (
955 record BIGINT NOT NULL,
956 metabib_field INT NULL,
957 index_vector tsvector NOT NULL
959 CREATE UNIQUE INDEX metabib_combined_identifier_field_entry_fakepk_idx ON metabib.combined_identifier_field_entry (record, COALESCE(metabib_field::TEXT,''));
960 CREATE INDEX metabib_combined_identifier_field_entry_index_vector_idx ON metabib.combined_identifier_field_entry USING GIST (index_vector);
961 CREATE INDEX metabib_combined_identifier_field_source_idx ON metabib.combined_identifier_field_entry (metabib_field);
963 CREATE TABLE metabib.combined_title_field_entry (
964 record BIGINT NOT NULL,
965 metabib_field INT NULL,
966 index_vector tsvector NOT NULL
968 CREATE UNIQUE INDEX metabib_combined_title_field_entry_fakepk_idx ON metabib.combined_title_field_entry (record, COALESCE(metabib_field::TEXT,''));
969 CREATE INDEX metabib_combined_title_field_entry_index_vector_idx ON metabib.combined_title_field_entry USING GIST (index_vector);
970 CREATE INDEX metabib_combined_title_field_source_idx ON metabib.combined_title_field_entry (metabib_field);
972 CREATE TABLE metabib.combined_author_field_entry (
973 record BIGINT NOT NULL,
974 metabib_field INT NULL,
975 index_vector tsvector NOT NULL
977 CREATE UNIQUE INDEX metabib_combined_author_field_entry_fakepk_idx ON metabib.combined_author_field_entry (record, COALESCE(metabib_field::TEXT,''));
978 CREATE INDEX metabib_combined_author_field_entry_index_vector_idx ON metabib.combined_author_field_entry USING GIST (index_vector);
979 CREATE INDEX metabib_combined_author_field_source_idx ON metabib.combined_author_field_entry (metabib_field);
981 CREATE TABLE metabib.combined_subject_field_entry (
982 record BIGINT NOT NULL,
983 metabib_field INT NULL,
984 index_vector tsvector NOT NULL
986 CREATE UNIQUE INDEX metabib_combined_subject_field_entry_fakepk_idx ON metabib.combined_subject_field_entry (record, COALESCE(metabib_field::TEXT,''));
987 CREATE INDEX metabib_combined_subject_field_entry_index_vector_idx ON metabib.combined_subject_field_entry USING GIST (index_vector);
988 CREATE INDEX metabib_combined_subject_field_source_idx ON metabib.combined_subject_field_entry (metabib_field);
990 CREATE TABLE metabib.combined_keyword_field_entry (
991 record BIGINT NOT NULL,
992 metabib_field INT NULL,
993 index_vector tsvector NOT NULL
995 CREATE UNIQUE INDEX metabib_combined_keyword_field_entry_fakepk_idx ON metabib.combined_keyword_field_entry (record, COALESCE(metabib_field::TEXT,''));
996 CREATE INDEX metabib_combined_keyword_field_entry_index_vector_idx ON metabib.combined_keyword_field_entry USING GIST (index_vector);
997 CREATE INDEX metabib_combined_keyword_field_source_idx ON metabib.combined_keyword_field_entry (metabib_field);
999 CREATE TABLE metabib.combined_series_field_entry (
1000 record BIGINT NOT NULL,
1001 metabib_field INT NULL,
1002 index_vector tsvector NOT NULL
1004 CREATE UNIQUE INDEX metabib_combined_series_field_entry_fakepk_idx ON metabib.combined_series_field_entry (record, COALESCE(metabib_field::TEXT,''));
1005 CREATE INDEX metabib_combined_series_field_entry_index_vector_idx ON metabib.combined_series_field_entry USING GIST (index_vector);
1006 CREATE INDEX metabib_combined_series_field_source_idx ON metabib.combined_series_field_entry (metabib_field);
-- Rebuilds one bib record's rows in all six combined_*_field_entry tables:
-- delete-then-insert, producing one concatenated tsvector per metabib_field
-- plus a NULL-field roll-up row per class. string_agg over index_vector::TEXT
-- concatenates the per-entry vectors; COALESCE handles the no-rows case and
-- strip() drops position/weight data. NOTE(review): the BEGIN and the closing
-- RETURN/END of the PLPGSQL body are not visible in this excerpt.
1008 CREATE OR REPLACE FUNCTION metabib.update_combined_index_vectors(bib_id BIGINT) RETURNS VOID AS $func$
1010 DELETE FROM metabib.combined_keyword_field_entry WHERE record = bib_id;
1011 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
1012 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1013 FROM metabib.keyword_field_entry WHERE source = bib_id GROUP BY field;
1014 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
1015 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1016 FROM metabib.keyword_field_entry WHERE source = bib_id;
1018 DELETE FROM metabib.combined_title_field_entry WHERE record = bib_id;
1019 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1020 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1021 FROM metabib.title_field_entry WHERE source = bib_id GROUP BY field;
1022 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1023 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1024 FROM metabib.title_field_entry WHERE source = bib_id;
1026 DELETE FROM metabib.combined_author_field_entry WHERE record = bib_id;
1027 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1028 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1029 FROM metabib.author_field_entry WHERE source = bib_id GROUP BY field;
1030 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1031 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1032 FROM metabib.author_field_entry WHERE source = bib_id;
1034 DELETE FROM metabib.combined_subject_field_entry WHERE record = bib_id;
1035 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1036 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1037 FROM metabib.subject_field_entry WHERE source = bib_id GROUP BY field;
1038 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1039 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1040 FROM metabib.subject_field_entry WHERE source = bib_id;
1042 DELETE FROM metabib.combined_series_field_entry WHERE record = bib_id;
1043 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1044 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1045 FROM metabib.series_field_entry WHERE source = bib_id GROUP BY field;
1046 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1047 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1048 FROM metabib.series_field_entry WHERE source = bib_id;
1050 DELETE FROM metabib.combined_identifier_field_entry WHERE record = bib_id;
1051 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1052 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1053 FROM metabib.identifier_field_entry WHERE source = bib_id GROUP BY field;
1054 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1055 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1056 FROM metabib.identifier_field_entry WHERE source = bib_id;
1059 $func$ LANGUAGE PLPGSQL;
-- Re-extract all search/facet/browse index entries for one bib record.
-- skip_facet / skip_browse / skip_search let callers suppress each class
-- of indexing.  After re-populating per-field rows, the combined search
-- vectors are rebuilt via metabib.update_combined_index_vectors().
-- (A later section of this script replaces this version with one that
-- also honors the ingest.skip_*_indexing internal flags.)
-- NOTE(review): several structural lines (DECLARE block, END IF / END
-- LOOP markers) appear elided in this excerpt.
1061 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1064 ind_data metabib.field_entry_template%ROWTYPE;
1065 mbe_row metabib.browse_entry%ROWTYPE;
-- When ingest.assume_inserts_only is enabled, the per-class delete pass
-- below can be skipped (record is known to be new).
1068 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
1070 IF NOT skip_search THEN
-- Clear old per-class search rows; table name is built dynamically from
-- config.metabib_class, hence the EXECUTE.
1071 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1072 -- RAISE NOTICE 'Emptying out %', fclass.name;
1073 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1076 IF NOT skip_facet THEN
1077 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1079 IF NOT skip_browse THEN
1080 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
-- Walk every extracted field entry for the record and route it to the
-- facet, browse, and/or search tables as flagged.
1084 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
-- Negative field ids are a marker from the extractor; normalize to the
-- real (positive) field id before use.
1085 IF ind_data.field < 0 THEN
1086 ind_data.field = -1 * ind_data.field;
1089 IF ind_data.facet_field AND NOT skip_facet THEN
1090 INSERT INTO metabib.facet_entry (field, source, value)
1091 VALUES (ind_data.field, ind_data.source, ind_data.value);
1094 IF ind_data.browse_field AND NOT skip_browse THEN
1095 -- A caveat about this SELECT: this should take care of replacing
1096 -- old mbe rows when data changes, but not if normalization (by
1097 -- which I mean specifically the output of
1098 -- evergreen.oils_tsearch2()) changes. It may or may not be
1099 -- expensive to add a comparison of index_vector to index_vector
1100 -- to the WHERE clause below.
1101 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
-- Reuse an existing browse entry when one matches; otherwise insert and
-- pick up the new id from the sequence.
1103 mbe_id := mbe_row.id;
1105 INSERT INTO metabib.browse_entry (value) VALUES
1106 (metabib.browse_normalize(ind_data.value, ind_data.field));
1107 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1110 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1111 VALUES (mbe_id, ind_data.field, ind_data.source);
1114 IF ind_data.search_field AND NOT skip_search THEN
-- Dynamic INSERT: target table depends on the entry's field_class.
1116 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1118 quote_literal(ind_data.field) || $$, $$ ||
1119 quote_literal(ind_data.source) || $$, $$ ||
1120 quote_literal(ind_data.value) ||
-- Finally, refresh the combined class-level vectors for the record.
1126 IF NOT skip_search THEN
1127 PERFORM metabib.update_combined_index_vectors(bib_id);
1132 $func$ LANGUAGE PLPGSQL;
-- Drop the old TSearch2-era trigger functions.  CASCADE also removes
-- the dependent FTI triggers; they are recreated below against the new
-- native-FTS public.oils_tsearch2().
1134 DROP FUNCTION IF EXISTS evergreen.oils_tsearch2() CASCADE;
1135 DROP FUNCTION IF EXISTS public.oils_tsearch2() CASCADE;
-- Row-level BEFORE trigger that populates NEW.index_vector using native
-- PostgreSQL full-text search (replacing the TSearch2 extension).
-- TG_ARGV[0] carries the search class (or ts_config name); behavior
-- branches on the target table name:
--   * *_field_entry: apply configured index normalizers, then build a
--     weighted vector from config.metabib_class_ts_map /
--     config.metabib_field_ts_map rows;
--   * *browse_entry: whitespace-normalize + search_normalize, then a
--     single unweighted to_tsvector();
--   * anything else: plain to_tsvector() with TG_ARGV[0] as the config.
-- NOTE(review): several structural lines (DECLARE/BEGIN, loop and IF
-- terminators, RETURN NEW) appear elided in this excerpt.
1137 CREATE OR REPLACE FUNCTION public.oils_tsearch2 () RETURNS TRIGGER AS $$
1141 temp_vector TEXT := '';
1146 NEW.index_vector = ''::tsvector;
-- Pass 1 (search-entry tables only): run each configured normalizer
-- over the value, in order, via dynamically-built SQL.
1148 IF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
1150 SELECT n.func AS func,
1151 n.param_count AS param_count,
1153 FROM config.index_normalizer n
1154 JOIN config.metabib_field_index_norm_map m ON (m.norm = n.id)
1155 WHERE field = NEW.field
1157 EXECUTE 'SELECT ' || normalizer.func || '(' ||
1158 quote_literal( value ) ||
-- Normalizer params are stored as a JSON-ish list; strip brackets and
-- re-quote for inline SQL.
1160 WHEN normalizer.param_count > 0
1161 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
-- Pass 2: build the tsvector appropriate to the table type.
1170 IF TG_TABLE_NAME::TEXT ~ 'browse_entry$' THEN
1171 value := ARRAY_TO_STRING(
1172 evergreen.regexp_split_to_array(value, E'\\W+'), ' '
1174 value := public.search_normalize(value);
1175 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1176 ELSIF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
-- Gather applicable ts_config rows: class-level defaults, then any
-- field-level overrides, ordered by weight so equal-weight configs can
-- be accumulated into one setweight() call.
1178 SELECT ts_config, index_weight
1179 FROM config.metabib_class_ts_map
1180 WHERE field_class = TG_ARGV[0]
-- NOTE(review): AND binds tighter than OR in SQL, so this predicate
-- parses as ((field_class = ... AND index_lang IS NULL) OR EXISTS ...)
-- rather than AND (index_lang IS NULL OR EXISTS ...).  Same pattern on
-- the 'always' line and in the field-level query below -- confirm the
-- parenthesization matches the intended language/override logic.
1181 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
1182 AND always OR NOT EXISTS (SELECT 1 FROM config.metabib_field_ts_map WHERE metabib_field = NEW.field)
1184 SELECT ts_config, index_weight
1185 FROM config.metabib_field_ts_map
1186 WHERE metabib_field = NEW.field
1187 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
1188 ORDER BY index_weight ASC
-- When the weight changes, flush the accumulated text as a weighted
-- vector and start a new accumulation bucket.
1190 IF cur_weight IS NOT NULL AND cur_weight != ts_rec.index_weight THEN
1191 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
1194 cur_weight = ts_rec.index_weight;
1195 SELECT INTO temp_vector temp_vector || ' ' || to_tsvector(ts_rec.ts_config::regconfig, value)::TEXT;
-- Flush the final weight bucket after the loop.
1197 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
-- Fallback for non-browse, non-field-entry tables (e.g. full_rec).
1199 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1204 $$ LANGUAGE PLPGSQL;
-- Recreate the full-text-indexing triggers dropped by the CASCADE
-- above.  Each passes its search class (the ts_config selector) as the
-- single trigger argument to the new public.oils_tsearch2().
1206 CREATE TRIGGER authority_full_rec_fti_trigger
1207 BEFORE UPDATE OR INSERT ON authority.full_rec
1208 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1210 CREATE TRIGGER authority_simple_heading_fti_trigger
1211 BEFORE UPDATE OR INSERT ON authority.simple_heading
1212 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1214 CREATE TRIGGER metabib_identifier_field_entry_fti_trigger
1215 BEFORE UPDATE OR INSERT ON metabib.identifier_field_entry
1216 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('identifier');
1218 CREATE TRIGGER metabib_title_field_entry_fti_trigger
1219 BEFORE UPDATE OR INSERT ON metabib.title_field_entry
1220 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('title');
1222 CREATE TRIGGER metabib_author_field_entry_fti_trigger
1223 BEFORE UPDATE OR INSERT ON metabib.author_field_entry
1224 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('author');
1226 CREATE TRIGGER metabib_subject_field_entry_fti_trigger
1227 BEFORE UPDATE OR INSERT ON metabib.subject_field_entry
1228 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('subject');
1230 CREATE TRIGGER metabib_keyword_field_entry_fti_trigger
1231 BEFORE UPDATE OR INSERT ON metabib.keyword_field_entry
1232 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1234 CREATE TRIGGER metabib_series_field_entry_fti_trigger
1235 BEFORE UPDATE OR INSERT ON metabib.series_field_entry
1236 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('series');
1238 CREATE TRIGGER metabib_browse_entry_fti_trigger
1239 BEFORE INSERT OR UPDATE ON metabib.browse_entry
1240 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1242 CREATE TRIGGER metabib_full_rec_fti_trigger
1243 BEFORE UPDATE OR INSERT ON metabib.real_full_rec
1244 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('default');
-- Seed the available text-search configurations.  The *_nostop names
-- are expected to be stemming dictionaries without stopword removal.
1246 INSERT INTO config.ts_config_list(id, name) VALUES
1247 ('simple','Non-Stemmed Simple'),
1248 ('danish_nostop','Danish Stemmed'),
1249 ('dutch_nostop','Dutch Stemmed'),
1250 ('english_nostop','English Stemmed'),
1251 ('finnish_nostop','Finnish Stemmed'),
1252 ('french_nostop','French Stemmed'),
1253 ('german_nostop','German Stemmed'),
1254 ('hungarian_nostop','Hungarian Stemmed'),
1255 ('italian_nostop','Italian Stemmed'),
1256 ('norwegian_nostop','Norwegian Stemmed'),
1257 ('portuguese_nostop','Portuguese Stemmed'),
1258 ('romanian_nostop','Romanian Stemmed'),
1259 ('russian_nostop','Russian Stemmed'),
1260 ('spanish_nostop','Spanish Stemmed'),
1261 ('swedish_nostop','Swedish Stemmed'),
1262 ('turkish_nostop','Turkish Stemmed');
-- Default class->config mappings: exact (simple) terms get weight A,
-- English-stemmed forms get the lower weight C; 'always'=true applies
-- them regardless of field-level overrides.
1264 INSERT INTO config.metabib_class_ts_map(field_class, ts_config, index_weight, always) VALUES
1265 ('keyword','simple','A',true),
1266 ('keyword','english_nostop','C',true),
1267 ('title','simple','A',true),
1268 ('title','english_nostop','C',true),
1269 ('author','simple','A',true),
1270 ('author','english_nostop','C',true),
1271 ('series','simple','A',true),
1272 ('series','english_nostop','C',true),
1273 ('subject','simple','A',true),
1274 ('subject','english_nostop','C',true),
1275 ('identifier','simple','A',true);
-- Relevance "bump" multiplier, written in plperlu because it does
-- regex work over the raw field value.  For each named bump in $bumps,
-- multiply the running result by the matching entry in $mults when the
-- value matches:
--   first_word  - value starts with the first search term
--   full_match  - value is exactly the space-joined terms
--   word_order  - all terms appear in order (any gaps)
-- NOTE(review): the $BODY$ opener and the $retval initialization /
-- return lines appear elided in this excerpt.
1277 CREATE OR REPLACE FUNCTION evergreen.rel_bump(terms TEXT[], value TEXT, bumps TEXT[], mults NUMERIC[]) RETURNS NUMERIC AS
1280 my ($terms,$value,$bumps,$mults) = @_;
# Apply each configured bump in order; multipliers are cumulative.
1284 for (my $id = 0; $id < @$bumps; $id++) {
1285 if ($bumps->[$id] eq 'first_word') {
1286 $retval *= $mults->[$id] if ($value =~ /^$terms->[0]/);
1287 } elsif ($bumps->[$id] eq 'full_match') {
1288 my $fullmatch = join(' ', @$terms);
1289 $retval *= $mults->[$id] if ($value =~ /^$fullmatch$/);
1290 } elsif ($bumps->[$id] eq 'word_order') {
1291 my $wordorder = join('.*', @$terms);
1292 $retval *= $mults->[$id] if ($value =~ /$wordorder/);
1296 $BODY$ LANGUAGE plperlu IMMUTABLE STRICT COST 100;
-- The commented block below documents the data-reindex work that is
-- deliberately deferred to a supplemental script (it can take hours on
-- a large bib table): the no-op UPDATEs re-fire the new FTI triggers,
-- then the combined vectors are rebuilt per record.
-- NOTE(review): the closing */ of this comment appears elided in this
-- excerpt -- confirm it is present in the full script.
1298 /* ** This happens in the supplemental script **
1300 UPDATE metabib.identifier_field_entry set value = value;
1301 UPDATE metabib.title_field_entry set value = value;
1302 UPDATE metabib.author_field_entry set value = value;
1303 UPDATE metabib.subject_field_entry set value = value;
1304 UPDATE metabib.keyword_field_entry set value = value;
1305 UPDATE metabib.series_field_entry set value = value;
1307 SELECT metabib.update_combined_index_vectors(id)
1308 FROM biblio.record_entry
-- 0758: Vandelay settings group plus the default record match set
-- org-unit setting (linked to the 'vms' fieldmapper class).
1313 SELECT evergreen.upgrade_deps_block_check('0758', :eg_version);
1315 INSERT INTO config.settings_group (name, label) VALUES
1316 ('vandelay', 'Vandelay');
1318 INSERT INTO config.org_unit_setting_type (name, grp, label, datatype, fm_class) VALUES
1319 ('vandelay.default_match_set', 'vandelay', 'Default Record Match Set', 'link', 'vms');
-- 0759: proximity adjustments for hold targeting.  Each row describes a
-- (partial) match criterion set and either an absolute proximity value
-- or a relative adjustment applied on top of the baseline prox.
1322 SELECT evergreen.upgrade_deps_block_check('0759', :eg_version);
1324 CREATE TABLE actor.org_unit_proximity_adjustment (
1325 id SERIAL PRIMARY KEY,
1326 item_circ_lib INT REFERENCES actor.org_unit (id),
1327 item_owning_lib INT REFERENCES actor.org_unit (id),
1328 copy_location INT REFERENCES asset.copy_location (id),
1329 hold_pickup_lib INT REFERENCES actor.org_unit (id),
1330 hold_request_lib INT REFERENCES actor.org_unit (id),
1331 pos INT NOT NULL DEFAULT 0,
1332 absolute_adjustment BOOL NOT NULL DEFAULT FALSE,
1333 prox_adjustment NUMERIC,
-- FK added later in this script as a DEFERRABLE constraint; kept as a
-- comment here so the table can be created before config.circ_modifier
-- dependencies are settled.
1334 circ_mod TEXT, -- REFERENCES config.circ_modifier (code),
-- At least one matching criterium must be supplied.
1335 CONSTRAINT prox_adj_criterium CHECK (COALESCE(item_circ_lib::TEXT,item_owning_lib::TEXT,copy_location::TEXT,hold_pickup_lib::TEXT,hold_request_lib::TEXT,circ_mod) IS NOT NULL)
-- One adjustment per unique criterium combination, plus per-column
-- lookup indexes for the targeting joins.
1337 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (item_circ_lib,item_owning_lib,copy_location,hold_pickup_lib,hold_request_lib,circ_mod);
1338 CREATE INDEX prox_adj_circ_lib_idx ON actor.org_unit_proximity_adjustment (item_circ_lib);
1339 CREATE INDEX prox_adj_owning_lib_idx ON actor.org_unit_proximity_adjustment (item_owning_lib);
1340 CREATE INDEX prox_adj_copy_location_idx ON actor.org_unit_proximity_adjustment (copy_location);
1341 CREATE INDEX prox_adj_pickup_lib_idx ON actor.org_unit_proximity_adjustment (hold_pickup_lib);
1342 CREATE INDEX prox_adj_request_lib_idx ON actor.org_unit_proximity_adjustment (hold_request_lib);
1343 CREATE INDEX prox_adj_circ_mod_idx ON actor.org_unit_proximity_adjustment (circ_mod);
-- Walk UP the org tree from the given org unit, returning each ancestor
-- (including the starting unit) with its hop distance.  ROWS 1 is a
-- planner hint: the typical result set is tiny (tree depth).
-- NOTE(review): the recursive CTE's non-recursive base term (the
-- starting row at distance 0) appears elided in this excerpt.
1345 CREATE OR REPLACE FUNCTION actor.org_unit_ancestors_distance( INT ) RETURNS TABLE (id INT, distance INT) AS $$
1346 WITH RECURSIVE org_unit_ancestors_distance(id, distance) AS (
1349 SELECT ou.parent_ou, ouad.distance+1
1350 FROM actor.org_unit ou JOIN org_unit_ancestors_distance ouad ON (ou.id = ouad.id)
1351 WHERE ou.parent_ou IS NOT NULL
1353 SELECT * FROM org_unit_ancestors_distance;
1354 $$ LANGUAGE SQL STABLE ROWS 1;
-- Compute the adjusted proximity between a copy and a hold's pickup
-- lib.  Starting from the raw actor.org_unit_proximity baseline, first
-- any matching 'absolute' adjustment replaces the baseline outright
-- (best/nearest match wins), then every matching 'relative' adjustment
-- is added on top.  copy_context_ou lets callers evaluate the copy as
-- if it lived at a different circ lib (defaults to acp.circ_lib).
-- NOTE(review): DECLARE/BEGIN markers, some declarations, and the
-- ORDER BY/LIMIT and loop terminators appear elided in this excerpt.
1356 CREATE OR REPLACE FUNCTION action.hold_copy_calculated_proximity(
1359 copy_context_ou INT DEFAULT NULL
1360 -- TODO maybe? hold_context_ou INT DEFAULT NULL. This would optionally
1361 -- support an "ahprox" measurement: adjust prox between copy circ lib and
1362 -- hold request lib, but I'm unsure whether to use this theoretical
1363 -- argument only in the baseline calculation or later in the other
1364 -- queries in this function.
1365 ) RETURNS NUMERIC AS $f$
1367 aoupa actor.org_unit_proximity_adjustment%ROWTYPE;
1368 ahr action.hold_request%ROWTYPE;
1369 acp asset.copy%ROWTYPE;
1370 acn asset.call_number%ROWTYPE;
1371 acl asset.copy_location%ROWTYPE;
1372 baseline_prox NUMERIC;
-- Load the hold, the copy, and the copy's call number / location rows.
1382 SELECT * INTO ahr FROM action.hold_request WHERE id = ahr_id;
1383 SELECT * INTO acp FROM asset.copy WHERE id = acp_id;
1384 SELECT * INTO acn FROM asset.call_number WHERE id = acp.call_number;
1385 SELECT * INTO acl FROM asset.copy_location WHERE id = acp.location;
1387 IF copy_context_ou IS NULL THEN
1388 copy_context_ou := acp.circ_lib;
1391 -- First, gather the baseline proximity of "here" to pickup lib
1392 SELECT prox INTO baseline_prox FROM actor.org_unit_proximity WHERE from_org = copy_context_ou AND to_org = ahr.pickup_lib;
1394 -- Find any absolute adjustments, and set the baseline prox to that
1395 SELECT adj.* INTO aoupa
1396 FROM actor.org_unit_proximity_adjustment adj
1397 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1398 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
-- NOTE(review): this join condition references acn_ol.id (the
-- owning-lib ancestor set) rather than acl_ol.id, and compares an
-- org-unit id against adj.copy_location, which is an
-- asset.copy_location FK.  This looks like a copy/paste slip -- the
-- intended condition should presumably involve the copy's location.
-- Confirm against upstream before relying on copy_location matching.
1399 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1400 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1401 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1402 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1403 absolute_adjustment AND
-- Require at least one criterium to have actually matched.
1404 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
-- Nearest-match ordering: unmatched criteria cost 999 so the most
-- specific matching adjustment sorts first.
1406 COALESCE(acp_cl.distance,999)
1407 + COALESCE(acn_ol.distance,999)
1408 + COALESCE(acl_ol.distance,999)
1409 + COALESCE(ahr_pl.distance,999)
1410 + COALESCE(ahr_rl.distance,999),
1415 baseline_prox := aoupa.prox_adjustment;
1418 -- Now find any relative adjustments, and change the baseline prox based on them
1421 FROM actor.org_unit_proximity_adjustment adj
1422 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1423 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
-- NOTE(review): same acn_ol.id / adj.copy_location condition as in the
-- absolute-adjustment query above -- confirm intended join column.
1424 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1425 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1426 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1427 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1428 NOT absolute_adjustment AND
1429 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
1431 baseline_prox := baseline_prox + aoupa.prox_adjustment;
1434 RETURN baseline_prox;
1436 $f$ LANGUAGE PLPGSQL;
-- Attach the deferred circ_mod FK promised by the comment in the table
-- definition above.
1438 ALTER TABLE actor.org_unit_proximity_adjustment
1439 ADD CONSTRAINT actor_org_unit_proximity_adjustment_circ_mod_fkey
1440 FOREIGN KEY (circ_mod) REFERENCES config.circ_modifier (code)
1441 DEFERRABLE INITIALLY DEFERRED;
-- Cache the calculated proximity on the hold-copy map.
1443 ALTER TABLE action.hold_copy_map ADD COLUMN proximity NUMERIC;
-- 0760: configurable best-hold selection sort orders.  Each non-null
-- column holds that criterion's position (1 = most significant).
1446 SELECT evergreen.upgrade_deps_block_check('0760', :eg_version);
1448 CREATE TABLE config.best_hold_order(
1449 id SERIAL PRIMARY KEY, -- (metadata)
1450 name TEXT UNIQUE, -- i18n (metadata)
1451 pprox INT, -- copy capture <-> pickup lib prox
1452 hprox INT, -- copy circ lib <-> request lib prox
1453 aprox INT, -- copy circ lib <-> pickup lib ADJUSTED prox on ahcm
1454 approx INT, -- copy capture <-> pickup lib ADJUSTED prox from function
1455 priority INT, -- group hold priority
1456 cut INT, -- cut-in-line
1457 depth INT, -- selection depth
1458 htime INT, -- time since last home-lib circ exceeds org-unit setting
1459 rtime INT, -- request time
1460 shtime INT -- time since copy last trip home exceeds org-unit setting
1463 -- At least one of these columns must contain a non-null value
1464 ALTER TABLE config.best_hold_order ADD CHECK ((
1465 pprox IS NOT NULL OR
1466 hprox IS NOT NULL OR
1467 aprox IS NOT NULL OR
1468 priority IS NOT NULL OR
1470 depth IS NOT NULL OR
1471 htime IS NOT NULL OR
-- Six stock sort orders: Traditional and FIFO variants, each with
-- holds-go-home / holds-always-go-home flavors.  The VALUES list maps
-- positions 1..8 onto the column list in each INSERT.
1475 INSERT INTO config.best_hold_order (
1477 pprox, aprox, priority, cut, depth, rtime, htime, hprox
1480 1, 2, 3, 4, 5, 6, 7, 8
1483 INSERT INTO config.best_hold_order (
1485 hprox, pprox, aprox, priority, cut, depth, rtime, htime
1487 'Traditional with Holds-always-go-home',
1488 1, 2, 3, 4, 5, 6, 7, 8
1491 INSERT INTO config.best_hold_order (
1493 htime, hprox, pprox, aprox, priority, cut, depth, rtime
1495 'Traditional with Holds-go-home',
1496 1, 2, 3, 4, 5, 6, 7, 8
1499 INSERT INTO config.best_hold_order (
1501 priority, cut, rtime, depth, pprox, hprox, aprox, htime
1504 1, 2, 3, 4, 5, 6, 7, 8
1507 INSERT INTO config.best_hold_order (
1509 hprox, priority, cut, rtime, depth, pprox, aprox, htime
1511 'FIFO with Holds-always-go-home',
1512 1, 2, 3, 4, 5, 6, 7, 8
1515 INSERT INTO config.best_hold_order (
1517 htime, priority, cut, rtime, depth, pprox, aprox, hprox
1519 'FIFO with Holds-go-home',
1520 1, 2, 3, 4, 5, 6, 7, 8
-- Permission guarding edits to the best-hold sort order.
1523 INSERT INTO permission.perm_list (
1524 id, code, description
1527 'ADMIN_HOLD_CAPTURE_SORT',
1530 'Allows a user to make changes to best-hold selection sort order',
-- Org-unit setting linking a library to one of the rows above.
1536 INSERT INTO config.org_unit_setting_type (
1537 name, label, description, datatype, fm_class, update_perm, grp
1539 'circ.hold_capture_order',
1541 'circ.hold_capture_order',
1542 'Best-hold selection sort order',
1547 'circ.hold_capture_order',
1548 'Defines the sort order of holds when selecting a hold to fill using a given copy at capture time',
-- Interval setting backing the htime ("go home") criterion.
1558 INSERT INTO config.org_unit_setting_type (
1559 name, label, description, datatype, update_perm, grp
1561 'circ.hold_go_home_interval',
1563 'circ.hold_go_home_interval',
1564 'Max foreign-circulation time',
1569 'circ.hold_go_home_interval',
1570 'Time a copy can spend circulating away from its circ lib before returning there to fill a hold (if one exists there)',
-- Seed a default go-home interval at the top of the org tree.
1579 INSERT INTO actor.org_unit_setting (
1580 org_unit, name, value
1582 (SELECT id FROM actor.org_unit WHERE parent_ou IS NULL),
1583 'circ.hold_go_home_interval',
-- Migrate the legacy boolean circ.holds_fifo setting to the new
-- capture-order setting pointing at the stock FIFO row.
1587 UPDATE actor.org_unit_setting SET
1588 name = 'circ.hold_capture_order',
1589 value = (SELECT id FROM config.best_hold_order WHERE name = 'FIFO')
1591 name = 'circ.holds_fifo' AND value ILIKE '%true%';
-- 0762: internal flags allowing site-wide suppression of each indexing
-- class during bulk ingest.
1594 SELECT evergreen.upgrade_deps_block_check('0762', :eg_version);
1596 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_browse_indexing');
1597 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_search_indexing');
1598 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_facet_indexing');
-- Replacement for the reingest function defined earlier in this script:
-- identical flow, but each skip_* parameter is OR-ed with its
-- corresponding ingest.skip_*_indexing internal flag, so sites can
-- suppress an indexing class globally.  COALESCE(NULLIF(skip_x, FALSE),
-- flag) means: an explicit TRUE argument always wins; FALSE defers to
-- the internal flag.
-- NOTE(review): DECLARE block and IF/LOOP terminators appear elided in
-- this excerpt.
1600 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1603 ind_data metabib.field_entry_template%ROWTYPE;
1604 mbe_row metabib.browse_entry%ROWTYPE;
1611 SELECT COALESCE(NULLIF(skip_facet, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_facet_indexing' AND enabled)) INTO b_skip_facet;
1612 SELECT COALESCE(NULLIF(skip_browse, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_browse_indexing' AND enabled)) INTO b_skip_browse;
1613 SELECT COALESCE(NULLIF(skip_search, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_search_indexing' AND enabled)) INTO b_skip_search;
1615 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
-- Clear old per-class search rows (dynamic table names via EXECUTE).
1617 IF NOT b_skip_search THEN
1618 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1619 -- RAISE NOTICE 'Emptying out %', fclass.name;
1620 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1623 IF NOT b_skip_facet THEN
1624 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1626 IF NOT b_skip_browse THEN
1627 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
-- Route each extracted field entry to facet/browse/search as flagged.
1631 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
-- Negative field ids are an extractor marker; normalize to positive.
1632 IF ind_data.field < 0 THEN
1633 ind_data.field = -1 * ind_data.field;
1636 IF ind_data.facet_field AND NOT b_skip_facet THEN
1637 INSERT INTO metabib.facet_entry (field, source, value)
1638 VALUES (ind_data.field, ind_data.source, ind_data.value);
1641 IF ind_data.browse_field AND NOT b_skip_browse THEN
1642 -- A caveat about this SELECT: this should take care of replacing
1643 -- old mbe rows when data changes, but not if normalization (by
1644 -- which I mean specifically the output of
1645 -- evergreen.oils_tsearch2()) changes. It may or may not be
1646 -- expensive to add a comparison of index_vector to index_vector
1647 -- to the WHERE clause below.
1648 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
-- Reuse an existing browse entry when present, else insert a new one.
1650 mbe_id := mbe_row.id;
1652 INSERT INTO metabib.browse_entry (value) VALUES
1653 (metabib.browse_normalize(ind_data.value, ind_data.field));
1654 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1657 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1658 VALUES (mbe_id, ind_data.field, ind_data.source);
1661 IF ind_data.search_field AND NOT b_skip_search THEN
-- Dynamic INSERT: target table depends on the entry's field_class.
1663 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1665 quote_literal(ind_data.field) || $$, $$ ||
1666 quote_literal(ind_data.source) || $$, $$ ||
1667 quote_literal(ind_data.value) ||
-- Refresh the combined class-level vectors for the record.
1673 IF NOT b_skip_search THEN
1674 PERFORM metabib.update_combined_index_vectors(bib_id);
1679 $func$ LANGUAGE PLPGSQL;
-- 0763: org-unit setting to cap accruing fines at the max fine amount.
1682 SELECT evergreen.upgrade_deps_block_check('0763', :eg_version);
1684 INSERT INTO config.org_unit_setting_type (
1685 name, label, grp, datatype
1687 'circ.fines.truncate_to_max_fine',
1688 'Truncate fines to max fine amount',
-- 0765: acquisitions providers get a default copy count for new
-- line items (0 = no default).
1695 SELECT evergreen.upgrade_deps_block_check('0765', :eg_version);
1697 ALTER TABLE acq.provider
1698 ADD COLUMN default_copy_count INTEGER NOT NULL DEFAULT 0;
-- 0768: org-unit ranking helper (function follows).
1701 SELECT evergreen.upgrade_deps_block_check('0768', :eg_version);
-- Rank an org unit (lib) for display ordering relative to a search lib
-- and an optional preferred lib.  Lower is better: exact search-lib
-- match wins outright (-20000), then pref-lib match (-10000), then
-- descendant distances.  Results feed evergreen.ranked_volumes below.
-- NOTE(review): the COALESCE wrapper over these branch expressions and
-- the fallback constant appear elided in this excerpt.
1703 CREATE OR REPLACE FUNCTION evergreen.rank_ou(lib INT, search_lib INT, pref_lib INT DEFAULT NULL)
1704 RETURNS INTEGER AS $$
1707 -- lib matches search_lib
1708 (SELECT CASE WHEN $1 = $2 THEN -20000 END),
1710 -- lib matches pref_lib
1711 (SELECT CASE WHEN $1 = $3 THEN -10000 END),
1714 -- pref_lib is a child of search_lib and lib is a child of pref lib.
1715 (SELECT distance - 5000
1716 FROM actor.org_unit_descendants_distance($3)
1717 WHERE id = $1 AND $3 IN (
1718 SELECT id FROM actor.org_unit_descendants($2))),
1720 -- lib is a child of search_lib
1721 (SELECT distance FROM actor.org_unit_descendants_distance($2) WHERE id = $1),
1723 -- all others pay cash
1726 $$ LANGUAGE SQL STABLE;
-- 0769: drop the old 6-argument ranked_volumes before recreating it
-- with the new 'includes' parameter (signature change requires DROP,
-- not CREATE OR REPLACE alone).
1731 SELECT evergreen.upgrade_deps_block_check('0769', :eg_version);
1733 DROP FUNCTION IF EXISTS
1734 evergreen.ranked_volumes(BIGINT, INT, INT, HSTORE, HSTORE, INT);
-- Return call numbers for a bib record, ranked for OPAC display by org
-- unit preference (evergreen.rank_ou) and copy status
-- (evergreen.rank_cp_status).  slimit/soffset are HSTOREs keyed by
-- class ('acn' here) for paging.  The new 'includes' argument enables
-- optional filters such as 'exclude_invisible_acn'.
-- NOTE(review): some interior lines (row-number ranking expression,
-- depth COALESCE subquery, CASE arms) appear elided in this excerpt.
1736 CREATE OR REPLACE FUNCTION evergreen.ranked_volumes(
1739 depth INT DEFAULT NULL,
1740 slimit HSTORE DEFAULT NULL,
1741 soffset HSTORE DEFAULT NULL,
1742 pref_lib INT DEFAULT NULL,
1743 includes TEXT[] DEFAULT NULL::TEXT[]
1744 ) RETURNS TABLE (id BIGINT, name TEXT, label_sortkey TEXT, rank BIGINT) AS $$
1745 SELECT ua.id, ua.name, ua.label_sortkey, MIN(ua.rank) AS rank FROM (
1746 SELECT acn.id, aou.name, acn.label_sortkey,
1747 evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status),
1749 FROM asset.call_number acn
1750 JOIN asset.copy acp ON (acn.id = acp.call_number)
-- Restrict copies to org units at/below the search lib, to the given
-- (or default) depth.
1751 JOIN actor.org_unit_descendants( $2, COALESCE(
1754 FROM actor.org_unit_type aout
1755 INNER JOIN actor.org_unit ou ON ou_type = aout.id
1758 ) AS aou ON (acp.circ_lib = aou.id)
1759 WHERE acn.record = $1
1760 AND acn.deleted IS FALSE
1761 AND acp.deleted IS FALSE
-- Optionally hide call numbers with no OPAC-visible copies.
1762 AND CASE WHEN ('exclude_invisible_acn' = ANY($7)) THEN
1765 FROM asset.opac_visible_copies
1766 WHERE copy_id = acp.id AND record = acn.record
1768 GROUP BY acn.id, acp.status, aou.name, acn.label_sortkey, aou.id
1770 ORDER BY evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status)
-- Collapse to one row per call number, keeping its best rank.
1773 GROUP BY ua.id, ua.name, ua.label_sortkey
1774 ORDER BY rank, ua.name, ua.label_sortkey
1775 LIMIT ($4 -> 'acn')::INT
1776 OFFSET ($5 -> 'acn')::INT;
1778 LANGUAGE SQL STABLE;
-- Build the <holdings> XML fragment for a bib record for the unapi
-- feed: copy counts (public/staff/pref-lib), optional monograph parts,
-- ranked volumes plus located URIs, serial subscriptions, and foreign
-- (peer-bib) copies, each gated on membership in the 'includes' array
-- ($5).  slimit/soffset HSTOREs page the per-class subqueries;
-- include_xmlns ($8) controls namespace emission.
-- NOTE(review): a number of interior lines (XMLELEMENT openers/closers,
-- CASE terminators) appear elided in this excerpt.
1780 CREATE OR REPLACE FUNCTION unapi.holdings_xml (
1784 depth INT DEFAULT NULL,
1785 includes TEXT[] DEFAULT NULL::TEXT[],
1786 slimit HSTORE DEFAULT NULL,
1787 soffset HSTORE DEFAULT NULL,
1788 include_xmlns BOOL DEFAULT TRUE,
1789 pref_lib INT DEFAULT NULL
1795 CASE WHEN $8 THEN 'http://open-ils.org/spec/holdings/v1' ELSE NULL END AS xmlns,
1796 CASE WHEN ('bre' = ANY ($5)) THEN 'tag:open-ils.org:U2@bre/' || $1 || '/' || $3 ELSE NULL END AS id,
1797 (SELECT record_has_holdable_copy FROM asset.record_has_holdable_copy($1)) AS has_holdable
-- Copy-count summaries, one <counts> element per org unit row.
1801 (SELECT XMLAGG(XMLELEMENT::XML) FROM (
1804 XMLATTRIBUTES('public' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1806 FROM asset.opac_ou_record_copy_count($2, $1)
1810 XMLATTRIBUTES('staff' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1812 FROM asset.staff_ou_record_copy_count($2, $1)
1816 XMLATTRIBUTES('pref_lib' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1818 FROM asset.opac_ou_record_copy_count($9, $1)
-- Optional monograph parts ('bmp' include).
1823 WHEN ('bmp' = ANY ($5)) THEN
1825 name monograph_parts,
1826 (SELECT XMLAGG(bmp) FROM (
1827 SELECT unapi.bmp( id, 'xml', 'monograph_part', evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'bre'), 'holdings_xml'), $3, $4, $6, $7, FALSE)
1828 FROM biblio.monograph_part
-- Volumes: ranked call numbers plus located URIs (rank 0), ordered by
-- rank/name/sortkey.  'holdings_xml'/'bre' are stripped from includes
-- to avoid infinite recursion through unapi.acn.
1836 (SELECT XMLAGG(acn ORDER BY rank, name, label_sortkey) FROM (
1838 SELECT unapi.acn(y.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), y.rank, name, label_sortkey
1839 FROM evergreen.ranked_volumes($1, $2, $4, $6, $7, $9, $5) AS y
1842 SELECT unapi.acn(uris.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), 0, name, label_sortkey
1843 FROM evergreen.located_uris($1, $2, $9) AS uris
-- Optional serial subscriptions ('ssub' include).
1846 CASE WHEN ('ssub' = ANY ($5)) THEN
1849 (SELECT XMLAGG(ssub) FROM (
1850 SELECT unapi.ssub(id,'xml','subscription','{}'::TEXT[], $3, $4, $6, $7, FALSE)
1851 FROM serial.subscription
1852 WHERE record_entry = $1
-- Optional multi-homed ("foreign") copies ('acp' include).
1856 CASE WHEN ('acp' = ANY ($5)) THEN
1858 name foreign_copies,
1859 (SELECT XMLAGG(acp) FROM (
1860 SELECT unapi.acp(p.target_copy,'xml','copy',evergreen.array_remove_item_by_value($5,'acp'), $3, $4, $6, $7, FALSE)
1861 FROM biblio.peer_bib_copy_map p
1862 JOIN asset.copy c ON (p.target_copy = c.id)
1863 WHERE NOT c.deleted AND p.peer_record = $1
1864 LIMIT ($6 -> 'acp')::INT
1865 OFFSET ($7 -> 'acp')::INT
1870 $F$ LANGUAGE SQL STABLE;
-- 0771: action/trigger hooks and validators for staff barring and
-- un-barring of patrons.
1874 SELECT evergreen.upgrade_deps_block_check('0771', :eg_version);
1876 INSERT INTO action_trigger.hook (
1884 'A user was barred by staff',
1888 INSERT INTO action_trigger.hook (
1896 'A user was un-barred by staff',
1900 INSERT INTO action_trigger.validator (
1905 'Tests if a patron is currently marked as barred'
1908 INSERT INTO action_trigger.validator (
1913 'Tests if a patron is currently not marked as barred'
-- 0772: optionally preserve metarecord mappings for deleted bibs, and
-- replace the bib-delete rule (DELETE becomes a soft-delete UPDATE).
1917 SELECT evergreen.upgrade_deps_block_check('0772', :eg_version);
1919 INSERT INTO config.internal_flag (name) VALUES ('ingest.metarecord_mapping.preserve_on_delete'); -- defaults to false/off
-- DROP without IF EXISTS is intentional here: the rule is guaranteed
-- to exist on a 2.3 database, and a failure would signal a bad schema.
1921 DROP RULE protect_bib_rec_delete ON biblio.record_entry;
1922 CREATE RULE protect_bib_rec_delete AS
1923 ON DELETE TO biblio.record_entry DO INSTEAD (
1924 UPDATE biblio.record_entry
1926 WHERE OLD.id = biblio.record_entry.id
1930 -- AFTER UPDATE OR INSERT trigger for biblio.record_entry
1931 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
1933 transformed_xml TEXT;
1936 xfrm config.xml_transform%ROWTYPE;
1938 new_attrs HSTORE := ''::HSTORE;
1939 attr_def config.record_attr_definition%ROWTYPE;
1942 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
1943 PERFORM * FROM config.internal_flag WHERE
1944 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
1946 -- One needs to keep these around to support searches
1947 -- with the #deleted modifier, so one should turn on the named
1948 -- internal flag for that functionality.
1949 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
1950 DELETE FROM metabib.record_attr WHERE id = NEW.id;
1953 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
1954 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
1955 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
1956 RETURN NEW; -- and we're done
1959 IF TG_OP = 'UPDATE' THEN -- re-ingest?
1960 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
1962 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
1967 -- Record authority linking
1968 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
1970 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
1973 -- Flatten and insert the mfr data
1974 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
1976 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
1978 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
1979 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
1981 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
1983 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
1984 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
1985 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
1986 WHERE record = NEW.id
1987 AND tag LIKE attr_def.tag
1989 WHEN attr_def.sf_list IS NOT NULL
1990 THEN POSITION(subfield IN attr_def.sf_list) > 0
1997 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
1998 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
2000 ELSIF attr_def.xpath IS NOT NULL THEN -- and xpath expression
2002 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
2004 -- See if we can skip the XSLT ... it's expensive
2005 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
2006 -- Can't skip the transform
2007 IF xfrm.xslt <> '---' THEN
2008 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
2010 transformed_xml := NEW.marc;
2013 prev_xfrm := xfrm.name;
2016 IF xfrm.name IS NULL THEN
2017 -- just grab the marcxml (empty) transform
2018 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
2019 prev_xfrm := xfrm.name;
2022 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
2024 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
2025 SELECT m.value INTO attr_value
2026 FROM biblio.marc21_physical_characteristics(NEW.id) v
2027 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
2028 WHERE v.subfield = attr_def.phys_char_sf
2029 LIMIT 1; -- Just in case ...
2033 -- apply index normalizers to attr_value
2035 SELECT n.func AS func,
2036 n.param_count AS param_count,
2038 FROM config.index_normalizer n
2039 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
2040 WHERE attr = attr_def.name
2042 EXECUTE 'SELECT ' || normalizer.func || '(' ||
2043 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
2045 WHEN normalizer.param_count > 0
2046 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
2049 ')' INTO attr_value;
2053 -- Add the new value to the hstore
2054 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
2058 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivication
2059 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
2061 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
2067 -- Gather and insert the field entry data
2068 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
2070 -- Located URI magic
2071 IF TG_OP = 'INSERT' THEN
2072 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2074 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2077 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2079 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2083 -- (re)map metarecord-bib linking
2084 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
2085 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
2087 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2089 ELSE -- we're doing an update, and we're not deleted, remap
2090 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
2092 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2098 $func$ LANGUAGE PLPGSQL;
-- Patch 0773: seed an "Alternate no attempt to code" thesaurus entry
-- (single-space code) so authority records with an uncoded thesaurus
-- position still resolve to a row in authority.thesaurus.
2101 -- Evergreen DB patch xxxx.data.authority_thesaurus_update.sql
2104 -- check whether patch can be applied
2105 SELECT evergreen.upgrade_deps_block_check('0773', :eg_version);
-- control_set is NULL: this entry is not tied to any one authority control set.
-- oils_i18n_gettext registers the name for translation ('at' class, 'name' field).
2108 INSERT INTO authority.thesaurus (code, name, control_set) VALUES
2109 (' ', oils_i18n_gettext(' ','Alternate no attempt to code','at','name'), NULL);
-- Patch 0774: per-org-unit login credentials for Z39.50 targets.
-- One credentials row per (source, owner) pair, enforced by the
-- czsc_source_once_per_lib constraint below.
-- NOTE(review): the username/password column definitions (original file
-- lines 2120-2121) are elided from this excerpt; confirm against the
-- canonical upgrade script before editing this table definition.
2113 SELECT evergreen.upgrade_deps_block_check('0774', :eg_version);
2115 CREATE TABLE config.z3950_source_credentials (
2116 id SERIAL PRIMARY KEY,
2117 owner INTEGER NOT NULL REFERENCES actor.org_unit(id),
-- FK is deferrable so the source name can be renamed in the same transaction.
2118 source TEXT NOT NULL REFERENCES config.z3950_source(name) ON DELETE CASCADE ON UPDATE CASCADE DEFERRABLE INITIALLY DEFERRED,
2119 -- do some Z servers require a username but no password or vice versa?
2122 CONSTRAINT czsc_source_once_per_lib UNIQUE (source, owner)
-- Returns the single most specific credentials row for a Z39.50 source,
-- searching the owning org unit and all of its ancestors and preferring
-- the deepest (closest) org unit via ORDER BY aout.depth DESC LIMIT 1.
-- NOTE(review): the SELECT target list (original lines ~2129-2130) and the
-- subquery close (line 2136) are elided from this excerpt.
2125 -- find the most relevant set of credentials for the Z source and org
2126 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_lookup
2127 (source TEXT, owner INTEGER)
2128 RETURNS config.z3950_source_credentials AS $$
2131 FROM config.z3950_source_credentials creds
2132 JOIN actor.org_unit aou ON (aou.id = creds.owner)
2133 JOIN actor.org_unit_type aout ON (aout.id = aou.ou_type)
2134 WHERE creds.source = $1 AND creds.owner IN (
2135 SELECT id FROM actor.org_unit_ancestors($2)
2137 ORDER BY aout.depth DESC LIMIT 1;
-- STABLE: reads tables but performs no writes; safe for use in queries.
2139 $$ LANGUAGE SQL STABLE;
-- Upsert/delete helper for config.z3950_source_credentials:
--   * row exists + both uname and passwd blank/NULL -> DELETE the row
--   * row exists + a value supplied                 -> UPDATE in place
--   * no row     + a value supplied                 -> INSERT a new row
-- NOTE(review): the IF FOUND / ELSE / END structure around these branches
-- (e.g. original lines 2150-2151, 2155, 2159-2160) is elided here.
2141 -- since we are not exposing config.z3950_source_credentials
2142 -- via the IDL, providing a stored proc gives us a way to
2143 -- set values in the table via cstore
2144 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_apply
2145 (src TEXT, org INTEGER, uname TEXT, passwd TEXT)
-- PERFORM sets FOUND, telling us whether a credentials row already exists.
2148 PERFORM 1 FROM config.z3950_source_credentials
2149 WHERE owner = org AND source = src;
2152 IF COALESCE(uname, '') = '' AND COALESCE(passwd, '') = '' THEN
2153 DELETE FROM config.z3950_source_credentials
2154 WHERE owner = org AND source = src;
2156 UPDATE config.z3950_source_credentials
2157 SET username = uname, password = passwd
2158 WHERE owner = org AND source = src;
2161 IF COALESCE(uname, '') <> '' OR COALESCE(passwd, '') <> '' THEN
2162 INSERT INTO config.z3950_source_credentials
2163 (source, owner, username, password)
2164 VALUES (src, org, uname, passwd);
2168 $$ LANGUAGE PLPGSQL;
-- Patch 0775: rebuild the config.z3950_attr -> config.z3950_source FK as
-- DEFERRABLE so source renames/deletes can cascade within a transaction.
-- NOTE(review): the ON DELETE / ON UPDATE action clauses (original lines
-- 2180-2181) are elided from this excerpt — presumably CASCADE, matching
-- the z3950_source_credentials FK above; confirm before relying on this.
2173 SELECT evergreen.upgrade_deps_block_check('0775', :eg_version);
2175 ALTER TABLE config.z3950_attr
2176 DROP CONSTRAINT z3950_attr_source_fkey,
2177 ADD CONSTRAINT z3950_attr_source_fkey
2178 FOREIGN KEY (source)
2179 REFERENCES config.z3950_source(name)
2182 DEFERRABLE INITIALLY DEFERRED;
-- Patch 0776: flag one lineitem attribute as the ordering identifier and
-- add the two ACQ permissions that govern editing that identifier.
-- NOTE(review): each perm_list INSERT's id value and closing lines
-- (e.g. original lines 2191, 2193-2200, 2202, 2204-2210) are elided here.
2185 SELECT evergreen.upgrade_deps_block_check('0776', :eg_version);
-- Marks which acq.lineitem_attr row serves as the order identifier.
2187 ALTER TABLE acq.lineitem_attr
2188 ADD COLUMN order_ident BOOLEAN NOT NULL DEFAULT FALSE;
2190 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2192 'ACQ_ADD_LINEITEM_IDENTIFIER',
2195 'When granted, newly added lineitem identifiers will propagate to linked bib records',
2201 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2203 'ACQ_SET_LINEITEM_IDENTIFIER',
2206 'Allows staff to change the lineitem identifier',
-- Patch 0777: reference copy of the JEDI (JSON-EDI) purchase-order
-- Template Toolkit template for action_trigger event definition 23.
-- Deliberately NOT applied: the WHERE clause ends in 'AND FALSE' so sites
-- with local template customizations are not clobbered by the upgrade.
-- The template serializes a PO (target) as JSON for the EDI translator:
-- vendor detection by SAN, per-vendor GIR field configuration, lineitem
-- identifiers/prices, FTX notes, and copy data compressed into
-- quantity groups. The template body below is a string literal, so no
-- annotations may be inserted inside it.
-- NOTE(review): many template lines are elided from this excerpt (the
-- embedded line numbers jump); treat this copy as non-authoritative.
2213 SELECT evergreen.upgrade_deps_block_check('0777', :eg_version);
2215 -- Listed here for reference / ease of access. The update
2216 -- is not applied here (see the WHERE clause).
2217 UPDATE action_trigger.event_definition SET template =
2221 # extract some commonly used variables
2223 VENDOR_SAN = target.provider.san;
2224 VENDCODE = target.provider.edi_default.vendcode;
2225 VENDACCT = target.provider.edi_default.vendacct;
2226 ORG_UNIT_SAN = target.ordering_agency.mailing_address.san;
2228 # set the vendor / provider
2230 VENDOR_BT = 0; # Baker & Taylor
2233 VENDOR_MW_TAPE = 0; # Midwest Tape
2234 VENDOR_RB = 0; # Recorded Books
2235 VENDOR_ULS = 0; # ULS
2237 IF VENDOR_SAN == '1556150'; VENDOR_BT = 1;
2238 ELSIF VENDOR_SAN == '1697684'; VENDOR_BRODART = 1;
2239 ELSIF VENDOR_SAN == '1697978'; VENDOR_INGRAM = 1;
2240 ELSIF VENDOR_SAN == '2549913'; VENDOR_MW_TAPE = 1;
2241 ELSIF VENDOR_SAN == '1113984'; VENDOR_RB = 1;
2242 ELSIF VENDOR_SAN == '1699342'; VENDOR_ULS = 1;
2245 # if true, pass the PO name as a secondary identifier
2246 # RFF+LI:<name>/li_id
2252 # GIR configuration --------------------------------------
2254 INC_COPIES = 1; # copies on/off switch
2259 INC_COLLECTION_CODE = 1;
2274 INC_COLLECTION_CODE = 0;
2278 # END GIR configuration ---------------------------------
2281 [%- BLOCK big_block -%]
2283 "recipient":"[% VENDOR_SAN %]",
2284 "sender":"[% ORG_UNIT_SAN %]",
2286 "ORDERS":[ "order", {
2288 "po_number":[% target.id %],
2290 [% IF INC_PO_NAME %]
2291 "po_name":"[% target.name | replace('\/', ' ') | replace('"', '\"') %]",
2294 "date":"[% date.format(date.now, '%Y%m%d') %]",
2298 {"id-qualifier": 91, "id":"[% ORG_UNIT_SAN %] [% VENDCODE %]"}
2300 {"id":"[% ORG_UNIT_SAN %]"},
2301 {"id-qualifier": 91, "id":"[% VENDACCT %]"}
2307 {"id-qualifier": 92, "id":"[% target.provider.id %]"}
2310 "currency":"[% target.provider.currency_type %]",
2313 [%- FOR li IN target.lineitems %]
2315 "line_index":"[% li.id %]",
2319 idqual = 'EN'; # default ISBN/UPC/EAN-13
2320 ident_attr = helpers.get_li_order_ident(li.attributes);
2322 idname = ident_attr.attr_name;
2323 idval = ident_attr.attr_value;
2324 IF idname == 'isbn' AND idval.length != 13;
2326 ELSIF idname == 'issn';
2333 {"id-qualifier":"[% idqual %]","id":"[% idval %]"}
2335 "price":[% li.estimated_unit_price || '0.00' %],
2337 {"BTI":"[% helpers.get_li_attr_jedi('title', '', li.attributes) %]"},
2338 {"BPU":"[% helpers.get_li_attr_jedi('publisher', '', li.attributes) %]"},
2339 {"BPD":"[% helpers.get_li_attr_jedi('pubdate', '', li.attributes) %]"},
2340 [% IF VENDOR_ULS -%]
2341 {"BEN":"[% helpers.get_li_attr_jedi('edition', '', li.attributes) %]"},
2342 {"BAU":"[% helpers.get_li_attr_jedi('author', '', li.attributes) %]"}
2344 {"BPH":"[% helpers.get_li_attr_jedi('pagination','', li.attributes) %]"}
2348 FOR note IN li.lineitem_notes;
2349 NEXT UNLESS note.vendor_public == 't';
2350 ftx_vals.push(note.value);
2352 IF VENDOR_BRODART; # look for copy-level spec code
2353 FOR lid IN li.lineitem_details;
2355 spec_note = lid.note.match('spec code ([a-zA-Z0-9_])');
2356 IF spec_note.0; ftx_vals.push(spec_note.0); END;
2360 IF xtra_ftx; ftx_vals.unshift(xtra_ftx); END;
2362 # BT & ULS want FTX+LIN for every LI, even if empty
2363 IF ((VENDOR_BT OR VENDOR_ULS) AND ftx_vals.size == 0);
2364 ftx_vals.unshift('');
2369 [% FOR note IN ftx_vals -%] "[% note %]"[% UNLESS loop.last %], [% END %][% END %]
2372 "quantity":[% li.lineitem_details.size %],
2374 [%- IF INC_COPIES -%]
2376 [%- compressed_copies = [];
2377 FOR lid IN li.lineitem_details;
2378 fund = lid.fund.code;
2379 item_type = lid.circ_modifier;
2380 callnumber = lid.cn_label;
2381 owning_lib = lid.owning_lib.shortname;
2382 location = lid.location;
2383 collection_code = lid.collection_code;
2385 # when we have real copy data, treat it as authoritative for some fields
2386 acp = lid.eg_copy_id;
2388 item_type = acp.circ_modifier;
2389 callnumber = acp.call_number.label;
2390 location = acp.location.name;
2394 # collapse like copies into groups w/ quantity
2397 IF !INC_COPY_ID; # INC_COPY_ID implies 1 copy per GIR
2398 FOR copy IN compressed_copies;
2399 IF (fund == copy.fund OR (!fund AND !copy.fund)) AND
2400 (item_type == copy.item_type OR (!item_type AND !copy.item_type)) AND
2401 (callnumber == copy.callnumber OR (!callnumber AND !copy.callnumber)) AND
2402 (owning_lib == copy.owning_lib OR (!owning_lib AND !copy.owning_lib)) AND
2403 (location == copy.location OR (!location AND !copy.location)) AND
2404 (collection_code == copy.collection_code OR (!collection_code AND !copy.collection_code));
2406 copy.quantity = copy.quantity + 1;
2413 compressed_copies.push({
2415 item_type => item_type,
2416 callnumber => callnumber,
2417 owning_lib => owning_lib,
2418 location => location,
2419 collection_code => collection_code,
2420 copy_id => lid.id, # for INC_COPY_ID
2425 FOR copy IN compressed_copies;
2427 # If we assume owning_lib is required and set,
2428 # it is safe to prepend each following copy field w/ a ","
2430 # B&T EDI requires expected GIR fields to be
2431 # present regardless of whether a value exists.
2432 # some fields are required to have a value in ACQ,
2433 # though, so they are not forced into place below.
2435 %]{[%- IF INC_OWNING_LIB AND copy.owning_lib %] "owning_lib":"[% copy.owning_lib %]"[% END -%]
2436 [%- IF INC_FUND AND copy.fund %],"fund":"[% copy.fund %]"[% END -%]
2437 [%- IF INC_CALLNUMBER AND (VENDOR_BT OR copy.callnumber) %],"call_number":"[% copy.callnumber %]"[% END -%]
2438 [%- IF INC_ITEM_TYPE AND (VENDOR_BT OR copy.item_type) %],"item_type":"[% copy.item_type %]"[% END -%]
2439 [%- IF INC_LOCATION AND copy.location %],"copy_location":"[% copy.location %]"[% END -%]
2440 [%- IF INC_COLLECTION_CODE AND (VENDOR_BT OR copy.collection_code) %],"collection_code":"[% copy.collection_code %]"[% END -%]
2441 [%- IF INC_QUANTITY %],"quantity":"[% copy.quantity %]"[% END -%]
2442 [%- IF INC_COPY_ID %],"copy_id":"[% copy.copy_id %]" [% END %]}[% ',' UNLESS loop.last -%]
2443 [%- END -%] [%# FOR compressed_copies -%]
2445 [%- END -%] [%# IF INC_COPIES %]
2447 }[% UNLESS loop.last %],[% END -%]
2449 [% END %] [%# END lineitems %]
2451 "line_items":[% target.lineitems.size %]
2452 }] [%# close ORDERS array %]
2453 }] [%# close body array %]
2456 [% tempo = PROCESS big_block; helpers.escape_json(tempo) %]
2458 WHERE ID = 23 AND FALSE; -- remove 'AND FALSE' to apply this update
-- Patch 0777 (continued): replace the "PO HTML" worksheet template
-- (action_trigger event definition 4). Unlike the JEDI template above,
-- this UPDATE *is* applied (no 'AND FALSE' guard). The template renders a
-- purchase order as HTML: vendor and ship-to/bill-to addresses, vendor
-- notes, then one table row per lineitem with identifier, title, count,
-- unit price, line total, and a running subtotal. The template body is a
-- string literal, so no annotations may be inserted inside it.
-- NOTE(review): several template lines are elided from this excerpt.
2461 -- lineitem worksheet
2462 UPDATE action_trigger.event_definition SET template =
2466 # find a lineitem attribute by name and optional type
2468 FOR attr IN li.attributes;
2469 IF attr.attr_name == attr_name;
2470 IF !attr_type OR attr_type == attr.attr_type;
2479 <h2>Purchase Order [% target.id %]</h2>
2481 date <b>[% date.format(date.now, '%Y%m%d') %]</b>
2485 table td { padding:5px; border:1px solid #aaa;}
2486 table { width:95%; border-collapse:collapse; }
2487 #vendor-notes { padding:5px; border:1px solid #aaa; }
2489 <table id='vendor-table'>
2491 <td valign='top'>Vendor</td>
2493 <div>[% target.provider.name %]</div>
2494 <div>[% target.provider.addresses.0.street1 %]</div>
2495 <div>[% target.provider.addresses.0.street2 %]</div>
2496 <div>[% target.provider.addresses.0.city %]</div>
2497 <div>[% target.provider.addresses.0.state %]</div>
2498 <div>[% target.provider.addresses.0.country %]</div>
2499 <div>[% target.provider.addresses.0.post_code %]</div>
2501 <td valign='top'>Ship to / Bill to</td>
2503 <div>[% target.ordering_agency.name %]</div>
2504 <div>[% target.ordering_agency.billing_address.street1 %]</div>
2505 <div>[% target.ordering_agency.billing_address.street2 %]</div>
2506 <div>[% target.ordering_agency.billing_address.city %]</div>
2507 <div>[% target.ordering_agency.billing_address.state %]</div>
2508 <div>[% target.ordering_agency.billing_address.country %]</div>
2509 <div>[% target.ordering_agency.billing_address.post_code %]</div>
2515 <fieldset id='vendor-notes'>
2516 <legend>Notes to the Vendor</legend>
2518 [% FOR note IN target.notes %]
2519 [% IF note.vendor_public == 't' %]
2520 <li>[% note.value %]</li>
2531 <th>ISBN or Item #</th>
2542 [% FOR li IN target.lineitems %]
2545 [% count = li.lineitem_details.size %]
2546 [% price = li.estimated_unit_price %]
2547 [% litotal = (price * count) %]
2548 [% subtotal = subtotal + litotal %]
2550 ident_attr = helpers.get_li_order_ident(li.attributes);
2551 SET ident_value = ident_attr.attr_value IF ident_attr;
2553 <td>[% target.id %]</td>
2554 <td>[% ident_value %]</td>
2555 <td>[% PROCESS get_li_attr attr_name = 'title' %]</td>
2556 <td>[% count %]</td>
2557 <td>[% price %]</td>
2558 <td>[% litotal %]</td>
2561 [% FOR note IN li.lineitem_notes %]
2562 [% IF note.vendor_public == 't' %]
2563 <li>[% note.value %]</li>
2571 <td/><td/><td/><td/>
2573 <td>[% subtotal %]</td>
2580 Total Line Item Count: [% target.lineitems.size %]
2582 WHERE ID = 4; -- PO HTML
-- Patch 0778: set-returning variant of MARC field extraction.
-- Args: $1 = source table name, $2 = record id, $3 = XPath expression,
-- $4 = optional regex of characters to strip from each extracted value.
-- NOTE(review): the DECLARE/BEGIN section, the SELECT target list, and the
-- RETURN plumbing (several original lines) are elided from this excerpt.
2585 SELECT evergreen.upgrade_deps_block_check('0778', :eg_version);
2587 CREATE OR REPLACE FUNCTION extract_marc_field_set
2588 (TEXT, BIGINT, TEXT, TEXT) RETURNS SETOF TEXT AS $$
-- oils_xpath_table evaluates the XPath ($3) against the 'marc' column of
-- table $1 for the row matching id $2.
2596 FROM oils_xpath_table(
2597 'id', 'marc', $1, $3, 'id = ' || $2)
2598 AS t(id int, t text))x
-- Optionally strip unwanted characters using the caller-supplied regex.
2600 IF $4 IS NOT NULL THEN
2601 SELECT INTO output (SELECT regexp_replace(output, $4, '', 'g'));
2607 $$ LANGUAGE PLPGSQL IMMUTABLE;
-- Convenience wrapper: extract a set of MARC field values from an
-- acq.lineitem row. Delegates to extract_marc_field_set with the table
-- name pre-bound to 'acq.lineitem'.
-- NOTE(review): the closing '$$ LANGUAGE SQL;' line is elided here.
2610 CREATE OR REPLACE FUNCTION
2611 public.extract_acq_marc_field_set ( BIGINT, TEXT, TEXT)
2612 RETURNS SETOF TEXT AS $$
2613 SELECT extract_marc_field_set('acq.lineitem', $1, $2, $3);
-- Trigger function: (re)build acq.lineitem_attr rows from a lineitem's
-- MARC. Walks every attribute definition (via the lineitem_attr_definition
-- inheritance tree, using tableoid to recover the concrete subtype),
-- extracts values by XPath, and inserts one lineitem_attr row per value.
-- NOTE(review): DECLARE section, trigger-op handling, and loop/RETURN
-- plumbing (many original lines) are elided from this excerpt.
2617 CREATE OR REPLACE FUNCTION public.ingest_acq_marc ( ) RETURNS TRIGGER AS $function$
2626 FOR adef IN SELECT *,tableoid FROM acq.lineitem_attr_definition LOOP
-- Recover which child table (definition subtype) this row came from.
2628 SELECT relname::TEXT INTO atype FROM pg_class WHERE oid = adef.tableoid;
-- usr/local definitions are managed by hand, not extracted from MARC.
2630 IF (atype NOT IN ('lineitem_usr_attr_definition','lineitem_local_attr_definition')) THEN
-- Provider-specific definitions only apply to lineitems from that provider.
2631 IF (atype = 'lineitem_provider_attr_definition') THEN
2632 SELECT provider INTO prov FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
2633 CONTINUE WHEN NEW.provider IS NULL OR prov <> NEW.provider;
2636 IF (atype = 'lineitem_provider_attr_definition') THEN
2637 SELECT xpath INTO xpath_string FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
2638 ELSIF (atype = 'lineitem_marc_attr_definition') THEN
2639 SELECT xpath INTO xpath_string FROM acq.lineitem_marc_attr_definition WHERE id = adef.id;
2640 ELSIF (atype = 'lineitem_generated_attr_definition') THEN
2641 SELECT xpath INTO xpath_string FROM acq.lineitem_generated_attr_definition WHERE id = adef.id;
-- Strip a trailing /text() (or //text()) step; positional indexing below
-- must be applied to the node, not its text().
2644 xpath_string := REGEXP_REPLACE(xpath_string,$re$//?text\(\)$$re$,'');
2646 IF (adef.code = 'title' OR adef.code = 'author') THEN
2647 -- title and author should not be split
2648 -- FIXME: once oils_xpath can grok XPATH 2.0 functions, we can use
2649 -- string-join in the xpath and remove this special case
2650 SELECT extract_acq_marc_field(id, xpath_string, adef.remove) INTO value FROM acq.lineitem WHERE id = NEW.id;
2651 IF (value IS NOT NULL AND value <> '') THEN
2652 INSERT INTO acq.lineitem_attr (lineitem, definition, attr_type, attr_name, attr_value)
2653 VALUES (NEW.id, adef.id, atype, adef.code, value);
2658 -- each application of the regex may produce multiple values
-- Positional predicate [pos] iterates over repeated fields one at a time.
2660 SELECT * FROM extract_acq_marc_field_set(
2661 NEW.id, xpath_string || '[' || pos || ']', adef.remove)
2664 IF (value IS NOT NULL AND value <> '') THEN
2665 INSERT INTO acq.lineitem_attr
2666 (lineitem, definition, attr_type, attr_name, attr_value)
2667 VALUES (NEW.id, adef.id, atype, adef.code, value);
2685 $function$ LANGUAGE PLPGSQL;
-- Patch 0779: introduce named groups for Vandelay "trash field" (field
-- stripping) definitions, then migrate any existing rows into per-owner
-- 'Legacy' groups and make the new grp column mandatory.
2688 SELECT evergreen.upgrade_deps_block_check('0779', :eg_version);
-- NOTE(review): the table's closing ');' (original line ~2696) is elided.
2690 CREATE TABLE vandelay.import_bib_trash_group(
2691 id SERIAL PRIMARY KEY,
2692 owner INT NOT NULL REFERENCES actor.org_unit(id),
2693 label TEXT NOT NULL, --i18n
2694 always_apply BOOLEAN NOT NULL DEFAULT FALSE,
2695 CONSTRAINT vand_import_bib_trash_grp_owner_label UNIQUE (owner, label)
2698 -- otherwise, the ALTER TABLE statement below
2699 -- will fail with pending trigger events.
2700 SET CONSTRAINTS ALL IMMEDIATE;
2702 ALTER TABLE vandelay.import_bib_trash_fields
2703 -- allow null-able for now..
2704 ADD COLUMN grp INTEGER REFERENCES vandelay.import_bib_trash_group;
2706 -- add any existing trash_fields to "Legacy" groups (one per unique field
2707 -- owner) as part of the upgrade, since grp is now required.
2708 -- note that vandelay.import_bib_trash_fields was never used before,
2709 -- so in most cases this should be a no-op.
2711 INSERT INTO vandelay.import_bib_trash_group (owner, label)
2712 SELECT DISTINCT(owner), 'Legacy' FROM vandelay.import_bib_trash_fields;
2714 UPDATE vandelay.import_bib_trash_fields field SET grp = tgroup.id
2715 FROM vandelay.import_bib_trash_group tgroup
2716 WHERE tgroup.owner = field.owner;
-- NOTE(review): the DROP COLUMN owner action (original line ~2724) is
-- elided from this excerpt.
2718 ALTER TABLE vandelay.import_bib_trash_fields
2719 -- now that we have values, we can make this non-null
2720 ALTER COLUMN grp SET NOT NULL,
2721 -- drop outdated constraint
2722 DROP CONSTRAINT vand_import_bib_trash_fields_idx,
2723 -- owner is implied by the grp
2725 -- make grp+field unique
2726 ADD CONSTRAINT vand_import_bib_trash_fields_once_per UNIQUE (grp, field);
-- Patch 0780: let distribution formula entries carry fund, circ modifier,
-- and collection code defaults (used by the rollover logic below, which
-- remaps entry funds at fiscal-year rollover).
2729 SELECT evergreen.upgrade_deps_block_check('0780', :eg_version);
2731 ALTER TABLE acq.distribution_formula_entry
2732 ADD COLUMN fund INT REFERENCES acq.fund (id),
2733 ADD COLUMN circ_modifier TEXT REFERENCES config.circ_modifier (code),
2734 ADD COLUMN collection_code TEXT ;
-- Fiscal-year fund rollover for an org unit (optionally its whole subtree):
--   1. validate inputs and, for encumbrance-only rollover, the permitting
--      org-unit setting;
--   2. for each propagable old-year fund, create (or find) the matching
--      new-year fund by code;
--   3. transfer the remaining balance to the new fund (or to the void);
--   4. move encumbered lineitem details and fund debits to the new fund;
--   5. optionally remap distribution formula entry funds
--      (acq.fund.rollover_distrib_forms setting);
--   6. deactivate the old fund.
-- NOTE(review): DECLARE entries, several statement fragments, and END
-- lines are elided from this excerpt (embedded line numbers jump).
2737 -- support option to roll distribution formula funds
2738 CREATE OR REPLACE FUNCTION acq.rollover_funds_by_org_tree(
2741 org_unit_id INTEGER,
2742 encumb_only BOOL DEFAULT FALSE,
2743 include_desc BOOL DEFAULT TRUE
2744 ) RETURNS VOID AS $$
2748 new_year INT := old_year + 1;
2751 xfer_amount NUMERIC := 0;
2755 roll_distrib_forms BOOL;
-- ---- input validation -------------------------------------------------
2761 IF old_year IS NULL THEN
2762 RAISE EXCEPTION 'Input year argument is NULL';
2763 ELSIF old_year NOT BETWEEN 2008 and 2200 THEN
2764 RAISE EXCEPTION 'Input year is out of range';
2767 IF user_id IS NULL THEN
2768 RAISE EXCEPTION 'Input user id argument is NULL';
2771 IF org_unit_id IS NULL THEN
2772 RAISE EXCEPTION 'Org unit id argument is NULL';
2775 -- Validate the org unit
2780 WHERE id = org_unit_id;
2782 IF org_found IS NULL THEN
2783 RAISE EXCEPTION 'Org unit id % is invalid', org_unit_id;
-- Encumbrance-only rollover must be explicitly allowed by org setting.
2784 ELSIF encumb_only THEN
2785 SELECT INTO perm_ous value::BOOL FROM
2786 actor.org_unit_ancestor_setting(
2787 'acq.fund.allow_rollover_without_money', org_unit_id
2789 IF NOT FOUND OR NOT perm_ous THEN
2790 RAISE EXCEPTION 'Encumbrance-only rollover not permitted at org %', org_unit_id;
2795 -- Loop over the propagable funds to identify the details
2796 -- from the old fund plus the id of the new one, if it exists.
2800 oldf.id AS old_fund,
2806 newf.id AS new_fund_id
-- LEFT JOIN: new_fund_id is NULL when the fund was not yet propagated.
2809 LEFT JOIN acq.fund AS newf
2810 ON ( oldf.code = newf.code )
2812 oldf.year = old_year
2814 AND newf.year = new_year
2815 AND ( ( include_desc AND oldf.org IN ( SELECT id FROM actor.org_unit_descendants( org_unit_id ) ) )
2816 OR (NOT include_desc AND oldf.org = org_unit_id ) )
2818 --RAISE NOTICE 'Processing fund %', roll_fund.old_fund;
2820 IF roll_fund.new_fund_id IS NULL THEN
2822 -- The old fund hasn't been propagated yet. Propagate it now.
2824 INSERT INTO acq.fund (
2832 balance_warning_percent,
2833 balance_stop_percent
2838 roll_fund.currency_type,
2842 roll_fund.balance_warning_percent,
2843 roll_fund.balance_stop_percent
2845 RETURNING id INTO new_fund;
2847 new_fund = roll_fund.new_fund_id;
2850 -- Determine the amount to transfer
2854 FROM acq.fund_spent_balance
2855 WHERE fund = roll_fund.old_fund;
2857 IF xfer_amount <> 0 THEN
2858 IF NOT encumb_only AND roll_fund.rollover THEN
2860 -- Transfer balance from old fund to new
2862 --RAISE NOTICE 'Transferring % from fund % to %', xfer_amount, roll_fund.old_fund, new_fund;
2864 PERFORM acq.transfer_fund(
2874 -- Transfer balance from old fund to the void
2876 -- RAISE NOTICE 'Transferring % from fund % to the void', xfer_amount, roll_fund.old_fund;
2878 PERFORM acq.transfer_fund(
2884 'Rollover into the void'
2889 IF roll_fund.rollover THEN
2891 -- Move any lineitems from the old fund to the new one
2892 -- where the associated debit is an encumbrance.
2894 -- Any other tables tying expenditure details to funds should
2895 -- receive similar treatment. At this writing there are none.
2897 UPDATE acq.lineitem_detail
2900 fund = roll_fund.old_fund -- this condition may be redundant
2906 fund = roll_fund.old_fund
2910 -- Move encumbrance debits from the old fund to the new fund
2912 UPDATE acq.fund_debit
2915 fund = roll_fund.old_fund
2919 -- Rollover distribution formulae funds
2920 SELECT INTO roll_distrib_forms value::BOOL FROM
2921 actor.org_unit_ancestor_setting(
2922 'acq.fund.rollover_distrib_forms', org_unit_id
2925 IF roll_distrib_forms THEN
2926 UPDATE acq.distribution_formula_entry
2927 SET fund = roll_fund.new_fund_id
2928 WHERE fund = roll_fund.old_fund;
2932 -- Mark old fund as inactive, now that we've closed it
2936 WHERE id = roll_fund.old_fund;
2939 $$ LANGUAGE plpgsql;
-- Patch 0781: register the org-unit setting consulted by
-- acq.rollover_funds_by_org_tree to decide whether distribution formula
-- entry funds are remapped during fiscal rollover.
-- NOTE(review): the oils_i18n_gettext wrappers and the grp/datatype values
-- (several original lines) are elided from this excerpt. The description
-- string contains a typo ('formalae'); it is runtime data, so it is left
-- untouched here — fix it in the seed data if desired.
2943 SELECT evergreen.upgrade_deps_block_check('0781', :eg_version);
2945 INSERT INTO config.org_unit_setting_type
2946 (name, label, description, grp, datatype)
2948 'acq.fund.rollover_distrib_forms',
2950 'acq.fund.rollover_distrib_forms',
2951 'Rollover Distribution Formulae Funds',
2956 'acq.fund.rollover_distrib_forms',
2957 'During fiscal rollover, update distribution formalae to use new funds',
-- Patch 0782: documentation-only. The CREATE INDEX CONCURRENTLY statements
-- below are intentionally commented out: CONCURRENTLY cannot run inside a
-- transaction block, so they live in a supplemental script to be run by
-- hand on a live system. Only the version-check SELECT executes here.
2966 -- No transaction needed. This can be run on a live, production server.
2967 SELECT evergreen.upgrade_deps_block_check('0782', :eg_version);
2969 /* ** Handled by the supplemental script ** */
2970 -- On a heavily used system, user activity lookup is painful. This is used
2971 -- on the patron display in the staff client.
2973 -- Measured speed increase: ~2s -> .01s
2974 -- CREATE INDEX CONCURRENTLY usr_activity_usr_idx on actor.usr_activity (usr);
2976 -- Finding open holds, often as a subquery within larger hold-related logic,
2977 -- can be sped up with the following.
2979 -- Measured speed increase: ~3s -> .02s
2980 -- CREATE INDEX CONCURRENTLY hold_request_open_idx on action.hold_request (id) where cancel_time IS NULL AND fulfillment_time IS NULL;
2982 -- Hold queue position is a particularly difficult thing to calculate
2983 -- efficiently. Recent changes in the query structure now allow some
2984 -- optimization via indexing. These do that.
2986 -- Measured speed increase: ~6s -> ~0.4s
2987 -- CREATE INDEX CONCURRENTLY cp_available_by_circ_lib_idx on asset.copy (circ_lib) where status IN (0,7);
2988 -- CREATE INDEX CONCURRENTLY hold_request_current_copy_before_cap_idx on action.hold_request (current_copy) where capture_time IS NULL AND cancel_time IS NULL;
2990 -- After heavy use, fetching EDI messages becomes time consuming. The following
2991 -- index addresses that for large-data environments.
2993 -- Measured speed increase: ~3s -> .1s
2994 -- CREATE INDEX CONCURRENTLY edi_message_account_status_idx on acq.edi_message (account,status);
2996 -- After heavy use, fetching POs becomes time consuming. The following
2997 -- index addresses that for large-data environments.
2999 -- Measured speed increase: ~1.5s -> .1s
3000 -- CREATE INDEX CONCURRENTLY edi_message_po_idx on acq.edi_message (purchase_order);
3002 -- Related to EDI messages, fetching of certain A/T events benefit from specific
3003 -- indexing. This index is more general than necessary for the observed query
3004 -- but ends up speeding several other (already relatively fast) queries.
3006 -- Measured speed increase: ~2s -> .06s
3007 -- CREATE INDEX CONCURRENTLY atev_def_state on action_trigger.event (event_def,state);
3009 -- Retrieval of hold transit by hold id (for transit completion or cancelation)
3010 -- is slow in some query formulations.
3012 -- Measured speed increase: ~.5s -> .1s
3013 -- CREATE INDEX CONCURRENTLY hold_transit_copy_hold_idx on action.hold_transit_copy (hold);
-- Patch 0785: rebuild the proximity-adjustment uniqueness index. COALESCE
-- maps NULL columns to sentinel values (-1 / '') so rows that differ only
-- in NULLs are treated as duplicates — a plain UNIQUE constraint would
-- allow multiple all-NULL rows because NULL != NULL.
-- NOTE(review): the remaining index column(s) and the closing ');' are
-- elided from this excerpt.
3016 SELECT evergreen.upgrade_deps_block_check('0785', :eg_version);
3018 DROP INDEX actor.prox_adj_once_idx;
3020 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (
3021 COALESCE(item_circ_lib, -1),
3022 COALESCE(item_owning_lib, -1),
3023 COALESCE(copy_location, -1),
3024 COALESCE(hold_pickup_lib, -1),
3025 COALESCE(hold_request_lib, -1),
3026 COALESCE(circ_mod, ''),
-- Patch 0786: composite types used by the staged-search machinery.
-- search_result: one output row per (meta)record hit with relevance plus
-- running counters (total/checked/visible/deleted/excluded) from the
-- visibility-testing loop. search_args: one parsed search term with its
-- field class/name and the table alias used when building the core query.
3031 --Check if we can apply the upgrade.
3032 SELECT evergreen.upgrade_deps_block_check('0786', :eg_version);
3035 CREATE TYPE search.search_result AS ( id BIGINT, rel NUMERIC, record INT, total INT, checked INT, visible INT, deleted INT, excluded INT );
3036 CREATE TYPE search.search_args AS ( id INT, field_class TEXT, field_name TEXT, table_alias TEXT, term TEXT, term_type TEXT );
3038 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3040 param_search_ou INT,
3043 param_statuses INT[],
3044 param_locations INT[],
3050 param_pref_ou INT DEFAULT NULL
3051 ) RETURNS SETOF search.search_result AS $func$
3054 current_res search.search_result%ROWTYPE;
3055 search_org_list INT[];
3056 luri_org_list INT[];
3065 core_cursor REFCURSOR;
3066 core_rel_query TEXT;
3068 total_count INT := 0;
3069 check_count INT := 0;
3070 deleted_count INT := 0;
3071 visible_count INT := 0;
3072 excluded_count INT := 0;
3076 check_limit := COALESCE( param_check, 1000 );
3077 core_limit := COALESCE( param_limit, 25000 );
3078 core_offset := COALESCE( param_offset, 0 );
3080 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
3082 IF param_search_ou > 0 THEN
3083 IF param_depth IS NOT NULL THEN
3084 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3086 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
3089 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
3091 ELSIF param_search_ou < 0 THEN
3092 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3094 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3095 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3096 luri_org_list := luri_org_list || tmp_int_list;
3099 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3101 ELSIF param_search_ou = 0 THEN
3102 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
3105 IF param_pref_ou IS NOT NULL THEN
3106 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3107 luri_org_list := luri_org_list || tmp_int_list;
3110 OPEN core_cursor FOR EXECUTE param_query;
3114 FETCH core_cursor INTO core_result;
3115 EXIT WHEN NOT FOUND;
3116 EXIT WHEN total_count >= core_limit;
3118 total_count := total_count + 1;
3120 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3122 check_count := check_count + 1;
3124 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3126 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3127 deleted_count := deleted_count + 1;
3132 FROM biblio.record_entry b
3133 JOIN config.bib_source s ON (b.source = s.id)
3134 WHERE s.transcendant
3135 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3138 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3139 visible_count := visible_count + 1;
3141 current_res.id = core_result.id;
3142 current_res.rel = core_result.rel;
3146 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3150 current_res.record = core_result.records[1];
3152 current_res.record = NULL;
3155 RETURN NEXT current_res;
3161 FROM asset.call_number cn
3162 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3163 JOIN asset.uri uri ON (map.uri = uri.id)
3164 WHERE NOT cn.deleted
3165 AND cn.label = '##URI##'
3167 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3168 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3169 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3173 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3174 visible_count := visible_count + 1;
3176 current_res.id = core_result.id;
3177 current_res.rel = core_result.rel;
3181 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3185 current_res.record = core_result.records[1];
3187 current_res.record = NULL;
3190 RETURN NEXT current_res;
3195 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3198 FROM asset.call_number cn
3199 JOIN asset.copy cp ON (cp.call_number = cn.id)
3200 WHERE NOT cn.deleted
3202 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3203 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3204 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3209 FROM biblio.peer_bib_copy_map pr
3210 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3211 WHERE NOT cp.deleted
3212 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3213 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3214 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3218 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3219 excluded_count := excluded_count + 1;
3226 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3229 FROM asset.call_number cn
3230 JOIN asset.copy cp ON (cp.call_number = cn.id)
3231 WHERE NOT cn.deleted
3233 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3234 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3235 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3240 FROM biblio.peer_bib_copy_map pr
3241 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3242 WHERE NOT cp.deleted
3243 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3244 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3245 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3249 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3250 excluded_count := excluded_count + 1;
3257 IF staff IS NULL OR NOT staff THEN
3260 FROM asset.opac_visible_copies
3261 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3262 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3267 FROM biblio.peer_bib_copy_map pr
3268 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3269 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3270 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3275 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3276 excluded_count := excluded_count + 1;
3284 FROM asset.call_number cn
3285 JOIN asset.copy cp ON (cp.call_number = cn.id)
3286 WHERE NOT cn.deleted
3288 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3289 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3295 FROM biblio.peer_bib_copy_map pr
3296 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3297 WHERE NOT cp.deleted
3298 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3299 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3305 FROM asset.call_number cn
3306 JOIN asset.copy cp ON (cp.call_number = cn.id)
3307 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3312 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3313 excluded_count := excluded_count + 1;
3322 visible_count := visible_count + 1;
3324 current_res.id = core_result.id;
3325 current_res.rel = core_result.rel;
3329 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3333 current_res.record = core_result.records[1];
3335 current_res.record = NULL;
3338 RETURN NEXT current_res;
3340 IF visible_count % 1000 = 0 THEN
3341 -- RAISE NOTICE ' % visible so far ... ', visible_count;
3346 current_res.id = NULL;
3347 current_res.rel = NULL;
3348 current_res.record = NULL;
3349 current_res.total = total_count;
3350 current_res.checked = check_count;
3351 current_res.deleted = deleted_count;
3352 current_res.visible = visible_count;
3353 current_res.excluded = excluded_count;
3357 RETURN NEXT current_res;
3360 $func$ LANGUAGE PLPGSQL;
-- Upgrade step 0788: rebuild reporter.old_super_simple_record so that MARC tag
-- 264 is consulted alongside 260 for publisher/pubdate, then refresh only the
-- materialized rows that actually carry 264 data.
-- NOTE(review): the embedded numbering in this excerpt is non-contiguous
-- (e.g. 3367 -> 3373), so the view's leading SELECT list, GROUP BY, and
-- statement terminators appear to be elided here; verify against the
-- canonical 2.3-2.4 upgrade script before running.
3364 SELECT evergreen.upgrade_deps_block_check('0788', :eg_version);
3366 -- New view including 264 as a potential tag for publisher and pubdate
3367 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
3373 FIRST(title.value) AS title,
3374 FIRST(author.value) AS author,
3375 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
-- pubdate keeps only the digit run from each 260/264 $c value.
3376 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
-- ISBN normalization: first whitespace-delimited token, hyphens stripped.
3377 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
-- ISSN normalization: rewrite to "NNNN NNNx" with a single space separator.
3378 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
3379 FROM biblio.record_entry r
3380 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
3381 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
-- 264 rows are accepted only when ind2 = '1' (presumably RDA publication
-- statements, as opposed to production/distribution -- confirm against MARC21).
3382 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND (publisher.tag = '260' OR (publisher.tag = '264' AND publisher.ind2 = '1')) AND publisher.subfield = 'b')
3383 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND (pubdate.tag = '260' OR (pubdate.tag = '264' AND pubdate.ind2 = '1')) AND pubdate.subfield = 'c')
3384 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
3385 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
3388 -- Update reporter.materialized_simple_record with new 264-based values for publisher and pubdate
-- Delete-then-reinsert limited to records having 264 $b or $c, so the bulk of
-- the materialized table is untouched.
3389 DELETE FROM reporter.materialized_simple_record WHERE id IN (
3390 SELECT DISTINCT record FROM metabib.full_rec WHERE tag = '264' AND subfield IN ('b', 'c')
3393 INSERT INTO reporter.materialized_simple_record
3394 SELECT DISTINCT rossr.* FROM reporter.old_super_simple_record rossr INNER JOIN metabib.full_rec mfr ON mfr.record = rossr.id
3395 WHERE mfr.tag = '264' AND mfr.subfield IN ('b', 'c')
-- Upgrade steps 0789/0790. (0789's own DDL -- the text search configurations --
-- is deferred to the end of this script so it can run outside a transaction.)
3398 SELECT evergreen.upgrade_deps_block_check('0789', :eg_version);
3399 SELECT evergreen.upgrade_deps_block_check('0790', :eg_version);
-- Add a per-class "combined" flag to the metabib class registry; only the
-- 'subject' class is switched on here.
3401 ALTER TABLE config.metabib_class ADD COLUMN combined BOOL NOT NULL DEFAULT FALSE;
3402 UPDATE config.metabib_class SET combined = TRUE WHERE name = 'subject';
3405 -- Check whether we can apply upgrade step 0791.
3406 SELECT evergreen.upgrade_deps_block_check('0791', :eg_version);
-- search.query_parser_fts: staged search-result filter. It opens a cursor over
-- the caller-supplied core query (param_query) and, for each candidate
-- metarecord, tests visibility in order: deleted bibs, transcendent sources,
-- located URIs, copy status, copy location, and OPAC copy visibility. Rows that
-- survive are emitted via RETURN NEXT; a final summary row carries the
-- total/checked/deleted/visible/excluded counters.
-- NOTE(review): the embedded numbering below is non-contiguous (3410, 3412,
-- 3415, ...) -- parameter lines, SELECT targets, ELSE/END IF lines and loop
-- delimiters appear to be elided from this excerpt. Do not run this fragment
-- as-is; verify against the canonical upgrade script.
3410 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3412 param_search_ou INT,
3415 param_statuses INT[],
3416 param_locations INT[],
3422 deleted_search BOOL,
-- New in this revision: optional preferred org unit, folded into the located-URI
-- org list below. DEFAULT NULL keeps the signature backward-compatible.
3423 param_pref_ou INT DEFAULT NULL
3424 ) RETURNS SETOF search.search_result AS $func$
3427 current_res search.search_result%ROWTYPE;
3428 search_org_list INT[];
3429 luri_org_list INT[];
3438 core_cursor REFCURSOR;
3439 core_rel_query TEXT;
-- Progress counters reported in the trailing summary row.
3441 total_count INT := 0;
3442 check_count INT := 0;
3443 deleted_count INT := 0;
3444 visible_count INT := 0;
3445 excluded_count INT := 0;
-- Defaults: check at most 1000 rows, cap the core set at 25000, no offset.
3449 check_limit := COALESCE( param_check, 1000 );
3450 core_limit := COALESCE( param_limit, 25000 );
3451 core_offset := COALESCE( param_offset, 0 );
3453 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
-- Build the search scope. Positive ou = subtree of that org unit (optionally
-- depth-limited); negative ou = an org lasso; zero is reserved (see below).
3455 IF param_search_ou > 0 THEN
3456 IF param_depth IS NOT NULL THEN
3457 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3459 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
-- Located-URI visibility follows ancestors, not descendants.
3462 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
3464 ELSIF param_search_ou < 0 THEN
3465 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3467 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3468 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3469 luri_org_list := luri_org_list || tmp_int_list;
3472 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3474 ELSIF param_search_ou = 0 THEN
3475 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
-- Fold the preferred OU's ancestors into the located-URI scope as well.
3478 IF param_pref_ou IS NOT NULL THEN
3479 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3480 luri_org_list := luri_org_list || tmp_int_list;
-- Main loop: walk the caller's core query one row at a time.
3483 OPEN core_cursor FOR EXECUTE param_query;
3487 FETCH core_cursor INTO core_result;
3488 EXIT WHEN NOT FOUND;
3489 EXIT WHEN total_count >= core_limit;
3491 total_count := total_count + 1;
-- Skip rows outside the [offset+1, offset+check_limit] checking window.
3493 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3495 check_count := check_count + 1;
-- Stage 1: unless this is a #deleted search, drop results whose bibs are all deleted.
3497 IF NOT deleted_search THEN
3499 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3501 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3502 deleted_count := deleted_count + 1;
-- Stage 2: records from a transcendent bib source are visible unconditionally.
3507 FROM biblio.record_entry b
3508 JOIN config.bib_source s ON (b.source = s.id)
3509 WHERE s.transcendant
3510 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3513 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3514 visible_count := visible_count + 1;
3516 current_res.id = core_result.id;
3517 current_res.rel = core_result.rel;
3521 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
-- Single-source metarecords surface their one bib id; multi-source ones don't.
3525 current_res.record = core_result.records[1];
3527 current_res.record = NULL;
3530 RETURN NEXT current_res;
-- Stage 3: records with a located URI ('##URI##' call number) owned within
-- luri_org_list are visible, provided no status/location filters are in play.
3536 FROM asset.call_number cn
3537 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3538 JOIN asset.uri uri ON (map.uri = uri.id)
3539 WHERE NOT cn.deleted
3540 AND cn.label = '##URI##'
3542 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3543 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3544 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3548 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3549 visible_count := visible_count + 1;
3551 current_res.id = core_result.id;
3552 current_res.rel = core_result.rel;
3556 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3560 current_res.record = core_result.records[1];
3562 current_res.record = NULL;
3565 RETURN NEXT current_res;
-- Stage 4: copy-status filter. A record passes if it (or a multi-homed peer
-- copy) has a copy in one of the requested statuses within the search scope.
3570 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3573 FROM asset.call_number cn
3574 JOIN asset.copy cp ON (cp.call_number = cn.id)
3575 WHERE NOT cn.deleted
3577 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3578 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3579 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3584 FROM biblio.peer_bib_copy_map pr
3585 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3586 WHERE NOT cp.deleted
3587 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3588 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3589 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3593 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3594 excluded_count := excluded_count + 1;
-- Stage 5: copy-location filter, same shape as the status filter above.
3601 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3604 FROM asset.call_number cn
3605 JOIN asset.copy cp ON (cp.call_number = cn.id)
3606 WHERE NOT cn.deleted
3608 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3609 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3610 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3615 FROM biblio.peer_bib_copy_map pr
3616 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3617 WHERE NOT cp.deleted
3618 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3619 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3620 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3624 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3625 excluded_count := excluded_count + 1;
-- Stage 6a: non-staff searches consult the asset.opac_visible_copies cache
-- (directly, then via multi-homed peers).
3632 IF staff IS NULL OR NOT staff THEN
3635 FROM asset.opac_visible_copies
3636 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3637 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3642 FROM biblio.peer_bib_copy_map pr
3643 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3644 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3645 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3650 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3651 excluded_count := excluded_count + 1;
-- Stage 6b: staff searches check for in-scope copies directly, then peers,
-- then (apparently as a fallback) any copy on the record at all.
3659 FROM asset.call_number cn
3660 JOIN asset.copy cp ON (cp.call_number = cn.id)
3661 WHERE NOT cn.deleted
3663 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3664 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3670 FROM biblio.peer_bib_copy_map pr
3671 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3672 WHERE NOT cp.deleted
3673 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3674 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3680 FROM asset.call_number cn
3681 JOIN asset.copy cp ON (cp.call_number = cn.id)
3682 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3687 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3688 excluded_count := excluded_count + 1;
-- Survivors of every stage are emitted as visible results.
3699 visible_count := visible_count + 1;
3701 current_res.id = core_result.id;
3702 current_res.rel = core_result.rel;
3706 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3710 current_res.record = core_result.records[1];
3712 current_res.record = NULL;
3715 RETURN NEXT current_res;
3717 IF visible_count % 1000 = 0 THEN
3718 -- RAISE NOTICE ' % visible so far ... ', visible_count;
-- Trailing summary row: NULL id/rel/record, counters populated.
3723 current_res.id = NULL;
3724 current_res.rel = NULL;
3725 current_res.record = NULL;
3726 current_res.total = total_count;
3727 current_res.checked = check_count;
3728 current_res.deleted = deleted_count;
3729 current_res.visible = visible_count;
3730 current_res.excluded = excluded_count;
3734 RETURN NEXT current_res;
3737 $func$ LANGUAGE PLPGSQL;
3740 -- AFTER UPDATE OR INSERT trigger for biblio.record_entry
-- Ingest pipeline for bib records: on delete, tear down search/linking data;
-- otherwise (re)build authority links, the flat MARC rows (metabib.full_rec),
-- the hstore attribute set (metabib.record_attr), field entries, located URIs,
-- and metarecord mapping. Each phase is individually gated by a row in
-- config.internal_flag.
-- NOTE(review): the embedded numbering below is non-contiguous (3741, 3743,
-- 3746, ...) -- declarations, IF NOT FOUND guards, ELSE/END IF lines and loop
-- delimiters appear to be elided from this excerpt; verify against the
-- canonical upgrade script before running.
3741 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
3743 transformed_xml TEXT;
3746 xfrm config.xml_transform%ROWTYPE;
-- Accumulates name => value attribute pairs for metabib.record_attr.
3748 new_attrs HSTORE := ''::HSTORE;
3749 attr_def config.record_attr_definition%ROWTYPE;
3752 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
3753 PERFORM * FROM config.internal_flag WHERE
3754 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
3756 -- One needs to keep these around to support searches
3757 -- with the #deleted modifier, so one should turn on the named
3758 -- internal flag for that functionality.
3759 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
3760 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3763 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
3764 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
3765 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
3766 RETURN NEW; -- and we're done
3769 IF TG_OP = 'UPDATE' THEN -- re-ingest?
3770 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
3772 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
3777 -- Record authority linking
3778 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
3780 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
3783 -- Flatten and insert the mfr (metabib.full_rec) data
3784 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
3786 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
3788 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
3789 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
3791 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
-- Extraction strategy 1 of 4: MARC tag pattern plus optional subfield list.
3793 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
3794 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
3795 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
3796 WHERE record = NEW.id
3797 AND tag LIKE attr_def.tag
3799 WHEN attr_def.sf_list IS NOT NULL
3800 THEN POSITION(subfield IN attr_def.sf_list) > 0
-- Extraction strategy 2: a named MARC fixed field.
3807 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
3808 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
-- Extraction strategy 3: an XPath expression, evaluated against the
-- (possibly XSLT-transformed) MARCXML.
3810 ELSIF attr_def.xpath IS NOT NULL THEN -- an XPath expression
3812 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
3814 -- See if we can skip the XSLT ... it's expensive
3815 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
3816 -- Can't skip the transform
3817 IF xfrm.xslt <> '---' THEN
3818 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
3820 transformed_xml := NEW.marc;
3823 prev_xfrm := xfrm.name;
3826 IF xfrm.name IS NULL THEN
3827 -- just grab the marcxml (empty) transform
3828 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
3829 prev_xfrm := xfrm.name;
3832 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
-- Extraction strategy 4: a named physical-characteristic subfield.
3834 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
3835 SELECT m.value INTO attr_value
3836 FROM biblio.marc21_physical_characteristics(NEW.id) v
3837 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
3838 WHERE v.subfield = attr_def.phys_char_sf
3839 LIMIT 1; -- Just in case ...
3843 -- apply index normalizers to attr_value
3845 SELECT n.func AS func,
3846 n.param_count AS param_count,
3848 FROM config.index_normalizer n
3849 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
3850 WHERE attr = attr_def.name
-- Normalizer invocation is built as dynamic SQL; params come from a
-- JSON-ish bracketed list with quote characters rewritten for SQL.
3852 EXECUTE 'SELECT ' || normalizer.func || '(' ||
3853 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
3855 WHEN normalizer.param_count > 0
3856 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
3859 ')' INTO attr_value;
3863 -- Add the new value to the hstore
3864 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
3868 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivification
3869 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3870 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
3872 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
3878 -- Gather and insert the field entry data
3879 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
3881 -- Located URI magic, gated by the ingest.disable_located_uri flag
3882 IF TG_OP = 'INSERT' THEN
3883 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3885 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3888 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3890 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3894 -- (re)map metarecord-bib linking
3895 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
3896 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
3898 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3900 ELSE -- we're doing an update, and we're not deleted, remap
3901 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
3903 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3909 $func$ LANGUAGE PLPGSQL;
-- Upgrade step 0792: permission id 544 was seeded with its numeric id as its
-- code; give it the intended symbolic code. The "AND code = '544'" guard makes
-- this idempotent and avoids clobbering a site-customized code.
3911 SELECT evergreen.upgrade_deps_block_check('0792', :eg_version);
3913 UPDATE permission.perm_list SET code = 'URL_VERIFY_UPDATE_SETTINGS' WHERE id = 544 AND code = '544';
-- Upgrade step 0793: adjust the three stock config.best_hold_order rows.
-- Each UPDATE is guarded by name plus (per the embedded numbering) additional
-- conditions, so site-modified rows are left alone.
-- NOTE(review): the SET clauses and the trailing WHERE conditions are elided
-- from this excerpt (numbering jumps 3918 -> 3929, etc.); these statements are
-- not runnable as shown -- verify against the canonical upgrade script.
3916 SELECT evergreen.upgrade_deps_block_check('0793', :eg_version);
3918 UPDATE config.best_hold_order
3929 WHERE name = 'Traditional' AND
3937 UPDATE config.best_hold_order
3948 WHERE name = 'Traditional with Holds-always-go-home' AND
3958 UPDATE config.best_hold_order
3969 WHERE name = 'Traditional with Holds-go-home' AND
3982 -- These are from 0789, and can and should be run outside of a transaction
-- One native full-text search configuration per search class, each cloned
-- from the english_nostop configuration (replacing the dropped TSearch2 setup
-- from earlier in this script).
3983 CREATE TEXT SEARCH CONFIGURATION title ( COPY = english_nostop );
3984 CREATE TEXT SEARCH CONFIGURATION author ( COPY = english_nostop );
3985 CREATE TEXT SEARCH CONFIGURATION subject ( COPY = english_nostop );
3986 CREATE TEXT SEARCH CONFIGURATION series ( COPY = english_nostop );
3987 CREATE TEXT SEARCH CONFIGURATION identifier ( COPY = english_nostop );
-- psql metacommand: remind the operator of the required follow-up script.
3989 \qecho Please run Open-ILS/src/sql/Pg/version-upgrade/2.3-2.4-supplemental.sh now, which contains additional required SQL to complete your Evergreen upgrade!