1 --Upgrade Script for 2.3 to 2.4.0RC
-- Record this upgrade in config.upgrade_log, then gate each numbered patch
-- below with evergreen.upgrade_deps_block_check; presumably that function
-- aborts if the patch was already applied -- confirm against its definition.
2 \set eg_version '''2.4.0RC'''
4 INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.4.0RC', :eg_version);
5 -- remove the Bypass hold capture during clear shelf process setting
7 SELECT evergreen.upgrade_deps_block_check('0739', :eg_version);
-- Purge the retired 'circ.holds.clear_shelf.no_capture_holds' setting:
-- per-org-unit values and audit-log rows first, the setting type row last.
-- NOTE(review): this ordering assumes both tables reference
-- config.org_unit_setting_type -- confirm the FK constraints.
10 DELETE FROM actor.org_unit_setting WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
11 DELETE FROM config.org_unit_setting_type_log WHERE field_name = 'circ.holds.clear_shelf.no_capture_holds';
14 DELETE FROM config.org_unit_setting_type WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
17 SELECT evergreen.upgrade_deps_block_check('0741', :eg_version);
19 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
21 'ADMIN_TOOLBAR_FOR_ORG',
24 'Allows a user to create, edit, and delete custom toolbars for org units',
30 'ADMIN_TOOLBAR_FOR_WORKSTATION',
33 'Allows a user to create, edit, and delete custom toolbars for workstations',
39 'ADMIN_TOOLBAR_FOR_USER',
42 'Allows a user to create, edit, and delete custom toolbars for users',
49 -- Evergreen DB patch 0743.schema.remove_tsearch2.sql
51 -- Enable native full-text search to be used, and drop TSearch2 extension
54 -- check whether patch can be applied
55 SELECT evergreen.upgrade_deps_block_check('0743', :eg_version);
57 -- FIXME: add/check SQL statements to perform the upgrade
58 -- First up, these functions depend on metabib.full_rec. They have to go for now.
-- These objects are dropped only so the index_vector column type can be
-- switched off the tsearch2 wrapper type below; they are re-created later
-- in this same patch (0743).
59 DROP FUNCTION IF EXISTS biblio.flatten_marc(bigint);
60 DROP FUNCTION IF EXISTS biblio.flatten_marc(text);
62 -- These views depend on metabib.full_rec as well. Bye-bye!
63 DROP VIEW IF EXISTS reporter.old_super_simple_record;
64 DROP VIEW IF EXISTS reporter.simple_record;
66 -- Now we can drop metabib.full_rec.
67 DROP VIEW IF EXISTS metabib.full_rec;
69 -- These indexes have to go. BEFORE we alter the tables, otherwise things take extra time when we alter the tables.
-- Matching indexes are re-created after the ALTER TABLE ... TYPE pass below.
-- No IF EXISTS: the script deliberately fails loudly if a 2.3 database is
-- missing any of these.
70 DROP INDEX metabib.metabib_author_field_entry_value_idx;
71 DROP INDEX metabib.metabib_identifier_field_entry_value_idx;
72 DROP INDEX metabib.metabib_keyword_field_entry_value_idx;
73 DROP INDEX metabib.metabib_series_field_entry_value_idx;
74 DROP INDEX metabib.metabib_subject_field_entry_value_idx;
75 DROP INDEX metabib.metabib_title_field_entry_value_idx;
77 -- Now grab all of the tsvector-enabled columns and switch them to the non-wrapper version of the type.
-- Explicitly qualifying pg_catalog.tsvector moves each column from the
-- tsearch2 extension's wrapper type to the native built-in type, which is
-- what allows DROP EXTENSION tsearch2 to succeed below.
78 ALTER TABLE authority.full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
79 ALTER TABLE authority.simple_heading ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
80 ALTER TABLE metabib.real_full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
81 ALTER TABLE metabib.author_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
82 ALTER TABLE metabib.browse_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
83 ALTER TABLE metabib.identifier_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
84 ALTER TABLE metabib.keyword_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
85 ALTER TABLE metabib.series_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
86 ALTER TABLE metabib.subject_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
87 ALTER TABLE metabib.title_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
89 -- Halfway there! Goodbye tsearch2 extension!
-- NOTE(review): no IF EXISTS, so this aborts if tsearch2 is absent --
-- presumably intentional, since a 2.3 database is expected to have it
-- installed; confirm before reusing this statement elsewhere.
90 DROP EXTENSION tsearch2;
92 -- Next up, re-creating all of the stuff we just dropped.
94 -- Indexes! Note to whomever: Do we even need these anymore?
-- Partial btree indexes over the first 1024 chars of value, limited to rows
-- whose index_vector is empty; the purpose of the empty-vector restriction
-- is not evident from this script -- it mirrors the pre-upgrade indexes.
95 CREATE INDEX metabib_author_field_entry_value_idx ON metabib.author_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
96 CREATE INDEX metabib_identifier_field_entry_value_idx ON metabib.identifier_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
97 CREATE INDEX metabib_keyword_field_entry_value_idx ON metabib.keyword_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
98 CREATE INDEX metabib_series_field_entry_value_idx ON metabib.series_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
99 CREATE INDEX metabib_subject_field_entry_value_idx ON metabib.subject_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
100 CREATE INDEX metabib_title_field_entry_value_idx ON metabib.title_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
102 -- metabib.full_rec, with insert/update/delete rules
103 CREATE OR REPLACE VIEW metabib.full_rec AS
110 SUBSTRING(value,1,1024) AS value,
112 FROM metabib.real_full_rec;
114 CREATE OR REPLACE RULE metabib_full_rec_insert_rule
115 AS ON INSERT TO metabib.full_rec
117 INSERT INTO metabib.real_full_rec VALUES (
118 COALESCE(NEW.id, NEXTVAL('metabib.full_rec_id_seq'::REGCLASS)),
128 CREATE OR REPLACE RULE metabib_full_rec_update_rule
129 AS ON UPDATE TO metabib.full_rec
131 UPDATE metabib.real_full_rec SET
137 subfield = NEW.subfield,
139 index_vector = NEW.index_vector
142 CREATE OR REPLACE RULE metabib_full_rec_delete_rule
143 AS ON DELETE TO metabib.full_rec
145 DELETE FROM metabib.real_full_rec WHERE id = OLD.id;
147 -- reporter views that depended on metabib.full_rec are up next
148 CREATE OR REPLACE VIEW reporter.simple_record AS
155 title.value AS title,
156 uniform_title.value AS uniform_title,
157 author.value AS author,
158 publisher.value AS publisher,
159 SUBSTRING(pubdate.value FROM $$\d+$$) AS pubdate,
160 series_title.value AS series_title,
161 series_statement.value AS series_statement,
162 summary.value AS summary,
163 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
164 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn,
165 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '650' AND subfield = 'a' AND record = r.id)) AS topic_subject,
166 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '651' AND subfield = 'a' AND record = r.id)) AS geographic_subject,
167 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '655' AND subfield = 'a' AND record = r.id)) AS genre,
168 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '600' AND subfield = 'a' AND record = r.id)) AS name_subject,
169 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '610' AND subfield = 'a' AND record = r.id)) AS corporate_subject,
170 ARRAY((SELECT value FROM metabib.full_rec WHERE tag = '856' AND subfield IN ('3','y','u') AND record = r.id ORDER BY CASE WHEN subfield IN ('3','y') THEN 0 ELSE 1 END)) AS external_uri
171 FROM biblio.record_entry r
172 JOIN metabib.metarecord_source_map s ON (s.source = r.id)
173 LEFT JOIN metabib.full_rec uniform_title ON (r.id = uniform_title.record AND uniform_title.tag = '240' AND uniform_title.subfield = 'a')
174 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
175 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag = '100' AND author.subfield = 'a')
176 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
177 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
178 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
179 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
180 LEFT JOIN metabib.full_rec series_title ON (r.id = series_title.record AND series_title.tag IN ('830','440') AND series_title.subfield = 'a')
181 LEFT JOIN metabib.full_rec series_statement ON (r.id = series_statement.record AND series_statement.tag = '490' AND series_statement.subfield = 'a')
182 LEFT JOIN metabib.full_rec summary ON (r.id = summary.record AND summary.tag = '520' AND summary.subfield = 'a')
183 GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14;
185 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
191 FIRST(title.value) AS title,
192 FIRST(author.value) AS author,
193 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
194 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
195 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
196 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
197 FROM biblio.record_entry r
198 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
199 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
200 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
201 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
202 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
203 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
206 -- And finally, the biblio functions. NOTE: I can't find the original source of the second one, so I skipped it as old cruft that was in our production DB.
207 CREATE OR REPLACE FUNCTION biblio.flatten_marc ( rid BIGINT ) RETURNS SETOF metabib.full_rec AS $func$
209 bib biblio.record_entry%ROWTYPE;
210 output metabib.full_rec%ROWTYPE;
213 SELECT INTO bib * FROM biblio.record_entry WHERE id = rid;
215 FOR field IN SELECT * FROM vandelay.flatten_marc( bib.marc ) LOOP
216 output.record := rid;
217 output.ind1 := field.ind1;
218 output.ind2 := field.ind2;
219 output.tag := field.tag;
220 output.subfield := field.subfield;
221 output.value := field.value;
226 $func$ LANGUAGE PLPGSQL;
228 -- Evergreen DB patch 0745.data.prewarn_expire_setting.sql
230 -- Configuration setting to warn staff when an account is about to expire
233 -- check whether patch can be applied
234 SELECT evergreen.upgrade_deps_block_check('0745', :eg_version);
236 INSERT INTO config.org_unit_setting_type
237 (name, grp, label, description, datatype)
239 'circ.patron_expires_soon_warning',
242 'circ.patron_expires_soon_warning',
243 'Warn when patron account is about to expire',
248 'circ.patron_expires_soon_warning',
249 'Warn when patron account is about to expire. If set, the staff client displays a warning this many days before the expiry of a patron account. Value is in number of days, for example: 3 for 3 days.',
256 -- LP1076399: Prevent reactivated holds from canceling immediately.
257 -- Set the expire_time to NULL on all frozen/suspended holds.
259 SELECT evergreen.upgrade_deps_block_check('0747', :eg_version);
261 UPDATE action.hold_request
262 SET expire_time = NULL
266 SELECT evergreen.upgrade_deps_block_check('0752', :eg_version);
-- New bucket type backing the Link Checker's record queues.
268 INSERT INTO container.biblio_record_entry_bucket_type (code, label) VALUES ('url_verify', 'URL Verification Queue');
-- CAUTION: destructive. Discards any pre-existing url_verify schema and
-- everything in it (CASCADE) before rebuilding it from scratch below.
270 DROP SCHEMA IF EXISTS url_verify CASCADE;
272 CREATE SCHEMA url_verify;
274 CREATE TABLE url_verify.session (
275 id SERIAL PRIMARY KEY,
277 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
278 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
279 container INT NOT NULL REFERENCES container.biblio_record_entry_bucket (id) DEFERRABLE INITIALLY DEFERRED,
280 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
281 search TEXT NOT NULL,
282 CONSTRAINT uvs_name_once_per_lib UNIQUE (name, owning_lib)
285 CREATE TABLE url_verify.url_selector (
286 id SERIAL PRIMARY KEY,
288 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
289 CONSTRAINT tag_once_per_sess UNIQUE (xpath, session)
292 CREATE TABLE url_verify.url (
293 id SERIAL PRIMARY KEY,
294 redirect_from INT REFERENCES url_verify.url(id) DEFERRABLE INITIALLY DEFERRED,
295 item INT REFERENCES container.biblio_record_entry_bucket_item (id) DEFERRABLE INITIALLY DEFERRED,
296 url_selector INT REFERENCES url_verify.url_selector (id) DEFERRABLE INITIALLY DEFERRED,
297 session INT REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
301 full_url TEXT NOT NULL,
313 CONSTRAINT redirect_or_from_item CHECK (
314 redirect_from IS NOT NULL OR (
316 url_selector IS NOT NULL AND
318 subfield IS NOT NULL AND
324 CREATE TABLE url_verify.verification_attempt (
325 id SERIAL PRIMARY KEY,
326 usr INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
327 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
328 start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
329 finish_time TIMESTAMP WITH TIME ZONE
332 CREATE TABLE url_verify.url_verification (
333 id SERIAL PRIMARY KEY,
334 url INT NOT NULL REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED,
335 attempt INT NOT NULL REFERENCES url_verify.verification_attempt (id) DEFERRABLE INITIALLY DEFERRED,
336 req_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
337 res_time TIMESTAMP WITH TIME ZONE,
338 res_code INT CHECK (res_code BETWEEN 100 AND 999), -- we know > 599 will never be valid HTTP code, but we use 9XX for other stuff
340 redirect_to INT REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED -- if redirected
343 CREATE TABLE config.filter_dialog_interface (
344 key TEXT PRIMARY KEY,
348 CREATE TABLE config.filter_dialog_filter_set (
349 id SERIAL PRIMARY KEY,
351 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
352 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
353 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
354 interface TEXT NOT NULL REFERENCES config.filter_dialog_interface (key) DEFERRABLE INITIALLY DEFERRED,
355 filters TEXT NOT NULL CHECK (is_json(filters)),
356 CONSTRAINT cfdfs_name_once_per_lib UNIQUE (name, owning_lib)
360 SELECT evergreen.upgrade_deps_block_check('0753', :eg_version);
362 CREATE OR REPLACE FUNCTION url_verify.parse_url (url_in TEXT) RETURNS url_verify.url AS $$
367 my $url = Rose::URI->new($url_in);
369 my %parts = map { $_ => $url->$_ } qw/scheme username password host port path query fragment/;
371 $parts{full_url} = $url_in;
372 ($parts{domain} = $parts{host}) =~ s/^[^.]+\.//;
373 ($parts{tld} = $parts{domain}) =~ s/(?:[^.]+\.)+//;
374 ($parts{page} = $parts{path}) =~ s#(?:[^/]*/)+##;
380 CREATE OR REPLACE FUNCTION url_verify.ingest_url () RETURNS TRIGGER AS $$
382 tmp_row url_verify.url%ROWTYPE;
384 SELECT * INTO tmp_row FROM url_verify.parse_url(NEW.full_url);
386 NEW.scheme := tmp_row.scheme;
387 NEW.username := tmp_row.username;
388 NEW.password := tmp_row.password;
389 NEW.host := tmp_row.host;
390 NEW.domain := tmp_row.domain;
391 NEW.tld := tmp_row.tld;
392 NEW.port := tmp_row.port;
393 NEW.path := tmp_row.path;
394 NEW.page := tmp_row.page;
395 NEW.query := tmp_row.query;
396 NEW.fragment := tmp_row.fragment;
-- Populate the parsed URL component columns (scheme, host, path, etc.)
-- via url_verify.ingest_url() before every row is inserted.
402 CREATE TRIGGER ingest_url_tgr
403 BEFORE INSERT ON url_verify.url
404 FOR EACH ROW EXECUTE PROCEDURE url_verify.ingest_url();
406 CREATE OR REPLACE FUNCTION url_verify.extract_urls ( session_id INT, item_id INT ) RETURNS INT AS $$
414 current_selector url_verify.url_selector%ROWTYPE;
418 FOR current_selector IN SELECT * FROM url_verify.url_selector s WHERE s.session = session_id LOOP
419 current_url_pos := 1;
421 SELECT (XPATH(current_selector.xpath || '/text()', b.marc::XML))[current_url_pos]::TEXT INTO current_url
422 FROM biblio.record_entry b
423 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
424 WHERE c.id = item_id;
426 EXIT WHEN current_url IS NULL;
428 SELECT (XPATH(current_selector.xpath || '/../@tag', b.marc::XML))[current_url_pos]::TEXT INTO current_tag
429 FROM biblio.record_entry b
430 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
431 WHERE c.id = item_id;
433 IF current_tag IS NULL THEN
434 current_tag := last_seen_tag;
436 last_seen_tag := current_tag;
439 SELECT (XPATH(current_selector.xpath || '/@code', b.marc::XML))[current_url_pos]::TEXT INTO current_sf
440 FROM biblio.record_entry b
441 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
442 WHERE c.id = item_id;
444 INSERT INTO url_verify.url (session, item, url_selector, tag, subfield, ord, full_url)
445 VALUES ( session_id, item_id, current_selector.id, current_tag, current_sf, current_ord, current_url);
447 current_url_pos := current_url_pos + 1;
448 current_ord := current_ord + 1;
452 RETURN current_ord - 1;
458 -- NOTE: beware the use of bare perm IDs in the update_perm's below and in
459 -- the 950 seed data file. Update before merge to match current perm IDs! XXX
462 SELECT evergreen.upgrade_deps_block_check('0754', :eg_version);
464 INSERT INTO permission.perm_list (id, code, description)
470 'Allows a user to process and verify ULSs',
477 INSERT INTO permission.perm_list (id, code, description)
480 'URL_VERIFY_UPDATE_SETTINGS',
483 'Allows a user to configure URL verification org unit settings',
490 INSERT INTO permission.perm_list (id, code, description)
493 'SAVED_FILTER_DIALOG_FILTERS',
496 'Allows users to save and load sets of filters for filter dialogs, available in certain staff interfaces',
503 INSERT INTO config.settings_group (name, label)
514 INSERT INTO config.org_unit_setting_type
515 (name, grp, label, description, datatype, update_perm)
517 'url_verify.url_verification_delay',
520 'url_verify.url_verification_delay',
521 'Number of seconds to wait between URL test attempts.',
526 'url_verify.url_verification_delay',
527 'Throttling mechanism for batch URL verification runs. Each running process will wait this number of seconds after a URL test before performing the next.',
535 INSERT INTO config.org_unit_setting_type
536 (name, grp, label, description, datatype, update_perm)
538 'url_verify.url_verification_max_redirects',
541 'url_verify.url_verification_max_redirects',
542 'Maximum redirect lookups',
547 'url_verify.url_verification_max_redirects',
548 'For URLs returning 3XX redirects, this is the maximum number of redirects we will follow before giving up.',
556 INSERT INTO config.org_unit_setting_type
557 (name, grp, label, description, datatype, update_perm)
559 'url_verify.url_verification_max_wait',
562 'url_verify.url_verification_max_wait',
563 'Maximum wait time (in seconds) for a URL to lookup',
568 'url_verify.url_verification_max_wait',
569 'If we exceed the wait time, the URL is marked as a "timeout" and the system moves on to the next URL',
578 INSERT INTO config.org_unit_setting_type
579 (name, grp, label, description, datatype, update_perm)
581 'url_verify.verification_batch_size',
584 'url_verify.verification_batch_size',
585 'Number of URLs to test in parallel',
590 'url_verify.verification_batch_size',
591 'URLs are tested in batches. This number defines the size of each batch and it directly relates to the number of back-end processes performing URL verification.',
600 INSERT INTO config.filter_dialog_interface (key, description) VALUES (
604 'All Link Checker filter dialogs',
611 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
612 'ui.grid_columns.url_verify.select_urls',
616 'ui.grid_columns.url_verify.select_urls',
617 'Link Checker''s URL Selection interface''s saved columns',
622 'ui.grid_columns.url_verify.select_urls',
623 'Link Checker''s URL Selection interface''s saved columns',
630 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
631 'ui.grid_columns.url_verify.review_attempt',
635 'ui.grid_columns.url_verify.review_attempt',
636 'Link Checker''s Review Attempt interface''s saved columns',
641 'ui.grid_columns.url_verify.review_attempt',
642 'Link Checker''s Review Attempt interface''s saved columns',
652 SELECT evergreen.upgrade_deps_block_check('0755', :eg_version);
654 INSERT INTO config.org_unit_setting_type
655 (name, label, description, grp, datatype, fm_class) VALUES
657 'acq.upload.default.create_po',
659 'acq.upload.default.create_po',
665 'acq.upload.default.create_po',
666 'Create a purchase order by default during ACQ file upload',
674 'acq.upload.default.activate_po',
676 'acq.upload.default.activate_po',
677 'Upload Activate PO',
682 'acq.upload.default.activate_po',
683 'Activate the purchase order by default during ACQ file upload',
691 'acq.upload.default.provider',
693 'acq.upload.default.provider',
694 'Upload Default Provider',
699 'acq.upload.default.provider',
700 'Default provider to use during ACQ file upload',
708 'acq.upload.default.vandelay.match_set',
710 'acq.upload.default.vandelay.match_set',
711 'Upload Default Match Set',
716 'acq.upload.default.vandelay.match_set',
717 'Default match set to use during ACQ file upload',
725 'acq.upload.default.vandelay.merge_profile',
727 'acq.upload.default.vandelay.merge_profile',
728 'Upload Default Merge Profile',
733 'acq.upload.default.vandelay.merge_profile',
734 'Default merge profile to use during ACQ file upload',
742 'acq.upload.default.vandelay.import_non_matching',
744 'acq.upload.default.vandelay.import_non_matching',
745 'Upload Import Non Matching by Default',
750 'acq.upload.default.vandelay.import_non_matching',
751 'Import non-matching records by default during ACQ file upload',
759 'acq.upload.default.vandelay.merge_on_exact',
761 'acq.upload.default.vandelay.merge_on_exact',
762 'Upload Merge on Exact Match by Default',
767 'acq.upload.default.vandelay.merge_on_exact',
768 'Merge records on exact match by default during ACQ file upload',
776 'acq.upload.default.vandelay.merge_on_best',
778 'acq.upload.default.vandelay.merge_on_best',
779 'Upload Merge on Best Match by Default',
784 'acq.upload.default.vandelay.merge_on_best',
785 'Merge records on best match by default during ACQ file upload',
793 'acq.upload.default.vandelay.merge_on_single',
795 'acq.upload.default.vandelay.merge_on_single',
796 'Upload Merge on Single Match by Default',
801 'acq.upload.default.vandelay.merge_on_single',
802 'Merge records on single match by default during ACQ file upload',
810 'acq.upload.default.vandelay.quality_ratio',
812 'acq.upload.default.vandelay.quality_ratio',
813 'Upload Default Min. Quality Ratio',
818 'acq.upload.default.vandelay.quality_ratio',
819 'Default minimum quality ratio used during ACQ file upload',
827 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
829 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
830 'Upload Default Insufficient Quality Fall-Thru Profile',
835 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
836 'Default low-quality fall through profile used during ACQ file upload',
844 'acq.upload.default.vandelay.load_item_for_imported',
846 'acq.upload.default.vandelay.load_item_for_imported',
847 'Upload Load Items for Imported Records by Default',
852 'acq.upload.default.vandelay.load_item_for_imported',
853 'Load items for imported records by default during ACQ file upload',
863 SELECT evergreen.upgrade_deps_block_check('0756', :eg_version);
865 -- Drop some lingering old functions in search schema
-- All drops use IF EXISTS: these are legacy leftovers that may or may not
-- be present, so their absence must not abort the upgrade.
866 DROP FUNCTION IF EXISTS search.staged_fts(INT,INT,TEXT,INT[],INT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT,TEXT,TEXT,TEXT[],TEXT,REAL,TEXT,BOOL,BOOL,BOOL,INT,INT,INT);
867 DROP FUNCTION IF EXISTS search.parse_search_args(TEXT);
868 DROP FUNCTION IF EXISTS search.explode_array(ANYARRAY);
869 DROP FUNCTION IF EXISTS search.pick_table(TEXT);
871 -- Now drop query_parser_fts and related
-- The composite types go after the function that returned/consumed them.
872 DROP FUNCTION IF EXISTS search.query_parser_fts(INT,INT,TEXT,INT[],INT[],INT,INT,INT,BOOL,BOOL,INT);
873 DROP TYPE IF EXISTS search.search_result;
874 DROP TYPE IF EXISTS search.search_args;
877 SELECT evergreen.upgrade_deps_block_check('0757', :eg_version);
-- Work in the public schema while (re)building text search dictionaries
-- and configurations; the Evergreen default search_path is restored
-- further down in this same patch.
879 SET search_path = public, pg_catalog;
885 FOR lang IN SELECT substring(pptsd.dictname from '(.*)_stem$') AS lang FROM pg_catalog.pg_ts_dict pptsd JOIN pg_catalog.pg_namespace ppn ON ppn.oid = pptsd.dictnamespace
886 WHERE ppn.nspname = 'pg_catalog' AND pptsd.dictname LIKE '%_stem' LOOP
887 RAISE NOTICE 'FOUND LANGUAGE %', lang;
889 EXECUTE 'DROP TEXT SEARCH DICTIONARY IF EXISTS ' || lang || '_nostop CASCADE;
890 CREATE TEXT SEARCH DICTIONARY ' || lang || '_nostop (TEMPLATE=pg_catalog.snowball, language=''' || lang || ''');
891 COMMENT ON TEXT SEARCH DICTIONARY ' || lang || '_nostop IS ''' ||lang || ' snowball stemmer with no stopwords for ASCII words only.'';
892 CREATE TEXT SEARCH CONFIGURATION ' || lang || '_nostop ( COPY = pg_catalog.' || lang || ' );
893 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR word, hword, hword_part WITH pg_catalog.simple;
894 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR asciiword, asciihword, hword_asciipart WITH ' || lang || '_nostop;';
899 CREATE TEXT SEARCH CONFIGURATION keyword ( COPY = english_nostop );
900 CREATE TEXT SEARCH CONFIGURATION "default" ( COPY = english_nostop );
-- Restore the normal Evergreen search_path after the text-search setup.
902 SET search_path = evergreen, public, pg_catalog;
-- Per-class relevance weights for tsvector weight labels A-D; the defaults
-- (D=0.1, C=0.2, B=0.4, A=1.0) match PostgreSQL's built-in ranking weights.
904 ALTER TABLE config.metabib_class
905     ADD COLUMN a_weight NUMERIC  DEFAULT 1.0 NOT NULL,
906     ADD COLUMN b_weight NUMERIC  DEFAULT 0.4 NOT NULL,
907     ADD COLUMN c_weight NUMERIC  DEFAULT 0.2 NOT NULL,
908     ADD COLUMN d_weight NUMERIC  DEFAULT 0.1 NOT NULL;
910 CREATE TABLE config.ts_config_list (
914 COMMENT ON TABLE config.ts_config_list IS $$
917 A list of full text configs with names and descriptions.
920 CREATE TABLE config.metabib_class_ts_map (
921 id SERIAL PRIMARY KEY,
922 field_class TEXT NOT NULL REFERENCES config.metabib_class (name),
923 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
924 active BOOL NOT NULL DEFAULT TRUE,
925 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
926 index_lang TEXT NULL,
927 search_lang TEXT NULL,
928 always BOOL NOT NULL DEFAULT true
930 COMMENT ON TABLE config.metabib_class_ts_map IS $$
931 Text Search Configs for metabib class indexing
933 This table contains text search config definitions for
934 storing index_vector values.
937 CREATE TABLE config.metabib_field_ts_map (
938 id SERIAL PRIMARY KEY,
939 metabib_field INT NOT NULL REFERENCES config.metabib_field (id),
940 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
941 active BOOL NOT NULL DEFAULT TRUE,
942 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
943 index_lang TEXT NULL,
944 search_lang TEXT NULL
946 COMMENT ON TABLE config.metabib_field_ts_map IS $$
947 Text Search Configs for metabib field indexing
949 This table contains text search config definitions for
950 storing index_vector values.
953 CREATE TABLE metabib.combined_identifier_field_entry (
954 record BIGINT NOT NULL,
955 metabib_field INT NULL,
956 index_vector tsvector NOT NULL
-- Surrogate uniqueness: one combined row per (record, metabib_field) plus
-- one all-fields row where metabib_field is NULL (COALESCE folds NULL to '').
958 CREATE UNIQUE INDEX metabib_combined_identifier_field_entry_fakepk_idx ON metabib.combined_identifier_field_entry (record, COALESCE(metabib_field::TEXT,''));
-- GiST index on the combined tsvector for full-text matching.
959 CREATE INDEX metabib_combined_identifier_field_entry_index_vector_idx ON metabib.combined_identifier_field_entry USING GIST (index_vector);
960 CREATE INDEX metabib_combined_identifier_field_source_idx ON metabib.combined_identifier_field_entry (metabib_field);
962 CREATE TABLE metabib.combined_title_field_entry (
963 record BIGINT NOT NULL,
964 metabib_field INT NULL,
965 index_vector tsvector NOT NULL
-- Same index trio as the other metabib.combined_* tables: fake PK over
-- (record, field-or-''), GiST on the tsvector, btree on metabib_field.
967 CREATE UNIQUE INDEX metabib_combined_title_field_entry_fakepk_idx ON metabib.combined_title_field_entry (record, COALESCE(metabib_field::TEXT,''));
968 CREATE INDEX metabib_combined_title_field_entry_index_vector_idx ON metabib.combined_title_field_entry USING GIST (index_vector);
969 CREATE INDEX metabib_combined_title_field_source_idx ON metabib.combined_title_field_entry (metabib_field);
971 CREATE TABLE metabib.combined_author_field_entry (
972 record BIGINT NOT NULL,
973 metabib_field INT NULL,
974 index_vector tsvector NOT NULL
-- Same index trio as the other metabib.combined_* tables: fake PK over
-- (record, field-or-''), GiST on the tsvector, btree on metabib_field.
976 CREATE UNIQUE INDEX metabib_combined_author_field_entry_fakepk_idx ON metabib.combined_author_field_entry (record, COALESCE(metabib_field::TEXT,''));
977 CREATE INDEX metabib_combined_author_field_entry_index_vector_idx ON metabib.combined_author_field_entry USING GIST (index_vector);
978 CREATE INDEX metabib_combined_author_field_source_idx ON metabib.combined_author_field_entry (metabib_field);
980 CREATE TABLE metabib.combined_subject_field_entry (
981 record BIGINT NOT NULL,
982 metabib_field INT NULL,
983 index_vector tsvector NOT NULL
-- Same index trio as the other metabib.combined_* tables: fake PK over
-- (record, field-or-''), GiST on the tsvector, btree on metabib_field.
985 CREATE UNIQUE INDEX metabib_combined_subject_field_entry_fakepk_idx ON metabib.combined_subject_field_entry (record, COALESCE(metabib_field::TEXT,''));
986 CREATE INDEX metabib_combined_subject_field_entry_index_vector_idx ON metabib.combined_subject_field_entry USING GIST (index_vector);
987 CREATE INDEX metabib_combined_subject_field_source_idx ON metabib.combined_subject_field_entry (metabib_field);
989 CREATE TABLE metabib.combined_keyword_field_entry (
990 record BIGINT NOT NULL,
991 metabib_field INT NULL,
992 index_vector tsvector NOT NULL
-- Same index trio as the other metabib.combined_* tables: fake PK over
-- (record, field-or-''), GiST on the tsvector, btree on metabib_field.
994 CREATE UNIQUE INDEX metabib_combined_keyword_field_entry_fakepk_idx ON metabib.combined_keyword_field_entry (record, COALESCE(metabib_field::TEXT,''));
995 CREATE INDEX metabib_combined_keyword_field_entry_index_vector_idx ON metabib.combined_keyword_field_entry USING GIST (index_vector);
996 CREATE INDEX metabib_combined_keyword_field_source_idx ON metabib.combined_keyword_field_entry (metabib_field);
998 CREATE TABLE metabib.combined_series_field_entry (
999 record BIGINT NOT NULL,
1000 metabib_field INT NULL,
1001 index_vector tsvector NOT NULL
-- Same index trio as the other metabib.combined_* tables: fake PK over
-- (record, field-or-''), GiST on the tsvector, btree on metabib_field.
1003 CREATE UNIQUE INDEX metabib_combined_series_field_entry_fakepk_idx ON metabib.combined_series_field_entry (record, COALESCE(metabib_field::TEXT,''));
1004 CREATE INDEX metabib_combined_series_field_entry_index_vector_idx ON metabib.combined_series_field_entry USING GIST (index_vector);
1005 CREATE INDEX metabib_combined_series_field_source_idx ON metabib.combined_series_field_entry (metabib_field);
-- Rebuild the per-class "combined" search vectors for one bib record.
-- For each search class (keyword, title, author, subject, series,
-- identifier): delete the record's existing combined rows, then insert
-- one aggregated tsvector per metabib_field (GROUP BY field) plus one
-- class-wide row with metabib_field = NULL covering all fields.
-- COALESCE(...,'') guards the zero-row aggregate; strip() drops
-- positional/weight data, which is not needed for combined matching.
1007 CREATE OR REPLACE FUNCTION metabib.update_combined_index_vectors(bib_id BIGINT) RETURNS VOID AS $func$
-- keyword class
1009 DELETE FROM metabib.combined_keyword_field_entry WHERE record = bib_id;
1010 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
1011 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1012 FROM metabib.keyword_field_entry WHERE source = bib_id GROUP BY field;
1013 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
1014 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1015 FROM metabib.keyword_field_entry WHERE source = bib_id;
-- title class
1017 DELETE FROM metabib.combined_title_field_entry WHERE record = bib_id;
1018 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1019 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1020 FROM metabib.title_field_entry WHERE source = bib_id GROUP BY field;
1021 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1022 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1023 FROM metabib.title_field_entry WHERE source = bib_id;
-- author class
1025 DELETE FROM metabib.combined_author_field_entry WHERE record = bib_id;
1026 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1027 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1028 FROM metabib.author_field_entry WHERE source = bib_id GROUP BY field;
1029 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1030 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1031 FROM metabib.author_field_entry WHERE source = bib_id;
-- subject class
1033 DELETE FROM metabib.combined_subject_field_entry WHERE record = bib_id;
1034 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1035 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1036 FROM metabib.subject_field_entry WHERE source = bib_id GROUP BY field;
1037 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1038 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1039 FROM metabib.subject_field_entry WHERE source = bib_id;
-- series class
1041 DELETE FROM metabib.combined_series_field_entry WHERE record = bib_id;
1042 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1043 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1044 FROM metabib.series_field_entry WHERE source = bib_id GROUP BY field;
1045 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1046 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1047 FROM metabib.series_field_entry WHERE source = bib_id;
-- identifier class
1049 DELETE FROM metabib.combined_identifier_field_entry WHERE record = bib_id;
1050 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1051 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1052 FROM metabib.identifier_field_entry WHERE source = bib_id GROUP BY field;
1053 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1054 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1055 FROM metabib.identifier_field_entry WHERE source = bib_id;
1058 $func$ LANGUAGE PLPGSQL;
-- Re-extract all field-entry index data for one bib record.
-- Optionally skips facet, browse, or search entry (re)creation via the
-- skip_* arguments.  First empties the relevant entry tables for the
-- record, then walks biblio.extract_metabib_field_entry() output and
-- re-inserts facet/browse/search rows as directed; finally refreshes
-- the combined vectors.  (A later section of this same upgrade script,
-- stamp 0762, redefines this function with internal-flag support.)
1060 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1063 ind_data metabib.field_entry_template%ROWTYPE;
1064 mbe_row metabib.browse_entry%ROWTYPE;
-- Honor the bulk-load optimization flag: when inserts-only is assumed,
-- the delete pass below can be skipped (flag checked via FOUND).
1067 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
1069 IF NOT skip_search THEN
-- Clear existing per-class search entries for this record.  The table
-- name is dynamic, so the statement is built and EXECUTEd.
1070 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1071 -- RAISE NOTICE 'Emptying out %', fclass.name;
1072 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1075 IF NOT skip_facet THEN
1076 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1078 IF NOT skip_browse THEN
1079 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
1083 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
-- Negative field ids flag non-display entries; normalize to positive.
1084 IF ind_data.field < 0 THEN
1085 ind_data.field = -1 * ind_data.field;
1088 IF ind_data.facet_field AND NOT skip_facet THEN
1089 INSERT INTO metabib.facet_entry (field, source, value)
1090 VALUES (ind_data.field, ind_data.source, ind_data.value);
1093 IF ind_data.browse_field AND NOT skip_browse THEN
1094 -- A caveat about this SELECT: this should take care of replacing
1095 -- old mbe rows when data changes, but not if normalization (by
1096 -- which I mean specifically the output of
1097 -- evergreen.oils_tsearch2()) changes. It may or may not be
1098 -- expensive to add a comparison of index_vector to index_vector
1099 -- to the WHERE clause below.
1100 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
-- Reuse an existing browse entry when one matches; otherwise insert a
-- normalized value and capture its new id from the sequence.
1102 mbe_id := mbe_row.id;
1104 INSERT INTO metabib.browse_entry (value) VALUES
1105 (metabib.browse_normalize(ind_data.value, ind_data.field));
1106 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1109 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1110 VALUES (mbe_id, ind_data.field, ind_data.source);
1113 IF ind_data.search_field AND NOT skip_search THEN
-- Dynamic INSERT into the class-specific *_field_entry table; values
-- are quoted with quote_literal() before EXECUTE to avoid injection.
1115 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1117 quote_literal(ind_data.field) || $$, $$ ||
1118 quote_literal(ind_data.source) || $$, $$ ||
1119 quote_literal(ind_data.value) ||
1125 IF NOT skip_search THEN
1126 PERFORM metabib.update_combined_index_vectors(bib_id);
1131 $func$ LANGUAGE PLPGSQL;
-- Drop both possible homes of the old TSearch2-era trigger function
-- before recreating it natively below.  CASCADE removes the dependent
-- triggers, which are also recreated below.
1133 DROP FUNCTION IF EXISTS evergreen.oils_tsearch2() CASCADE;
1134 DROP FUNCTION IF EXISTS public.oils_tsearch2() CASCADE;
-- Native full-text indexing trigger (replaces the TSearch2 version).
-- Populates NEW.index_vector for *_field_entry, browse_entry, and
-- full_rec rows: runs configured index normalizers over the value,
-- then builds a (possibly weight-stacked) tsvector using the ts_map
-- configuration; TG_ARGV[0] supplies the class / fallback ts config.
1136 CREATE OR REPLACE FUNCTION public.oils_tsearch2 () RETURNS TRIGGER AS $$
1140 temp_vector TEXT := '';
1145 NEW.index_vector = ''::tsvector;
-- Field-entry tables: apply each normalizer configured for NEW.field,
-- in order, via dynamically built SQL.
1147 IF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
1149 SELECT n.func AS func,
1150 n.param_count AS param_count,
1152 FROM config.index_normalizer n
1153 JOIN config.metabib_field_index_norm_map m ON (m.norm = n.id)
1154 WHERE field = NEW.field
1156 EXECUTE 'SELECT ' || normalizer.func || '(' ||
1157 quote_literal( value ) ||
1159 WHEN normalizer.param_count > 0
1160 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
-- Browse entries: collapse to word tokens, search_normalize, and use
-- the config named by TG_ARGV[0] directly.
1169 IF TG_TABLE_NAME::TEXT ~ 'browse_entry$' THEN
1170 value := ARRAY_TO_STRING(
1171 evergreen.regexp_split_to_array(value, E'\\W+'), ' '
1173 value := public.search_normalize(value);
1174 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1175 ELSIF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
-- Gather applicable ts configs: class-level defaults plus any
-- field-level overrides, ordered by weight so equal-weight configs
-- can be accumulated into one setweight() call.
-- NOTE(review): AND binds tighter than OR, so these WHERE clauses
-- parse as (... AND index_lang IS NULL) OR EXISTS(...); the intent
-- looks like AND (index_lang IS NULL OR EXISTS (...)) and likewise
-- AND (always OR NOT EXISTS (...)) -- confirm against upstream.
1177 SELECT ts_config, index_weight
1178 FROM config.metabib_class_ts_map
1179 WHERE field_class = TG_ARGV[0]
1180 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
1181 AND always OR NOT EXISTS (SELECT 1 FROM config.metabib_field_ts_map WHERE metabib_field = NEW.field)
1183 SELECT ts_config, index_weight
1184 FROM config.metabib_field_ts_map
1185 WHERE metabib_field = NEW.field
1186 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
1187 ORDER BY index_weight ASC
-- Flush the accumulated text into index_vector whenever the weight
-- changes, then start accumulating for the new weight.
1189 IF cur_weight IS NOT NULL AND cur_weight != ts_rec.index_weight THEN
1190 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
1193 cur_weight = ts_rec.index_weight;
1194 SELECT INTO temp_vector temp_vector || ' ' || to_tsvector(ts_rec.ts_config::regconfig, value)::TEXT;
1196 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
-- All other tables (e.g. full_rec): plain to_tsvector with TG_ARGV[0].
1198 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1203 $$ LANGUAGE PLPGSQL;
-- Recreate every full-text-indexing trigger dropped by the CASCADE
-- above.  Each fires BEFORE INSERT/UPDATE and passes its search class
-- (or ts config name) to oils_tsearch2 as TG_ARGV[0].
1205 CREATE TRIGGER authority_full_rec_fti_trigger
1206 BEFORE UPDATE OR INSERT ON authority.full_rec
1207 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1209 CREATE TRIGGER authority_simple_heading_fti_trigger
1210 BEFORE UPDATE OR INSERT ON authority.simple_heading
1211 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1213 CREATE TRIGGER metabib_identifier_field_entry_fti_trigger
1214 BEFORE UPDATE OR INSERT ON metabib.identifier_field_entry
1215 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('identifier');
1217 CREATE TRIGGER metabib_title_field_entry_fti_trigger
1218 BEFORE UPDATE OR INSERT ON metabib.title_field_entry
1219 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('title');
1221 CREATE TRIGGER metabib_author_field_entry_fti_trigger
1222 BEFORE UPDATE OR INSERT ON metabib.author_field_entry
1223 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('author');
1225 CREATE TRIGGER metabib_subject_field_entry_fti_trigger
1226 BEFORE UPDATE OR INSERT ON metabib.subject_field_entry
1227 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('subject');
1229 CREATE TRIGGER metabib_keyword_field_entry_fti_trigger
1230 BEFORE UPDATE OR INSERT ON metabib.keyword_field_entry
1231 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1233 CREATE TRIGGER metabib_series_field_entry_fti_trigger
1234 BEFORE UPDATE OR INSERT ON metabib.series_field_entry
1235 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('series');
1237 CREATE TRIGGER metabib_browse_entry_fti_trigger
1238 BEFORE INSERT OR UPDATE ON metabib.browse_entry
1239 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1241 CREATE TRIGGER metabib_full_rec_fti_trigger
1242 BEFORE UPDATE OR INSERT ON metabib.real_full_rec
1243 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('default');
-- Seed the list of available text-search configurations: 'simple'
-- (non-stemmed) plus the no-stopword stemmed configs per language.
1245 INSERT INTO config.ts_config_list(id, name) VALUES
1246 ('simple','Non-Stemmed Simple'),
1247 ('danish_nostop','Danish Stemmed'),
1248 ('dutch_nostop','Dutch Stemmed'),
1249 ('english_nostop','English Stemmed'),
1250 ('finnish_nostop','Finnish Stemmed'),
1251 ('french_nostop','French Stemmed'),
1252 ('german_nostop','German Stemmed'),
1253 ('hungarian_nostop','Hungarian Stemmed'),
1254 ('italian_nostop','Italian Stemmed'),
1255 ('norwegian_nostop','Norwegian Stemmed'),
1256 ('portuguese_nostop','Portuguese Stemmed'),
1257 ('romanian_nostop','Romanian Stemmed'),
1258 ('russian_nostop','Russian Stemmed'),
1259 ('spanish_nostop','Spanish Stemmed'),
1260 ('swedish_nostop','Swedish Stemmed'),
1261 ('turkish_nostop','Turkish Stemmed');
-- Default class -> ts-config mappings consumed by oils_tsearch2():
-- each class indexes with 'simple' at weight A and (identifier aside)
-- english_nostop at weight C; always=true applies them unconditionally.
1263 INSERT INTO config.metabib_class_ts_map(field_class, ts_config, index_weight, always) VALUES
1264 ('keyword','simple','A',true),
1265 ('keyword','english_nostop','C',true),
1266 ('title','simple','A',true),
1267 ('title','english_nostop','C',true),
1268 ('author','simple','A',true),
1269 ('author','english_nostop','C',true),
1270 ('series','simple','A',true),
1271 ('series','english_nostop','C',true),
1272 ('subject','simple','A',true),
1273 ('subject','english_nostop','C',true),
1274 ('identifier','simple','A',true);
-- Relevance-bump multiplier (plperlu).  Starting from a base factor,
-- multiplies in mults[i] for each bump type in bumps[i] that the value
-- satisfies against the search terms: 'first_word' (value begins with
-- the first term), 'full_match' (value is exactly the space-joined
-- terms), 'word_order' (terms appear in order anywhere in the value).
-- NOTE(review): terms are interpolated into the regexes unescaped;
-- regex metacharacters in user search terms would alter matching --
-- presumably terms are pre-normalized upstream, confirm.
1276 CREATE OR REPLACE FUNCTION evergreen.rel_bump(terms TEXT[], value TEXT, bumps TEXT[], mults NUMERIC[]) RETURNS NUMERIC AS
1279 my ($terms,$value,$bumps,$mults) = @_;
1283 for (my $id = 0; $id < @$bumps; $id++) {
1284 if ($bumps->[$id] eq 'first_word') {
1285 $retval *= $mults->[$id] if ($value =~ /^$terms->[0]/);
1286 } elsif ($bumps->[$id] eq 'full_match') {
1287 my $fullmatch = join(' ', @$terms);
1288 $retval *= $mults->[$id] if ($value =~ /^$fullmatch$/);
1289 } elsif ($bumps->[$id] eq 'word_order') {
1290 my $wordorder = join('.*', @$terms);
1291 $retval *= $mults->[$id] if ($value =~ /$wordorder/);
1295 $BODY$ LANGUAGE plperlu IMMUTABLE STRICT COST 100;
-- Deliberately inactive: the statements below live in the supplemental
-- reingest script because forcing a full re-index during the upgrade
-- transaction would be prohibitively slow on large datasets.
/* ** This happens in the supplemental script **
1299 UPDATE metabib.identifier_field_entry set value = value;
1300 UPDATE metabib.title_field_entry set value = value;
1301 UPDATE metabib.author_field_entry set value = value;
1302 UPDATE metabib.subject_field_entry set value = value;
1303 UPDATE metabib.keyword_field_entry set value = value;
1304 UPDATE metabib.series_field_entry set value = value;
1306 SELECT metabib.update_combined_index_vectors(id)
1307 FROM biblio.record_entry
-- 0758: add a Vandelay settings group and the org-unit setting that
-- links an org unit to its default record match set.
SELECT evergreen.upgrade_deps_block_check('0758', :eg_version);
1314 INSERT INTO config.settings_group (name, label) VALUES
1315 ('vandelay', 'Vandelay');
1317 INSERT INTO config.org_unit_setting_type (name, grp, label, datatype, fm_class) VALUES
1318 ('vandelay.default_match_set', 'vandelay', 'Default Record Match Set', 'link', 'vms');
-- 0759: per-criteria hold proximity adjustments.  A row matches a
-- copy/hold pair on any combination of the nullable criteria columns;
-- absolute_adjustment distinguishes "replace the baseline prox" rows
-- from relative "add to the baseline" rows.  The CHECK ensures at
-- least one criterium is set.  circ_mod's FK is added later (deferred
-- ALTER below) rather than inline.
SELECT evergreen.upgrade_deps_block_check('0759', :eg_version);
1323 CREATE TABLE actor.org_unit_proximity_adjustment (
1324 id SERIAL PRIMARY KEY,
1325 item_circ_lib INT REFERENCES actor.org_unit (id),
1326 item_owning_lib INT REFERENCES actor.org_unit (id),
1327 copy_location INT REFERENCES asset.copy_location (id),
1328 hold_pickup_lib INT REFERENCES actor.org_unit (id),
1329 hold_request_lib INT REFERENCES actor.org_unit (id),
1330 pos INT NOT NULL DEFAULT 0,
1331 absolute_adjustment BOOL NOT NULL DEFAULT FALSE,
1332 prox_adjustment NUMERIC,
1333 circ_mod TEXT, -- REFERENCES config.circ_modifier (code),
1334 CONSTRAINT prox_adj_criterium CHECK (COALESCE(item_circ_lib::TEXT,item_owning_lib::TEXT,copy_location::TEXT,hold_pickup_lib::TEXT,hold_request_lib::TEXT,circ_mod) IS NOT NULL)
-- One adjustment per unique criteria combination, plus per-column
-- lookup indexes for the matching queries in the function below.
1336 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (item_circ_lib,item_owning_lib,copy_location,hold_pickup_lib,hold_request_lib,circ_mod);
1337 CREATE INDEX prox_adj_circ_lib_idx ON actor.org_unit_proximity_adjustment (item_circ_lib);
1338 CREATE INDEX prox_adj_owning_lib_idx ON actor.org_unit_proximity_adjustment (item_owning_lib);
1339 CREATE INDEX prox_adj_copy_location_idx ON actor.org_unit_proximity_adjustment (copy_location);
1340 CREATE INDEX prox_adj_pickup_lib_idx ON actor.org_unit_proximity_adjustment (hold_pickup_lib);
1341 CREATE INDEX prox_adj_request_lib_idx ON actor.org_unit_proximity_adjustment (hold_request_lib);
1342 CREATE INDEX prox_adj_circ_mod_idx ON actor.org_unit_proximity_adjustment (circ_mod);
-- Walk up the org tree from the given org unit, returning each
-- ancestor (including the unit itself) with its distance in hops.
-- Implemented as a recursive CTE following parent_ou links;
-- ROWS 1 hints the planner toward a small result set.
1344 CREATE OR REPLACE FUNCTION actor.org_unit_ancestors_distance( INT ) RETURNS TABLE (id INT, distance INT) AS $$
1345 WITH RECURSIVE org_unit_ancestors_distance(id, distance) AS (
1348 SELECT ou.parent_ou, ouad.distance+1
1349 FROM actor.org_unit ou JOIN org_unit_ancestors_distance ouad ON (ou.id = ouad.id)
1350 WHERE ou.parent_ou IS NOT NULL
1352 SELECT * FROM org_unit_ancestors_distance;
1353 $$ LANGUAGE SQL STABLE ROWS 1;
-- Compute the adjusted proximity between a copy and a hold's pickup
-- lib.  Starts from the raw actor.org_unit_proximity baseline, then:
-- (1) if any matching absolute adjustment exists, the closest-matching
-- one (by summed ancestor distances) replaces the baseline; (2) every
-- matching relative adjustment is then added on top.
1355 CREATE OR REPLACE FUNCTION action.hold_copy_calculated_proximity(
1358 copy_context_ou INT DEFAULT NULL
1359 -- TODO maybe? hold_context_ou INT DEFAULT NULL. This would optionally
1360 -- support an "ahprox" measurement: adjust prox between copy circ lib and
1361 -- hold request lib, but I'm unsure whether to use this theoretical
1362 -- argument only in the baseline calculation or later in the other
1363 -- queries in this function.
1364 ) RETURNS NUMERIC AS $f$
1366 aoupa actor.org_unit_proximity_adjustment%ROWTYPE;
1367 ahr action.hold_request%ROWTYPE;
1368 acp asset.copy%ROWTYPE;
1369 acn asset.call_number%ROWTYPE;
1370 acl asset.copy_location%ROWTYPE;
1371 baseline_prox NUMERIC;
-- Load the hold, copy, call number, and copy location rows involved.
1381 SELECT * INTO ahr FROM action.hold_request WHERE id = ahr_id;
1382 SELECT * INTO acp FROM asset.copy WHERE id = acp_id;
1383 SELECT * INTO acn FROM asset.call_number WHERE id = acp.call_number;
1384 SELECT * INTO acl FROM asset.copy_location WHERE id = acp.location;
-- "Here" defaults to the copy's circ lib unless a context org unit
-- was passed in (e.g. for capture at a different location).
1386 IF copy_context_ou IS NULL THEN
1387 copy_context_ou := acp.circ_lib;
1390 -- First, gather the baseline proximity of "here" to pickup lib
1391 SELECT prox INTO baseline_prox FROM actor.org_unit_proximity WHERE from_org = copy_context_ou AND to_org = ahr.pickup_lib;
1393 -- Find any absolute adjustments, and set the baseline prox to that
-- Each criterion matches the adjustment's org against the relevant
-- ancestor set, so adjustments configured at a parent org apply to
-- its descendants; closer (smaller summed distance) matches win.
-- NOTE(review): the acl_ol join condition reads acn_ol.id rather than
-- acl_ol.id, and compares an org-unit ancestor id to a copy_location
-- id -- looks like a copy/paste slip; confirm against upstream.
1394 SELECT adj.* INTO aoupa
1395 FROM actor.org_unit_proximity_adjustment adj
1396 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1397 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
1398 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1399 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1400 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1401 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1402 absolute_adjustment AND
1403 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
-- Rank candidate adjustments by total ancestor distance; unmatched
-- criteria are penalized with 999 so tighter matches sort first.
1405 COALESCE(acp_cl.distance,999)
1406 + COALESCE(acn_ol.distance,999)
1407 + COALESCE(acl_ol.distance,999)
1408 + COALESCE(ahr_pl.distance,999)
1409 + COALESCE(ahr_rl.distance,999),
1414 baseline_prox := aoupa.prox_adjustment;
1417 -- Now find any relative adjustments, and change the baseline prox based on them
-- Same matching logic as above (including the suspect acl_ol join),
-- but every matching row is applied additively.
1420 FROM actor.org_unit_proximity_adjustment adj
1421 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1422 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
1423 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1424 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1425 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1426 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1427 NOT absolute_adjustment AND
1428 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
1430 baseline_prox := baseline_prox + aoupa.prox_adjustment;
1433 RETURN baseline_prox;
1435 $f$ LANGUAGE PLPGSQL;
-- Add the circ_mod FK deferred (commented out inline above) so bulk
-- loads can insert adjustments before their circ modifiers, and record
-- the calculated proximity on each hold/copy mapping.
1437 ALTER TABLE actor.org_unit_proximity_adjustment
1438 ADD CONSTRAINT actor_org_unit_proximity_adjustment_circ_mod_fkey
1439 FOREIGN KEY (circ_mod) REFERENCES config.circ_modifier (code)
1440 DEFERRABLE INITIALLY DEFERRED;
1442 ALTER TABLE action.hold_copy_map ADD COLUMN proximity NUMERIC;
-- 0760: configurable best-hold selection sort order.  Each INT column
-- holds that criterion's position (1 = most significant) in the sort,
-- or NULL if the criterion is unused for this named ordering.
SELECT evergreen.upgrade_deps_block_check('0760', :eg_version);
1447 CREATE TABLE config.best_hold_order(
1448 id SERIAL PRIMARY KEY, -- (metadata)
1449 name TEXT UNIQUE, -- i18n (metadata)
1450 pprox INT, -- copy capture <-> pickup lib prox
1451 hprox INT, -- copy circ lib <-> request lib prox
1452 aprox INT, -- copy circ lib <-> pickup lib ADJUSTED prox on ahcm
1453 approx INT, -- copy capture <-> pickup lib ADJUSTED prox from function
1454 priority INT, -- group hold priority
1455 cut INT, -- cut-in-line
1456 depth INT, -- selection depth
1457 htime INT, -- time since last home-lib circ exceeds org-unit setting
1458 rtime INT, -- request time
1459 shtime INT -- time since copy last trip home exceeds org-unit setting
1462 -- At least one of these columns must contain a non-null value
1463 ALTER TABLE config.best_hold_order ADD CHECK ((
1464 pprox IS NOT NULL OR
1465 hprox IS NOT NULL OR
1466 aprox IS NOT NULL OR
1467 priority IS NOT NULL OR
1469 depth IS NOT NULL OR
1470 htime IS NOT NULL OR
-- Seed the six stock orderings: Traditional and FIFO, each in a plain
-- form plus "Holds-always-go-home" (hprox first) and "Holds-go-home"
-- (htime first) variants.
1474 INSERT INTO config.best_hold_order (
1476 pprox, aprox, priority, cut, depth, rtime, htime, hprox
1479 1, 2, 3, 4, 5, 6, 7, 8
1482 INSERT INTO config.best_hold_order (
1484 hprox, pprox, aprox, priority, cut, depth, rtime, htime
1486 'Traditional with Holds-always-go-home',
1487 1, 2, 3, 4, 5, 6, 7, 8
1490 INSERT INTO config.best_hold_order (
1492 htime, hprox, pprox, aprox, priority, cut, depth, rtime
1494 'Traditional with Holds-go-home',
1495 1, 2, 3, 4, 5, 6, 7, 8
1498 INSERT INTO config.best_hold_order (
1500 priority, cut, rtime, depth, pprox, hprox, aprox, htime
1503 1, 2, 3, 4, 5, 6, 7, 8
1506 INSERT INTO config.best_hold_order (
1508 hprox, priority, cut, rtime, depth, pprox, aprox, htime
1510 'FIFO with Holds-always-go-home',
1511 1, 2, 3, 4, 5, 6, 7, 8
1514 INSERT INTO config.best_hold_order (
1516 htime, priority, cut, rtime, depth, pprox, aprox, hprox
1518 'FIFO with Holds-go-home',
1519 1, 2, 3, 4, 5, 6, 7, 8
-- Permission plus org-unit settings that drive best-hold selection:
-- circ.hold_capture_order links to a best_hold_order row, and
-- circ.hold_go_home_interval bounds foreign circulation time (seeded
-- at the root org unit).  Finally, any org with the old boolean
-- circ.holds_fifo setting enabled is migrated to the 'FIFO' ordering.
1522 INSERT INTO permission.perm_list (
1523 id, code, description
1526 'ADMIN_HOLD_CAPTURE_SORT',
1529 'Allows a user to make changes to best-hold selection sort order',
1535 INSERT INTO config.org_unit_setting_type (
1536 name, label, description, datatype, fm_class, update_perm, grp
1538 'circ.hold_capture_order',
1540 'circ.hold_capture_order',
1541 'Best-hold selection sort order',
1546 'circ.hold_capture_order',
1547 'Defines the sort order of holds when selecting a hold to fill using a given copy at capture time',
1557 INSERT INTO config.org_unit_setting_type (
1558 name, label, description, datatype, update_perm, grp
1560 'circ.hold_go_home_interval',
1562 'circ.hold_go_home_interval',
1563 'Max foreign-circulation time',
1568 'circ.hold_go_home_interval',
1569 'Time a copy can spend circulating away from its circ lib before returning there to fill a hold (if one exists there)',
1578 INSERT INTO actor.org_unit_setting (
1579 org_unit, name, value
1581 (SELECT id FROM actor.org_unit WHERE parent_ou IS NULL),
1582 'circ.hold_go_home_interval',
-- Convert legacy circ.holds_fifo=true settings to the new
-- hold-capture-order setting pointing at the seeded 'FIFO' row.
1586 UPDATE actor.org_unit_setting SET
1587 name = 'circ.hold_capture_order',
1588 value = (SELECT id FROM config.best_hold_order WHERE name = 'FIFO')
1590 name = 'circ.holds_fifo' AND value ILIKE '%true%';
-- 0762: add per-aspect ingest skip flags and redefine
-- metabib.reingest_metabib_field_entries so each skip_* argument is
-- OR-combined with its matching internal flag: the effective b_skip_*
-- is TRUE when either the caller passed TRUE or the flag is enabled
-- (COALESCE(NULLIF(arg, FALSE), EXISTS(...))).  The body otherwise
-- mirrors the earlier definition in this script.
SELECT evergreen.upgrade_deps_block_check('0762', :eg_version);
1595 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_browse_indexing');
1596 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_search_indexing');
1597 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_facet_indexing');
1599 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1602 ind_data metabib.field_entry_template%ROWTYPE;
1603 mbe_row metabib.browse_entry%ROWTYPE;
-- Merge caller arguments with the global skip flags.
1610 SELECT COALESCE(NULLIF(skip_facet, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_facet_indexing' AND enabled)) INTO b_skip_facet;
1611 SELECT COALESCE(NULLIF(skip_browse, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_browse_indexing' AND enabled)) INTO b_skip_browse;
1612 SELECT COALESCE(NULLIF(skip_search, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_search_indexing' AND enabled)) INTO b_skip_search;
1614 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
1616 IF NOT b_skip_search THEN
1617 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1618 -- RAISE NOTICE 'Emptying out %', fclass.name;
1619 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1622 IF NOT b_skip_facet THEN
1623 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1625 IF NOT b_skip_browse THEN
1626 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
1630 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
-- Negative field ids flag non-display entries; normalize to positive.
1631 IF ind_data.field < 0 THEN
1632 ind_data.field = -1 * ind_data.field;
1635 IF ind_data.facet_field AND NOT b_skip_facet THEN
1636 INSERT INTO metabib.facet_entry (field, source, value)
1637 VALUES (ind_data.field, ind_data.source, ind_data.value);
1640 IF ind_data.browse_field AND NOT b_skip_browse THEN
1641 -- A caveat about this SELECT: this should take care of replacing
1642 -- old mbe rows when data changes, but not if normalization (by
1643 -- which I mean specifically the output of
1644 -- evergreen.oils_tsearch2()) changes. It may or may not be
1645 -- expensive to add a comparison of index_vector to index_vector
1646 -- to the WHERE clause below.
1647 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
-- Reuse a matching browse entry or insert a normalized one, taking
-- the new id from the sequence.
1649 mbe_id := mbe_row.id;
1651 INSERT INTO metabib.browse_entry (value) VALUES
1652 (metabib.browse_normalize(ind_data.value, ind_data.field));
1653 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1656 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1657 VALUES (mbe_id, ind_data.field, ind_data.source);
1660 IF ind_data.search_field AND NOT b_skip_search THEN
-- Dynamic INSERT into the class-specific table; quote_literal()
-- protects the interpolated values.
1662 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1664 quote_literal(ind_data.field) || $$, $$ ||
1665 quote_literal(ind_data.source) || $$, $$ ||
1666 quote_literal(ind_data.value) ||
1672 IF NOT b_skip_search THEN
1673 PERFORM metabib.update_combined_index_vectors(bib_id);
1678 $func$ LANGUAGE PLPGSQL;
-- 0763: org-unit setting to truncate accruing fines at the max fine
-- amount.  0765: acq providers gain a default copy count for new
-- lineitems (existing rows default to 0).
SELECT evergreen.upgrade_deps_block_check('0763', :eg_version);
1683 INSERT INTO config.org_unit_setting_type (
1684 name, label, grp, datatype
1686 'circ.fines.truncate_to_max_fine',
1687 'Truncate fines to max fine amount',
1694 SELECT evergreen.upgrade_deps_block_check('0765', :eg_version);
1696 ALTER TABLE acq.provider
1697 ADD COLUMN default_copy_count INTEGER NOT NULL DEFAULT 0;
-- 0768: rank an org unit for holdings sorting relative to the search
-- lib and optional preferred lib.  Lower is better: -20000 for an
-- exact search-lib match, -10000 for a pref-lib match, distance-5000
-- when lib sits under a pref_lib that is itself under search_lib,
-- plain descendant distance otherwise.
SELECT evergreen.upgrade_deps_block_check('0768', :eg_version);
1702 CREATE OR REPLACE FUNCTION evergreen.rank_ou(lib INT, search_lib INT, pref_lib INT DEFAULT NULL)
1703 RETURNS INTEGER AS $$
1706 -- lib matches search_lib
1707 (SELECT CASE WHEN $1 = $2 THEN -20000 END),
1709 -- lib matches pref_lib
1710 (SELECT CASE WHEN $1 = $3 THEN -10000 END),
1713 -- pref_lib is a child of search_lib and lib is a child of pref lib.
1714 (SELECT distance - 5000
1715 FROM actor.org_unit_descendants_distance($3)
1716 WHERE id = $1 AND $3 IN (
1717 SELECT id FROM actor.org_unit_descendants($2))),
1719 -- lib is a child of search_lib
1720 (SELECT distance FROM actor.org_unit_descendants_distance($2) WHERE id = $1),
1722 -- all others pay cash
1725 $$ LANGUAGE SQL STABLE;
-- 0769: replace ranked_volumes with a signature taking an includes
-- array (hence the DROP of the old 6-arg form).  Returns the record's
-- call numbers ordered by org-unit rank (evergreen.rank_ou) and copy
-- status rank, honoring depth, acn slimit/soffset, pref_lib, and --
-- when 'exclude_invisible_acn' is requested -- OPAC visibility.
SELECT evergreen.upgrade_deps_block_check('0769', :eg_version);
1732 DROP FUNCTION IF EXISTS
1733 evergreen.ranked_volumes(BIGINT, INT, INT, HSTORE, HSTORE, INT);
1735 CREATE OR REPLACE FUNCTION evergreen.ranked_volumes(
1738 depth INT DEFAULT NULL,
1739 slimit HSTORE DEFAULT NULL,
1740 soffset HSTORE DEFAULT NULL,
1741 pref_lib INT DEFAULT NULL,
1742 includes TEXT[] DEFAULT NULL::TEXT[]
1743 ) RETURNS TABLE (id BIGINT, name TEXT, label_sortkey TEXT, rank BIGINT) AS $$
-- Outer query keeps each call number once with its best (minimum)
-- rank; inner query joins copies to the org-unit descendant set.
1744 SELECT ua.id, ua.name, ua.label_sortkey, MIN(ua.rank) AS rank FROM (
1745 SELECT acn.id, aou.name, acn.label_sortkey,
1746 evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status),
1748 FROM asset.call_number acn
1749 JOIN asset.copy acp ON (acn.id = acp.call_number)
1750 JOIN actor.org_unit_descendants( $2, COALESCE(
1753 FROM actor.org_unit_type aout
1754 INNER JOIN actor.org_unit ou ON ou_type = aout.id
1757 ) AS aou ON (acp.circ_lib = aou.id)
1758 WHERE acn.record = $1
1759 AND acn.deleted IS FALSE
1760 AND acp.deleted IS FALSE
1761 AND CASE WHEN ('exclude_invisible_acn' = ANY($7)) THEN
1764 FROM asset.opac_visible_copies
1765 WHERE copy_id = acp.id AND record = acn.record
1767 GROUP BY acn.id, acp.status, aou.name, acn.label_sortkey, aou.id
1769 ORDER BY evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status)
1772 GROUP BY ua.id, ua.name, ua.label_sortkey
1773 ORDER BY rank, ua.name, ua.label_sortkey
1774 LIMIT ($4 -> 'acn')::INT
1775 OFFSET ($5 -> 'acn')::INT;
1777 LANGUAGE SQL STABLE;
-- Rebuild unapi.holdings_xml to pass the includes array through to the
-- new ranked_volumes signature.  Emits a holdings XML document for a
-- bib: copy counts (public/staff, and pref_lib when given), and --
-- depending on the 'includes' list ($5) -- monograph parts, ranked
-- volumes plus located URIs, serial subscriptions, and foreign
-- (peer-bib) copies.  $8 toggles xmlns emission; $6/$7 are per-entity
-- limit/offset hstores.
1779 CREATE OR REPLACE FUNCTION unapi.holdings_xml (
1783 depth INT DEFAULT NULL,
1784 includes TEXT[] DEFAULT NULL::TEXT[],
1785 slimit HSTORE DEFAULT NULL,
1786 soffset HSTORE DEFAULT NULL,
1787 include_xmlns BOOL DEFAULT TRUE,
1788 pref_lib INT DEFAULT NULL
1794 CASE WHEN $8 THEN 'http://open-ils.org/spec/holdings/v1' ELSE NULL END AS xmlns,
1795 CASE WHEN ('bre' = ANY ($5)) THEN 'tag:open-ils.org:U2@bre/' || $1 || '/' || $3 ELSE NULL END AS id,
1796 (SELECT record_has_holdable_copy FROM asset.record_has_holdable_copy($1)) AS has_holdable
-- Copy-count elements: public and staff views at the request org,
-- plus a pref_lib view when $9 is supplied.
1800 (SELECT XMLAGG(XMLELEMENT::XML) FROM (
1803 XMLATTRIBUTES('public' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1805 FROM asset.opac_ou_record_copy_count($2, $1)
1809 XMLATTRIBUTES('staff' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1811 FROM asset.staff_ou_record_copy_count($2, $1)
1815 XMLATTRIBUTES('pref_lib' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1817 FROM asset.opac_ou_record_copy_count($9, $1)
-- Monograph parts, when requested.
1822 WHEN ('bmp' = ANY ($5)) THEN
1824 name monograph_parts,
1825 (SELECT XMLAGG(bmp) FROM (
1826 SELECT unapi.bmp( id, 'xml', 'monograph_part', evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'bre'), 'holdings_xml'), $3, $4, $6, $7, FALSE)
1827 FROM biblio.monograph_part
-- Volumes: ranked call numbers (now passing includes, $5, through to
-- ranked_volumes) followed by located URIs at rank 0.
1835 (SELECT XMLAGG(acn ORDER BY rank, name, label_sortkey) FROM (
1837 SELECT unapi.acn(y.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), y.rank, name, label_sortkey
1838 FROM evergreen.ranked_volumes($1, $2, $4, $6, $7, $9, $5) AS y
1841 SELECT unapi.acn(uris.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), 0, name, label_sortkey
1842 FROM evergreen.located_uris($1, $2, $9) AS uris
-- Serial subscriptions, when requested.
1845 CASE WHEN ('ssub' = ANY ($5)) THEN
1848 (SELECT XMLAGG(ssub) FROM (
1849 SELECT unapi.ssub(id,'xml','subscription','{}'::TEXT[], $3, $4, $6, $7, FALSE)
1850 FROM serial.subscription
1851 WHERE record_entry = $1
-- Foreign (multi-homed) copies mapped to this record via peer bibs.
1855 CASE WHEN ('acp' = ANY ($5)) THEN
1857 name foreign_copies,
1858 (SELECT XMLAGG(acp) FROM (
1859 SELECT unapi.acp(p.target_copy,'xml','copy',evergreen.array_remove_item_by_value($5,'acp'), $3, $4, $6, $7, FALSE)
1860 FROM biblio.peer_bib_copy_map p
1861 JOIN asset.copy c ON (p.target_copy = c.id)
1862 WHERE NOT c.deleted AND p.peer_record = $1
1863 LIMIT ($6 -> 'acp')::INT
1864 OFFSET ($7 -> 'acp')::INT
1869 $F$ LANGUAGE SQL STABLE;
-- 0771: action/trigger support for patron barring -- hooks fired when
-- a user is barred or un-barred, plus validators that test the
-- patron's current barred state.
SELECT evergreen.upgrade_deps_block_check('0771', :eg_version);
1875 INSERT INTO action_trigger.hook (
1883 'A user was barred by staff',
1887 INSERT INTO action_trigger.hook (
1895 'A user was un-barred by staff',
1899 INSERT INTO action_trigger.validator (
1904 'Tests if a patron is currently marked as barred'
1907 INSERT INTO action_trigger.validator (
1912 'Tests if a patron is currently not marked as barred'
-- 0772: optional preservation of metarecord mappings when a bib is
-- deleted (flag is off by default), and recreation of the rule that
-- converts bib DELETEs into soft-delete UPDATEs.
-- NOTE(review): DROP RULE has no IF EXISTS here, so this section
-- errors if the rule is absent -- presumably guaranteed by the prior
-- schema; confirm.
SELECT evergreen.upgrade_deps_block_check('0772', :eg_version);
1918 INSERT INTO config.internal_flag (name) VALUES ('ingest.metarecord_mapping.preserve_on_delete'); -- defaults to false/off
1920 DROP RULE protect_bib_rec_delete ON biblio.record_entry;
1921 CREATE RULE protect_bib_rec_delete AS
1922 ON DELETE TO biblio.record_entry DO INSTEAD (
1923 UPDATE biblio.record_entry
1925 WHERE OLD.id = biblio.record_entry.id
1930 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
1932 transformed_xml TEXT;
1935 xfrm config.xml_transform%ROWTYPE;
1937 new_attrs HSTORE := ''::HSTORE;
1938 attr_def config.record_attr_definition%ROWTYPE;
1941 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
1942 PERFORM * FROM config.internal_flag WHERE
1943 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
1945 -- One needs to keep these around to support searches
1946 -- with the #deleted modifier, so one should turn on the named
1947 -- internal flag for that functionality.
1948 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
1949 DELETE FROM metabib.record_attr WHERE id = NEW.id;
1952 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
1953 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
1954 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
1955 RETURN NEW; -- and we're done
1958 IF TG_OP = 'UPDATE' THEN -- re-ingest?
1959 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
1961 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
1966 -- Record authority linking
1967 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
1969 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
1972 -- Flatten and insert the mfr data
1973 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
1975 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
1977 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
1978 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
1980 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
1982 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
1983 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
1984 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
1985 WHERE record = NEW.id
1986 AND tag LIKE attr_def.tag
1988 WHEN attr_def.sf_list IS NOT NULL
1989 THEN POSITION(subfield IN attr_def.sf_list) > 0
1996 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
1997 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
1999 ELSIF attr_def.xpath IS NOT NULL THEN -- and xpath expression
2001 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
2003 -- See if we can skip the XSLT ... it's expensive
2004 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
2005 -- Can't skip the transform
2006 IF xfrm.xslt <> '---' THEN
2007 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
2009 transformed_xml := NEW.marc;
2012 prev_xfrm := xfrm.name;
2015 IF xfrm.name IS NULL THEN
2016 -- just grab the marcxml (empty) transform
2017 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
2018 prev_xfrm := xfrm.name;
2021 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
2023 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
2024 SELECT m.value INTO attr_value
2025 FROM biblio.marc21_physical_characteristics(NEW.id) v
2026 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
2027 WHERE v.subfield = attr_def.phys_char_sf
2028 LIMIT 1; -- Just in case ...
2032 -- apply index normalizers to attr_value
2034 SELECT n.func AS func,
2035 n.param_count AS param_count,
2037 FROM config.index_normalizer n
2038 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
2039 WHERE attr = attr_def.name
2041 EXECUTE 'SELECT ' || normalizer.func || '(' ||
2042 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
2044 WHEN normalizer.param_count > 0
2045 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
2048 ')' INTO attr_value;
2052 -- Add the new value to the hstore
2053 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
2057 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivication
2058 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
2060 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
2066 -- Gather and insert the field entry data
2067 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
2069 -- Located URI magic
2070 IF TG_OP = 'INSERT' THEN
2071 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2073 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2076 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2078 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2082 -- (re)map metarecord-bib linking
2083 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
2084 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
2086 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2088 ELSE -- we're doing an update, and we're not deleted, remap
2089 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
2091 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2097 $func$ LANGUAGE PLPGSQL;
2100 -- Evergreen DB patch xxxx.data.authority_thesaurus_update.sql
2103 -- check whether patch can be applied
2104 SELECT evergreen.upgrade_deps_block_check('0773', :eg_version);
-- Seed an "Alternate no attempt to code" thesaurus entry whose code is a
-- single blank. control_set is NULL, i.e. not tied to any control set.
2107 INSERT INTO authority.thesaurus (code, name, control_set) VALUES
2108 (' ', oils_i18n_gettext(' ','Alternate no attempt to code','at','name'), NULL);
2112 SELECT evergreen.upgrade_deps_block_check('0774', :eg_version);
-- Per-org-unit login credentials for Z39.50 search targets.
-- NOTE(review): the username/password column definitions fall on lines
-- elided from this excerpt (source 2119-2120); do not assume their types.
2114 CREATE TABLE config.z3950_source_credentials (
2115 id SERIAL PRIMARY KEY,
2116 owner INTEGER NOT NULL REFERENCES actor.org_unit(id),
2117 source TEXT NOT NULL REFERENCES config.z3950_source(name) ON DELETE CASCADE ON UPDATE CASCADE DEFERRABLE INITIALLY DEFERRED,
2118 -- do some Z servers require a username but no password or vice versa?
-- At most one credentials row per (source, owner) pair.
2121 CONSTRAINT czsc_source_once_per_lib UNIQUE (source, owner)
2124 -- find the most relevant set of credentials for the Z source and org
-- Searches the ancestor path of $2 (owner) for credentials on source $1 and
-- returns the row belonging to the deepest org-unit type (ORDER BY
-- aout.depth DESC LIMIT 1), i.e. the most specific override wins.
2125 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_lookup
2126 (source TEXT, owner INTEGER)
2127 RETURNS config.z3950_source_credentials AS $$
2130 FROM config.z3950_source_credentials creds
2131 JOIN actor.org_unit aou ON (aou.id = creds.owner)
2132 JOIN actor.org_unit_type aout ON (aout.id = aou.ou_type)
2133 WHERE creds.source = $1 AND creds.owner IN (
2134 SELECT id FROM actor.org_unit_ancestors($2)
2136 ORDER BY aout.depth DESC LIMIT 1;
2138 $$ LANGUAGE SQL STABLE;
2140 -- since we are not exposing config.z3950_source_credentials
2141 -- via the IDL, providing a stored proc gives us a way to
2142 -- set values in the table via cstore
-- Upsert/delete helper: blank uname AND passwd removes the (src, org) row;
-- otherwise an existing row is updated, and when no row exists a new one is
-- inserted only if at least one value is non-blank.
2143 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_apply
2144 (src TEXT, org INTEGER, uname TEXT, passwd TEXT)
-- Probe for an existing row for this source/org pair.
2147 PERFORM 1 FROM config.z3950_source_credentials
2148 WHERE owner = org AND source = src;
-- NOTE(review): the branch on FOUND appears on lines elided from this
-- excerpt; the blocks below are the existing-row and new-row cases.
2151 IF COALESCE(uname, '') = '' AND COALESCE(passwd, '') = '' THEN
-- Both values blank: interpret as a request to clear stored credentials.
2152 DELETE FROM config.z3950_source_credentials
2153 WHERE owner = org AND source = src;
2155 UPDATE config.z3950_source_credentials
2156 SET username = uname, password = passwd
2157 WHERE owner = org AND source = src;
-- No existing row: insert only when there is something to store.
2160 IF COALESCE(uname, '') <> '' OR COALESCE(passwd, '') <> '' THEN
2161 INSERT INTO config.z3950_source_credentials
2162 (source, owner, username, password)
2163 VALUES (src, org, uname, passwd);
2167 $$ LANGUAGE PLPGSQL;
2172 SELECT evergreen.upgrade_deps_block_check('0775', :eg_version);
-- Rebuild the config.z3950_attr -> config.z3950_source FK so it is
-- deferrable; any ON DELETE/ON UPDATE options appear on lines elided from
-- this excerpt.
2174 ALTER TABLE config.z3950_attr
2175 DROP CONSTRAINT z3950_attr_source_fkey,
2176 ADD CONSTRAINT z3950_attr_source_fkey
2177 FOREIGN KEY (source)
2178 REFERENCES config.z3950_source(name)
2181 DEFERRABLE INITIALLY DEFERRED;
2184 SELECT evergreen.upgrade_deps_block_check('0776', :eg_version);
-- Flag marking which lineitem attribute serves as the ordering identifier.
2186 ALTER TABLE acq.lineitem_attr
2187 ADD COLUMN order_ident BOOLEAN NOT NULL DEFAULT FALSE;
-- New permissions for managing lineitem identifiers. The numeric id values
-- and the closing of each INSERT fall on lines elided from this excerpt.
2189 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2191 'ACQ_ADD_LINEITEM_IDENTIFIER',
2194 'When granted, newly added lineitem identifiers will propagate to linked bib records',
2200 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2202 'ACQ_SET_LINEITEM_IDENTIFIER',
2205 'Allows staff to change the lineitem identifier',
2212 SELECT evergreen.upgrade_deps_block_check('0777', :eg_version);
2214 -- Listed here for reference / ease of access. The update
2215 -- is not applied here (see the WHERE clause).
-- Replacement EDI purchase-order (JEDI) Template-Toolkit template for A/T
-- event definition 23. Everything between this statement's opening and its
-- final WHERE clause is a quoted template string (runtime data), so it is
-- deliberately left byte-identical here.
2216 UPDATE action_trigger.event_definition SET template =
2220 # extract some commonly used variables
2222 VENDOR_SAN = target.provider.san;
2223 VENDCODE = target.provider.edi_default.vendcode;
2224 VENDACCT = target.provider.edi_default.vendacct;
2225 ORG_UNIT_SAN = target.ordering_agency.mailing_address.san;
2227 # set the vendor / provider
2229 VENDOR_BT = 0; # Baker & Taylor
2232 VENDOR_MW_TAPE = 0; # Midwest Tape
2233 VENDOR_RB = 0; # Recorded Books
2234 VENDOR_ULS = 0; # ULS
2236 IF VENDOR_SAN == '1556150'; VENDOR_BT = 1;
2237 ELSIF VENDOR_SAN == '1697684'; VENDOR_BRODART = 1;
2238 ELSIF VENDOR_SAN == '1697978'; VENDOR_INGRAM = 1;
2239 ELSIF VENDOR_SAN == '2549913'; VENDOR_MW_TAPE = 1;
2240 ELSIF VENDOR_SAN == '1113984'; VENDOR_RB = 1;
2241 ELSIF VENDOR_SAN == '1699342'; VENDOR_ULS = 1;
2244 # if true, pass the PO name as a secondary identifier
2245 # RFF+LI:<name>/li_id
2251 # GIR configuration --------------------------------------
2253 INC_COPIES = 1; # copies on/off switch
2258 INC_COLLECTION_CODE = 1;
2273 INC_COLLECTION_CODE = 0;
2277 # END GIR configuration ---------------------------------
2280 [%- BLOCK big_block -%]
2282 "recipient":"[% VENDOR_SAN %]",
2283 "sender":"[% ORG_UNIT_SAN %]",
2285 "ORDERS":[ "order", {
2287 "po_number":[% target.id %],
2289 [% IF INC_PO_NAME %]
2290 "po_name":"[% target.name | replace('\/', ' ') | replace('"', '\"') %]",
2293 "date":"[% date.format(date.now, '%Y%m%d') %]",
2297 {"id-qualifier": 91, "id":"[% ORG_UNIT_SAN %] [% VENDCODE %]"}
2299 {"id":"[% ORG_UNIT_SAN %]"},
2300 {"id-qualifier": 91, "id":"[% VENDACCT %]"}
2306 {"id-qualifier": 92, "id":"[% target.provider.id %]"}
2309 "currency":"[% target.provider.currency_type %]",
2312 [%- FOR li IN target.lineitems %]
2314 "line_index":"[% li.id %]",
2318 idqual = 'EN'; # default ISBN/UPC/EAN-13
2319 ident_attr = helpers.get_li_order_ident(li.attributes);
2321 idname = ident_attr.attr_name;
2322 idval = ident_attr.attr_value;
2323 IF idname == 'isbn' AND idval.length != 13;
2325 ELSIF idname == 'issn';
2332 {"id-qualifier":"[% idqual %]","id":"[% idval %]"}
2334 "price":[% li.estimated_unit_price || '0.00' %],
2336 {"BTI":"[% helpers.get_li_attr_jedi('title', '', li.attributes) %]"},
2337 {"BPU":"[% helpers.get_li_attr_jedi('publisher', '', li.attributes) %]"},
2338 {"BPD":"[% helpers.get_li_attr_jedi('pubdate', '', li.attributes) %]"},
2339 [% IF VENDOR_ULS -%]
2340 {"BEN":"[% helpers.get_li_attr_jedi('edition', '', li.attributes) %]"},
2341 {"BAU":"[% helpers.get_li_attr_jedi('author', '', li.attributes) %]"}
2343 {"BPH":"[% helpers.get_li_attr_jedi('pagination','', li.attributes) %]"}
2347 FOR note IN li.lineitem_notes;
2348 NEXT UNLESS note.vendor_public == 't';
2349 ftx_vals.push(note.value);
2351 IF VENDOR_BRODART; # look for copy-level spec code
2352 FOR lid IN li.lineitem_details;
2354 spec_note = lid.note.match('spec code ([a-zA-Z0-9_])');
2355 IF spec_note.0; ftx_vals.push(spec_note.0); END;
2359 IF xtra_ftx; ftx_vals.unshift(xtra_ftx); END;
2361 # BT & ULS want FTX+LIN for every LI, even if empty
2362 IF ((VENDOR_BT OR VENDOR_ULS) AND ftx_vals.size == 0);
2363 ftx_vals.unshift('');
2368 [% FOR note IN ftx_vals -%] "[% note %]"[% UNLESS loop.last %], [% END %][% END %]
2371 "quantity":[% li.lineitem_details.size %],
2373 [%- IF INC_COPIES -%]
2375 [%- compressed_copies = [];
2376 FOR lid IN li.lineitem_details;
2377 fund = lid.fund.code;
2378 item_type = lid.circ_modifier;
2379 callnumber = lid.cn_label;
2380 owning_lib = lid.owning_lib.shortname;
2381 location = lid.location;
2382 collection_code = lid.collection_code;
2384 # when we have real copy data, treat it as authoritative for some fields
2385 acp = lid.eg_copy_id;
2387 item_type = acp.circ_modifier;
2388 callnumber = acp.call_number.label;
2389 location = acp.location.name;
2393 # collapse like copies into groups w/ quantity
2396 IF !INC_COPY_ID; # INC_COPY_ID implies 1 copy per GIR
2397 FOR copy IN compressed_copies;
2398 IF (fund == copy.fund OR (!fund AND !copy.fund)) AND
2399 (item_type == copy.item_type OR (!item_type AND !copy.item_type)) AND
2400 (callnumber == copy.callnumber OR (!callnumber AND !copy.callnumber)) AND
2401 (owning_lib == copy.owning_lib OR (!owning_lib AND !copy.owning_lib)) AND
2402 (location == copy.location OR (!location AND !copy.location)) AND
2403 (collection_code == copy.collection_code OR (!collection_code AND !copy.collection_code));
2405 copy.quantity = copy.quantity + 1;
2412 compressed_copies.push({
2414 item_type => item_type,
2415 callnumber => callnumber,
2416 owning_lib => owning_lib,
2417 location => location,
2418 collection_code => collection_code,
2419 copy_id => lid.id, # for INC_COPY_ID
2424 FOR copy IN compressed_copies;
2426 # If we assume owning_lib is required and set,
2427 # it is safe to prepend each following copy field w/ a ","
2429 # B&T EDI requires expected GIR fields to be
2430 # present regardless of whether a value exists.
2431 # some fields are required to have a value in ACQ,
2432 # though, so they are not forced into place below.
2434 %]{[%- IF INC_OWNING_LIB AND copy.owning_lib %] "owning_lib":"[% copy.owning_lib %]"[% END -%]
2435 [%- IF INC_FUND AND copy.fund %],"fund":"[% copy.fund %]"[% END -%]
2436 [%- IF INC_CALLNUMBER AND (VENDOR_BT OR copy.callnumber) %],"call_number":"[% copy.callnumber %]"[% END -%]
2437 [%- IF INC_ITEM_TYPE AND (VENDOR_BT OR copy.item_type) %],"item_type":"[% copy.item_type %]"[% END -%]
2438 [%- IF INC_LOCATION AND copy.location %],"copy_location":"[% copy.location %]"[% END -%]
2439 [%- IF INC_COLLECTION_CODE AND (VENDOR_BT OR copy.collection_code) %],"collection_code":"[% copy.collection_code %]"[% END -%]
2440 [%- IF INC_QUANTITY %],"quantity":"[% copy.quantity %]"[% END -%]
2441 [%- IF INC_COPY_ID %],"copy_id":"[% copy.copy_id %]" [% END %]}[% ',' UNLESS loop.last -%]
2442 [%- END -%] [%# FOR compressed_copies -%]
2444 [%- END -%] [%# IF INC_COPIES %]
2446 }[% UNLESS loop.last %],[% END -%]
2448 [% END %] [%# END lineitems %]
2450 "line_items":[% target.lineitems.size %]
2451 }] [%# close ORDERS array %]
2452 }] [%# close body array %]
2455 [% tempo = PROCESS big_block; helpers.escape_json(tempo) %]
2457 WHERE ID = 23 AND FALSE; -- remove 'AND FALSE' to apply this update
2460 -- lineitem worksheet
-- Replacement HTML purchase-order worksheet template for the A/T event
-- definition targeted by this statement's trailing WHERE clause. The
-- template body is a quoted string literal (runtime data) and is left
-- byte-identical.
2461 UPDATE action_trigger.event_definition SET template =
2465 # find a lineitem attribute by name and optional type
2467 FOR attr IN li.attributes;
2468 IF attr.attr_name == attr_name;
2469 IF !attr_type OR attr_type == attr.attr_type;
2478 <h2>Purchase Order [% target.id %]</h2>
2480 date <b>[% date.format(date.now, '%Y%m%d') %]</b>
2484 table td { padding:5px; border:1px solid #aaa;}
2485 table { width:95%; border-collapse:collapse; }
2486 #vendor-notes { padding:5px; border:1px solid #aaa; }
2488 <table id='vendor-table'>
2490 <td valign='top'>Vendor</td>
2492 <div>[% target.provider.name %]</div>
2493 <div>[% target.provider.addresses.0.street1 %]</div>
2494 <div>[% target.provider.addresses.0.street2 %]</div>
2495 <div>[% target.provider.addresses.0.city %]</div>
2496 <div>[% target.provider.addresses.0.state %]</div>
2497 <div>[% target.provider.addresses.0.country %]</div>
2498 <div>[% target.provider.addresses.0.post_code %]</div>
2500 <td valign='top'>Ship to / Bill to</td>
2502 <div>[% target.ordering_agency.name %]</div>
2503 <div>[% target.ordering_agency.billing_address.street1 %]</div>
2504 <div>[% target.ordering_agency.billing_address.street2 %]</div>
2505 <div>[% target.ordering_agency.billing_address.city %]</div>
2506 <div>[% target.ordering_agency.billing_address.state %]</div>
2507 <div>[% target.ordering_agency.billing_address.country %]</div>
2508 <div>[% target.ordering_agency.billing_address.post_code %]</div>
2514 <fieldset id='vendor-notes'>
2515 <legend>Notes to the Vendor</legend>
2517 [% FOR note IN target.notes %]
2518 [% IF note.vendor_public == 't' %]
2519 <li>[% note.value %]</li>
2530 <th>ISBN or Item #</th>
2541 [% FOR li IN target.lineitems %]
2544 [% count = li.lineitem_details.size %]
2545 [% price = li.estimated_unit_price %]
2546 [% litotal = (price * count) %]
2547 [% subtotal = subtotal + litotal %]
2549 ident_attr = helpers.get_li_order_ident(li.attributes);
2550 SET ident_value = ident_attr.attr_value IF ident_attr;
2552 <td>[% target.id %]</td>
2553 <td>[% ident_value %]</td>
2554 <td>[% PROCESS get_li_attr attr_name = 'title' %]</td>
2555 <td>[% count %]</td>
2556 <td>[% price %]</td>
2557 <td>[% litotal %]</td>
2560 [% FOR note IN li.lineitem_notes %]
2561 [% IF note.vendor_public == 't' %]
2562 <li>[% note.value %]</li>
2570 <td/><td/><td/><td/>
2572 <td>[% subtotal %]</td>
2579 Total Line Item Count: [% target.lineitems.size %]
2581 WHERE ID = 4; -- PO HTML
2584 SELECT evergreen.upgrade_deps_block_check('0778', :eg_version);
-- Set-returning MARC extractor: for the row of table $1 with id $2, return
-- each value matched by XPath $3 (via oils_xpath_table); when $4 is given,
-- strip every occurrence of that regex from the output. Parts of the body
-- are elided from this excerpt.
2586 CREATE OR REPLACE FUNCTION extract_marc_field_set
2587 (TEXT, BIGINT, TEXT, TEXT) RETURNS SETOF TEXT AS $$
2595 FROM oils_xpath_table(
2596 'id', 'marc', $1, $3, 'id = ' || $2)
2597 AS t(id int, t text))x
2599 IF $4 IS NOT NULL THEN
-- Remove all matches of the optional cleanup pattern ('g' = global).
2600 SELECT INTO output (SELECT regexp_replace(output, $4, '', 'g'));
2606 $$ LANGUAGE PLPGSQL IMMUTABLE;
-- Thin wrapper over extract_marc_field_set hard-wired to acq.lineitem.
-- (The closing LANGUAGE clause falls on lines elided from this excerpt.)
2609 CREATE OR REPLACE FUNCTION
2610 public.extract_acq_marc_field_set ( BIGINT, TEXT, TEXT)
2611 RETURNS SETOF TEXT AS $$
2612 SELECT extract_marc_field_set('acq.lineitem', $1, $2, $3);
-- Trigger function: (re)extracts acq.lineitem_attr rows from a lineitem's
-- MARC, driven by the acq.lineitem_attr_definition hierarchy. Several
-- interior lines (declarations, loop/IF closings) are elided from this
-- excerpt; comments below describe only what is visible.
2616 CREATE OR REPLACE FUNCTION public.ingest_acq_marc ( ) RETURNS TRIGGER AS $function$
2625 FOR adef IN SELECT *,tableoid FROM acq.lineitem_attr_definition LOOP
-- tableoid identifies which child definition table this row came from.
2627 SELECT relname::TEXT INTO atype FROM pg_class WHERE oid = adef.tableoid;
-- Skip user/local definitions; they are not MARC-driven.
2629 IF (atype NOT IN ('lineitem_usr_attr_definition','lineitem_local_attr_definition')) THEN
2630 IF (atype = 'lineitem_provider_attr_definition') THEN
-- Provider-specific definitions apply only to this lineitem's provider.
2631 SELECT provider INTO prov FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
2632 CONTINUE WHEN NEW.provider IS NULL OR prov <> NEW.provider;
2635 IF (atype = 'lineitem_provider_attr_definition') THEN
2636 SELECT xpath INTO xpath_string FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
2637 ELSIF (atype = 'lineitem_marc_attr_definition') THEN
2638 SELECT xpath INTO xpath_string FROM acq.lineitem_marc_attr_definition WHERE id = adef.id;
2639 ELSIF (atype = 'lineitem_generated_attr_definition') THEN
2640 SELECT xpath INTO xpath_string FROM acq.lineitem_generated_attr_definition WHERE id = adef.id;
-- Strip a trailing /text() or //text() step so positional predicates can
-- be appended below.
2643 xpath_string := REGEXP_REPLACE(xpath_string,$re$//?text\(\)$$re$,'');
2645 IF (adef.code = 'title' OR adef.code = 'author') THEN
2646 -- title and author should not be split
2647 -- FIXME: once oils_xpath can grok XPATH 2.0 functions, we can use
2648 -- string-join in the xpath and remove this special case
2649 SELECT extract_acq_marc_field(id, xpath_string, adef.remove) INTO value FROM acq.lineitem WHERE id = NEW.id;
2650 IF (value IS NOT NULL AND value <> '') THEN
2651 INSERT INTO acq.lineitem_attr (lineitem, definition, attr_type, attr_name, attr_value)
2652 VALUES (NEW.id, adef.id, atype, adef.code, value);
2657 -- each application of the regex may produce multiple values
-- Positional predicate [pos] walks each match of the XPath in turn.
2659 SELECT * FROM extract_acq_marc_field_set(
2660 NEW.id, xpath_string || '[' || pos || ']', adef.remove)
2663 IF (value IS NOT NULL AND value <> '') THEN
2664 INSERT INTO acq.lineitem_attr
2665 (lineitem, definition, attr_type, attr_name, attr_value)
2666 VALUES (NEW.id, adef.id, atype, adef.code, value);
2684 $function$ LANGUAGE PLPGSQL;
2687 SELECT evergreen.upgrade_deps_block_check('0779', :eg_version);
-- Named groups of "trash" (discard) fields for Vandelay bib imports.
2689 CREATE TABLE vandelay.import_bib_trash_group(
2690 id SERIAL PRIMARY KEY,
2691 owner INT NOT NULL REFERENCES actor.org_unit(id),
2692 label TEXT NOT NULL, --i18n
2693 always_apply BOOLEAN NOT NULL DEFAULT FALSE,
2694 CONSTRAINT vand_import_bib_trash_grp_owner_label UNIQUE (owner, label)
2697 -- otherwise, the ALTER TABLE statement below
2698 -- will fail with pending trigger events.
2699 SET CONSTRAINTS ALL IMMEDIATE;
2701 ALTER TABLE vandelay.import_bib_trash_fields
2702 -- allow null-able for now..
2703 ADD COLUMN grp INTEGER REFERENCES vandelay.import_bib_trash_group;
2705 -- add any existing trash_fields to "Legacy" groups (one per unique field
2706 -- owner) as part of the upgrade, since grp is now required.
2707 -- note that vandelay.import_bib_trash_fields was never used before,
2708 -- so in most cases this should be a no-op.
2710 INSERT INTO vandelay.import_bib_trash_group (owner, label)
2711 SELECT DISTINCT(owner), 'Legacy' FROM vandelay.import_bib_trash_fields;
-- Point each legacy trash field at its owner's "Legacy" group.
2713 UPDATE vandelay.import_bib_trash_fields field SET grp = tgroup.id
2714 FROM vandelay.import_bib_trash_group tgroup
2715 WHERE tgroup.owner = field.owner;
2717 ALTER TABLE vandelay.import_bib_trash_fields
2718 -- now that we have values, we can make this non-null
2719 ALTER COLUMN grp SET NOT NULL,
2720 -- drop outdated constraint
2721 DROP CONSTRAINT vand_import_bib_trash_fields_idx,
2722 -- owner is implied by the grp
2724 -- make grp+field unique
2725 ADD CONSTRAINT vand_import_bib_trash_fields_once_per UNIQUE (grp, field);
2728 SELECT evergreen.upgrade_deps_block_check('0780', :eg_version);
-- Add optional fund, circ_modifier, and collection_code columns to
-- distribution formula entries (all nullable, so existing rows are valid).
2730 ALTER TABLE acq.distribution_formula_entry
2731 ADD COLUMN fund INT REFERENCES acq.fund (id),
2732 ADD COLUMN circ_modifier TEXT REFERENCES config.circ_modifier (code),
2733 ADD COLUMN collection_code TEXT ;
2736 -- support option to roll distribution formula funds
-- Fiscal rollover of funds for an org unit (optionally its descendants,
-- per include_desc); new_year is old_year + 1 in the body. Most of the
-- body is elided from this excerpt, so annotation is limited to the head.
-- encumb_only: when TRUE, roll encumbrances without transferring money
-- (gated by the acq.fund.allow_rollover_without_money org setting below).
2737 CREATE OR REPLACE FUNCTION acq.rollover_funds_by_org_tree(
2740 org_unit_id INTEGER,
2741 encumb_only BOOL DEFAULT FALSE,
2742 include_desc BOOL DEFAULT TRUE
2743 ) RETURNS VOID AS $$
2747 new_year INT := old_year + 1;
2750 xfer_amount NUMERIC := 0;
2754 roll_distrib_forms BOOL;
2760 IF old_year IS NULL THEN
2761 RAISE EXCEPTION 'Input year argument is NULL';
2762 ELSIF old_year NOT BETWEEN 2008 and 2200 THEN
2763 RAISE EXCEPTION 'Input year is out of range';
2766 IF user_id IS NULL THEN
2767 RAISE EXCEPTION 'Input user id argument is NULL';
2770 IF org_unit_id IS NULL THEN
2771 RAISE EXCEPTION 'Org unit id argument is NULL';
2774 -- Validate the org unit
2779 WHERE id = org_unit_id;
2781 IF org_found IS NULL THEN
2782 RAISE EXCEPTION 'Org unit id % is invalid', org_unit_id;
2783 ELSIF encumb_only THEN
2784 SELECT INTO perm_ous value::BOOL FROM
2785 actor.org_unit_ancestor_setting(
2786 'acq.fund.allow_rollover_without_money', org_unit_id
2788 IF NOT FOUND OR NOT perm_ous THEN
2789 RAISE EXCEPTION 'Encumbrance-only rollover not permitted at org %', org_unit_id;
2794 -- Loop over the propagable funds to identify the details
2795 -- from the old fund plus the id of the new one, if it exists.
2799 oldf.id AS old_fund,
2805 newf.id AS new_fund_id
2808 LEFT JOIN acq.fund AS newf
2809 ON ( oldf.code = newf.code )
2811 oldf.year = old_year
2813 AND newf.year = new_year
2814 AND ( ( include_desc AND oldf.org IN ( SELECT id FROM actor.org_unit_descendants( org_unit_id ) ) )
2815 OR (NOT include_desc AND oldf.org = org_unit_id ) )
2817 --RAISE NOTICE 'Processing fund %', roll_fund.old_fund;
2819 IF roll_fund.new_fund_id IS NULL THEN
2821 -- The old fund hasn't been propagated yet. Propagate it now.
2823 INSERT INTO acq.fund (
2831 balance_warning_percent,
2832 balance_stop_percent
2837 roll_fund.currency_type,
2841 roll_fund.balance_warning_percent,
2842 roll_fund.balance_stop_percent
2844 RETURNING id INTO new_fund;
2846 new_fund = roll_fund.new_fund_id;
2849 -- Determine the amount to transfer
2853 FROM acq.fund_spent_balance
2854 WHERE fund = roll_fund.old_fund;
2856 IF xfer_amount <> 0 THEN
2857 IF NOT encumb_only AND roll_fund.rollover THEN
2859 -- Transfer balance from old fund to new
2861 --RAISE NOTICE 'Transferring % from fund % to %', xfer_amount, roll_fund.old_fund, new_fund;
2863 PERFORM acq.transfer_fund(
2873 -- Transfer balance from old fund to the void
2875 -- RAISE NOTICE 'Transferring % from fund % to the void', xfer_amount, roll_fund.old_fund;
2877 PERFORM acq.transfer_fund(
2883 'Rollover into the void'
2888 IF roll_fund.rollover THEN
2890 -- Move any lineitems from the old fund to the new one
2891 -- where the associated debit is an encumbrance.
2893 -- Any other tables tying expenditure details to funds should
2894 -- receive similar treatment. At this writing there are none.
2896 UPDATE acq.lineitem_detail
2899 fund = roll_fund.old_fund -- this condition may be redundant
2905 fund = roll_fund.old_fund
2909 -- Move encumbrance debits from the old fund to the new fund
2911 UPDATE acq.fund_debit
2914 fund = roll_fund.old_fund
2918 -- Rollover distribution formulae funds
2919 SELECT INTO roll_distrib_forms value::BOOL FROM
2920 actor.org_unit_ancestor_setting(
2921 'acq.fund.rollover_distrib_forms', org_unit_id
2924 IF roll_distrib_forms THEN
2925 UPDATE acq.distribution_formula_entry
2926 SET fund = roll_fund.new_fund_id
2927 WHERE fund = roll_fund.old_fund;
2931 -- Mark old fund as inactive, now that we've closed it
2935 WHERE id = roll_fund.old_fund;
2938 $$ LANGUAGE plpgsql;
2942 SELECT evergreen.upgrade_deps_block_check('0781', :eg_version);
-- Org setting toggling whether fiscal rollover also repoints distribution
-- formula entry funds (read by acq.rollover_funds_by_org_tree).
-- NOTE(review): 'formalae' in the description string below looks like a
-- typo for 'formulae'; it is stored data, so it is left byte-identical.
2944 INSERT INTO config.org_unit_setting_type
2945 (name, label, description, grp, datatype)
2947 'acq.fund.rollover_distrib_forms',
2949 'acq.fund.rollover_distrib_forms',
2950 'Rollover Distribution Formulae Funds',
2955 'acq.fund.rollover_distrib_forms',
2956 'During fiscal rollover, update distribution formalae to use new funds',
2965 -- No transaction needed. This can be run on a live, production server.
2966 SELECT evergreen.upgrade_deps_block_check('0782', :eg_version);
2968 /* ** Handled by the supplemental script ** */
2969 -- On a heavily used system, user activity lookup is painful. This is used
2970 -- on the patron display in the staff client.
2972 -- Measured speed increase: ~2s -> .01s
2973 -- CREATE INDEX CONCURRENTLY usr_activity_usr_idx on actor.usr_activity (usr);
2975 -- Finding open holds, often as a subquery within larger hold-related logic,
2976 -- can be sped up with the following.
2978 -- Measured speed increase: ~3s -> .02s
2979 -- CREATE INDEX CONCURRENTLY hold_request_open_idx on action.hold_request (id) where cancel_time IS NULL AND fulfillment_time IS NULL;
2981 -- Hold queue position is a particularly difficult thing to calculate
2982 -- efficiently. Recent changes in the query structure now allow some
2983 -- optimization via indexing. These do that.
2985 -- Measured speed increase: ~6s -> ~0.4s
2986 -- CREATE INDEX CONCURRENTLY cp_available_by_circ_lib_idx on asset.copy (circ_lib) where status IN (0,7);
2987 -- CREATE INDEX CONCURRENTLY hold_request_current_copy_before_cap_idx on action.hold_request (current_copy) where capture_time IS NULL AND cancel_time IS NULL;
2989 -- After heavy use, fetching EDI messages becomes time consuming. The following
2990 -- index addresses that for large-data environments.
2992 -- Measured speed increase: ~3s -> .1s
2993 -- CREATE INDEX CONCURRENTLY edi_message_account_status_idx on acq.edi_message (account,status);
2995 -- After heavy use, fetching POs becomes time consuming. The following
2996 -- index addresses that for large-data environments.
2998 -- Measured speed increase: ~1.5s -> .1s
2999 -- CREATE INDEX CONCURRENTLY edi_message_po_idx on acq.edi_message (purchase_order);
3001 -- Related to EDI messages, fetching of certain A/T events benefit from specific
3002 -- indexing. This index is more general than necessary for the observed query
3003 -- but ends up speeding several other (already relatively fast) queries.
3005 -- Measured speed increase: ~2s -> .06s
3006 -- CREATE INDEX CONCURRENTLY atev_def_state on action_trigger.event (event_def,state);
3008 -- Retrieval of hold transit by hold id (for transit completion or cancelation)
3009 -- is slow in some query formulations.
3011 -- Measured speed increase: ~.5s -> .1s
3012 -- CREATE INDEX CONCURRENTLY hold_transit_copy_hold_idx on action.hold_transit_copy (hold);
3015 SELECT evergreen.upgrade_deps_block_check('0785', :eg_version);
3017 DROP INDEX actor.prox_adj_once_idx;
-- Recreate the uniqueness index with COALESCE'd sentinels (-1 / '') so rows
-- that differ only by NULL in these columns still collide as duplicates.
-- The tail of the column list is elided from this excerpt.
3019 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (
3020 COALESCE(item_circ_lib, -1),
3021 COALESCE(item_owning_lib, -1),
3022 COALESCE(copy_location, -1),
3023 COALESCE(hold_pickup_lib, -1),
3024 COALESCE(hold_request_lib, -1),
3025 COALESCE(circ_mod, ''),
3030 --Check if we can apply the upgrade.
3031 SELECT evergreen.upgrade_deps_block_check('0786', :eg_version);
-- Composite row types consumed by the search.query_parser_fts driver below.
3034 CREATE TYPE search.search_result AS ( id BIGINT, rel NUMERIC, record INT, total INT, checked INT, visible INT, deleted INT, excluded INT );
3035 CREATE TYPE search.search_args AS ( id INT, field_class TEXT, field_name TEXT, table_alias TEXT, term TEXT, term_type TEXT );
3037 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3039 param_search_ou INT,
3042 param_statuses INT[],
3043 param_locations INT[],
3049 param_pref_ou INT DEFAULT NULL
3050 ) RETURNS SETOF search.search_result AS $func$
3053 current_res search.search_result%ROWTYPE;
3054 search_org_list INT[];
3055 luri_org_list INT[];
3064 core_cursor REFCURSOR;
3065 core_rel_query TEXT;
3067 total_count INT := 0;
3068 check_count INT := 0;
3069 deleted_count INT := 0;
3070 visible_count INT := 0;
3071 excluded_count INT := 0;
3075 check_limit := COALESCE( param_check, 1000 );
3076 core_limit := COALESCE( param_limit, 25000 );
3077 core_offset := COALESCE( param_offset, 0 );
3079 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
3081 IF param_search_ou > 0 THEN
3082 IF param_depth IS NOT NULL THEN
3083 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3085 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
3088 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
3090 ELSIF param_search_ou < 0 THEN
3091 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3093 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3094 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3095 luri_org_list := luri_org_list || tmp_int_list;
3098 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3100 ELSIF param_search_ou = 0 THEN
3101 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
3104 IF param_pref_ou IS NOT NULL THEN
3105 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3106 luri_org_list := luri_org_list || tmp_int_list;
3109 OPEN core_cursor FOR EXECUTE param_query;
3113 FETCH core_cursor INTO core_result;
3114 EXIT WHEN NOT FOUND;
3115 EXIT WHEN total_count >= core_limit;
3117 total_count := total_count + 1;
3119 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3121 check_count := check_count + 1;
3123 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3125 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3126 deleted_count := deleted_count + 1;
3131 FROM biblio.record_entry b
3132 JOIN config.bib_source s ON (b.source = s.id)
3133 WHERE s.transcendant
3134 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3137 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3138 visible_count := visible_count + 1;
3140 current_res.id = core_result.id;
3141 current_res.rel = core_result.rel;
3145 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3149 current_res.record = core_result.records[1];
3151 current_res.record = NULL;
3154 RETURN NEXT current_res;
3160 FROM asset.call_number cn
3161 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3162 JOIN asset.uri uri ON (map.uri = uri.id)
3163 WHERE NOT cn.deleted
3164 AND cn.label = '##URI##'
3166 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3167 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3168 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3172 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3173 visible_count := visible_count + 1;
3175 current_res.id = core_result.id;
3176 current_res.rel = core_result.rel;
3180 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3184 current_res.record = core_result.records[1];
3186 current_res.record = NULL;
3189 RETURN NEXT current_res;
3194 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3197 FROM asset.call_number cn
3198 JOIN asset.copy cp ON (cp.call_number = cn.id)
3199 WHERE NOT cn.deleted
3201 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3202 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3203 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3208 FROM biblio.peer_bib_copy_map pr
3209 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3210 WHERE NOT cp.deleted
3211 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3212 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3213 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3217 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3218 excluded_count := excluded_count + 1;
3225 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3228 FROM asset.call_number cn
3229 JOIN asset.copy cp ON (cp.call_number = cn.id)
3230 WHERE NOT cn.deleted
3232 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3233 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3234 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3239 FROM biblio.peer_bib_copy_map pr
3240 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3241 WHERE NOT cp.deleted
3242 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3243 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3244 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3248 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3249 excluded_count := excluded_count + 1;
3256 IF staff IS NULL OR NOT staff THEN
3259 FROM asset.opac_visible_copies
3260 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3261 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3266 FROM biblio.peer_bib_copy_map pr
3267 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3268 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3269 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3274 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3275 excluded_count := excluded_count + 1;
3283 FROM asset.call_number cn
3284 JOIN asset.copy cp ON (cp.call_number = cn.id)
3285 WHERE NOT cn.deleted
3287 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3288 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3294 FROM biblio.peer_bib_copy_map pr
3295 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3296 WHERE NOT cp.deleted
3297 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3298 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3304 FROM asset.call_number cn
3305 JOIN asset.copy cp ON (cp.call_number = cn.id)
3306 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3311 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3312 excluded_count := excluded_count + 1;
3321 visible_count := visible_count + 1;
3323 current_res.id = core_result.id;
3324 current_res.rel = core_result.rel;
3328 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3332 current_res.record = core_result.records[1];
3334 current_res.record = NULL;
3337 RETURN NEXT current_res;
3339 IF visible_count % 1000 = 0 THEN
3340 -- RAISE NOTICE ' % visible so far ... ', visible_count;
3345 current_res.id = NULL;
3346 current_res.rel = NULL;
3347 current_res.record = NULL;
3348 current_res.total = total_count;
3349 current_res.checked = check_count;
3350 current_res.deleted = deleted_count;
3351 current_res.visible = visible_count;
3352 current_res.excluded = excluded_count;
3356 RETURN NEXT current_res;
3359 $func$ LANGUAGE PLPGSQL;
3363 SELECT evergreen.upgrade_deps_block_check('0788', :eg_version);
3365 -- New view including 264 as a potential tag for publisher and pubdate
-- Upgrade 0788: recreate reporter.old_super_simple_record so that publisher
-- and pubdate are harvested from MARC 264 (ind2 = '1', RDA records) in
-- addition to the traditional 260 field.
-- NOTE(review): original lines 3367-3371 (the head of this view's SELECT
-- list, e.g. r.id / r.fingerprint / r.quality columns) are absent from this
-- numbered dump, so the view body below is incomplete as shown -- confirm
-- against the stock 0788 upgrade script before relying on it.
3366 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
3372 FIRST(title.value) AS title,
3373 FIRST(author.value) AS author,
3374 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
-- pubdate keeps only the leading digit run of 260/264 $c (e.g. "2013" from "c2013.")
3375 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
-- isbn: first whitespace-delimited token of 020/024 $a/$z, hyphens stripped
3376 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
-- issn: normalize "nnnn-nnn(n|x)" forms into "nnnn nnnn"
3377 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
3378 FROM biblio.record_entry r
3379 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
3380 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
-- publisher/pubdate accept either 260, or 264 with second indicator '1'
3381 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND (publisher.tag = '260' OR (publisher.tag = '264' AND publisher.ind2 = '1')) AND publisher.subfield = 'b')
3382 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND (pubdate.tag = '260' OR (pubdate.tag = '264' AND pubdate.ind2 = '1')) AND pubdate.subfield = 'c')
3383 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
3384 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
-- Refresh the materialized table only for bibs that actually carry 264 $b/$c:
-- delete the stale rows, then re-insert them from the updated view.
3387 -- Update reporter.materialized_simple_record with new 264-based values for publisher and pubdate
3388 DELETE FROM reporter.materialized_simple_record WHERE id IN (
3389 SELECT DISTINCT record FROM metabib.full_rec WHERE tag = '264' AND subfield IN ('b', 'c')
-- NOTE(review): the closing ");" of the DELETE (original line ~3390) is
-- missing from this dump.
3392 INSERT INTO reporter.materialized_simple_record
3393 SELECT DISTINCT rossr.* FROM reporter.old_super_simple_record rossr INNER JOIN metabib.full_rec mfr ON mfr.record = rossr.id
3394 WHERE mfr.tag = '264' AND mfr.subfield IN ('b', 'c')
-- Upgrades 0789/0790 are registered together; 0790 adds a per-class flag
-- controlling whether a metabib class uses a combined keyword-style index.
3397 SELECT evergreen.upgrade_deps_block_check('0789', :eg_version);
3398 SELECT evergreen.upgrade_deps_block_check('0790', :eg_version);
-- New column defaults to FALSE for every existing class ...
3400 ALTER TABLE config.metabib_class ADD COLUMN combined BOOL NOT NULL DEFAULT FALSE;
-- ... and is switched on only for the stock 'subject' class.
3401 UPDATE config.metabib_class SET combined = TRUE WHERE name = 'subject';
3404 --Check if we can apply the upgrade.
3405 SELECT evergreen.upgrade_deps_block_check('0791', :eg_version);
-- Upgrade 0791: redefine the core staged-search driver. It executes the
-- pre-built relevance query (param_query) through a cursor and post-filters
-- each candidate record for deletion, transcendance, located URIs, copy
-- status, copy location, and OPAC visibility, emitting one
-- search.search_result row per visible hit plus a final summary row.
-- NOTE(review): this is a numbered dump with gaps; several original lines
-- (parameter list entries, DECLARE/BEGIN, IF/ELSE/END IF, PERFORM ... INTO
-- heads) are missing, so the text below is not the complete function --
-- confirm against the stock 0791 upgrade script.
3409 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3411 param_search_ou INT,
3414 param_statuses INT[],
3415 param_locations INT[],
3421 deleted_search BOOL,
-- param_pref_ou extends the located-URI org scope with a "preferred" library
3422 param_pref_ou INT DEFAULT NULL
3423 ) RETURNS SETOF search.search_result AS $func$
3426 current_res search.search_result%ROWTYPE;
3427 search_org_list INT[];
3428 luri_org_list INT[];
3437 core_cursor REFCURSOR;
3438 core_rel_query TEXT;
-- running tallies reported in the trailing summary row
3440 total_count INT := 0;
3441 check_count INT := 0;
3442 deleted_count INT := 0;
3443 visible_count INT := 0;
3444 excluded_count INT := 0;
-- defaults: visibility-check 1000 rows, hard cap 25000, no offset
3448 check_limit := COALESCE( param_check, 1000 );
3449 core_limit := COALESCE( param_limit, 25000 );
3450 core_offset := COALESCE( param_offset, 0 );
3452 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
-- Build search_org_list (where copies may live) and luri_org_list
-- (ancestor orgs whose located URIs count) from the search org unit:
-- positive OU = descend the org tree, negative OU = an org lasso.
3454 IF param_search_ou > 0 THEN
3455 IF param_depth IS NOT NULL THEN
3456 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3458 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
3461 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
3463 ELSIF param_search_ou < 0 THEN
3464 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3466 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3467 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3468 luri_org_list := luri_org_list || tmp_int_list;
-- de-duplicate the accumulated ancestor list
3471 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3473 ELSIF param_search_ou = 0 THEN
3474 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
-- fold the preferred org unit's ancestors into the located-URI scope
3477 IF param_pref_ou IS NOT NULL THEN
3478 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3479 luri_org_list := luri_org_list || tmp_int_list;
-- Stream the caller-supplied relevance query; loop once per candidate.
3482 OPEN core_cursor FOR EXECUTE param_query;
3486 FETCH core_cursor INTO core_result;
3487 EXIT WHEN NOT FOUND;
3488 EXIT WHEN total_count >= core_limit;
3490 total_count := total_count + 1;
-- only rows inside the [offset+1, offset+check_limit] window are vetted
3492 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3494 check_count := check_count + 1;
-- Filter 1: skip result groups whose bibs are all deleted
-- (unless this is an explicit #deleted search).
3496 IF NOT deleted_search THEN
3498 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3500 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3501 deleted_count := deleted_count + 1;
-- Filter 2: transcendant bib sources are always visible -- emit and continue.
3506 FROM biblio.record_entry b
3507 JOIN config.bib_source s ON (b.source = s.id)
3508 WHERE s.transcendant
3509 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3512 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3513 visible_count := visible_count + 1;
3515 current_res.id = core_result.id;
3516 current_res.rel = core_result.rel;
-- collapse metarecord groups with a single source down to that bib id
3520 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3524 current_res.record = core_result.records[1];
3526 current_res.record = NULL;
3529 RETURN NEXT current_res;
-- Filter 3: a located URI ('##URI##' call number) owned by an org in
-- luri_org_list makes the record visible regardless of copies.
3535 FROM asset.call_number cn
3536 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3537 JOIN asset.uri uri ON (map.uri = uri.id)
3538 WHERE NOT cn.deleted
3539 AND cn.label = '##URI##'
3541 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3542 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3543 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3547 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3548 visible_count := visible_count + 1;
3550 current_res.id = core_result.id;
3551 current_res.rel = core_result.rel;
3555 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3559 current_res.record = core_result.records[1];
3561 current_res.record = NULL;
3564 RETURN NEXT current_res;
-- Filter 4: copy-status filter, checked against both directly-attached
-- copies and multi-homed (peer_bib_copy_map) copies.
3569 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3572 FROM asset.call_number cn
3573 JOIN asset.copy cp ON (cp.call_number = cn.id)
3574 WHERE NOT cn.deleted
3576 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3577 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3578 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3583 FROM biblio.peer_bib_copy_map pr
3584 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3585 WHERE NOT cp.deleted
3586 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3587 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3588 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3592 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3593 excluded_count := excluded_count + 1;
-- Filter 5: copy-location filter, same direct + multi-home pattern.
3600 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3603 FROM asset.call_number cn
3604 JOIN asset.copy cp ON (cp.call_number = cn.id)
3605 WHERE NOT cn.deleted
3607 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3608 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3609 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3614 FROM biblio.peer_bib_copy_map pr
3615 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3616 WHERE NOT cp.deleted
3617 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3618 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3619 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3623 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3624 excluded_count := excluded_count + 1;
-- Filter 6: OPAC visibility. Patron (non-staff) searches require an
-- opac-visible copy (direct or multi-homed) within search_org_list ...
3631 IF staff IS NULL OR NOT staff THEN
3634 FROM asset.opac_visible_copies
3635 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3636 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3641 FROM biblio.peer_bib_copy_map pr
3642 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3643 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3644 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3649 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3650 excluded_count := excluded_count + 1;
-- ... while staff searches accept any in-scope copy, and (below) fall
-- back to "record has no copies at all anywhere" as still-visible.
3658 FROM asset.call_number cn
3659 JOIN asset.copy cp ON (cp.call_number = cn.id)
3660 WHERE NOT cn.deleted
3662 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3663 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3669 FROM biblio.peer_bib_copy_map pr
3670 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3671 WHERE NOT cp.deleted
3672 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3673 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3679 FROM asset.call_number cn
3680 JOIN asset.copy cp ON (cp.call_number = cn.id)
3681 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3686 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3687 excluded_count := excluded_count + 1;
-- Survived every filter: emit the result row.
3698 visible_count := visible_count + 1;
3700 current_res.id = core_result.id;
3701 current_res.rel = core_result.rel;
3705 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3709 current_res.record = core_result.records[1];
3711 current_res.record = NULL;
3714 RETURN NEXT current_res;
3716 IF visible_count % 1000 = 0 THEN
3717 -- RAISE NOTICE ' % visible so far ... ', visible_count;
-- Final summary row: NULL id/rel/record, counters filled in.
3722 current_res.id = NULL;
3723 current_res.rel = NULL;
3724 current_res.record = NULL;
3725 current_res.total = total_count;
3726 current_res.checked = check_count;
3727 current_res.deleted = deleted_count;
3728 current_res.visible = visible_count;
3729 current_res.excluded = excluded_count;
3733 RETURN NEXT current_res;
3736 $func$ LANGUAGE PLPGSQL;
3739 -- AFTER UPDATE OR INSERT trigger for biblio.record_entry
-- Re-ingests a bib record after insert/update: authority linking, flat MARC
-- (metabib.full_rec), record attributes (hstore), field entries, located
-- URIs, and metarecord mapping. Deleted bibs instead have their derived
-- rows torn down. Most stages can be disabled via config.internal_flag.
-- NOTE(review): numbered dump with gaps -- IF/ELSE/END IF scaffolding and
-- some statements are missing from the visible text; confirm against the
-- stock upgrade script.
3740 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
3742 transformed_xml TEXT;
3745 xfrm config.xml_transform%ROWTYPE;
-- accumulates name => value pairs for metabib.record_attr
3747 new_attrs HSTORE := ''::HSTORE;
3748 attr_def config.record_attr_definition%ROWTYPE;
-- Deletion path: strip the record's derived/index data and return early.
3751 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
3752 PERFORM * FROM config.internal_flag WHERE
3753 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
3755 -- One needs to keep these around to support searches
3756 -- with the #deleted modifier, so one should turn on the named
3757 -- internal flag for that functionality.
3758 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
3759 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3762 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
3763 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
3764 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
3765 RETURN NEW; -- and we're done
3768 IF TG_OP = 'UPDATE' THEN -- re-ingest?
3769 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
3771 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
3776 -- Record authority linking
3777 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
3779 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
3782 -- Flatten and insert the mfr data
3783 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
3785 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
3787 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
3788 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
-- For each attribute definition, extract a value by exactly one of four
-- strategies: MARC tag/subfield, fixed field, XPath, or phys-char subfield.
3790 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
3792 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
3793 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
3794 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
3795 WHERE record = NEW.id
3796 AND tag LIKE attr_def.tag
3798 WHEN attr_def.sf_list IS NOT NULL
3799 THEN POSITION(subfield IN attr_def.sf_list) > 0
3806 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
3807 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
3809 ELSIF attr_def.xpath IS NOT NULL THEN -- an xpath expression
3811 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
3813 -- See if we can skip the XSLT ... it's expensive
3814 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
3815 -- Can't skip the transform
3816 IF xfrm.xslt <> '---' THEN
3817 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
3819 transformed_xml := NEW.marc;
3822 prev_xfrm := xfrm.name;
3825 IF xfrm.name IS NULL THEN
3826 -- just grab the marcxml (empty) transform
3827 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
3828 prev_xfrm := xfrm.name;
3831 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
3833 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
3834 SELECT m.value INTO attr_value
3835 FROM biblio.marc21_physical_characteristics(NEW.id) v
3836 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
3837 WHERE v.subfield = attr_def.phys_char_sf
3838 LIMIT 1; -- Just in case ...
3842 -- apply index normalizers to attr_value
3844 SELECT n.func AS func,
3845 n.param_count AS param_count,
3847 FROM config.index_normalizer n
3848 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
3849 WHERE attr = attr_def.name
-- Normalizer calls are built dynamically: func(attr_value [, params]),
-- with stored params re-quoted from their JSON-ish '[...]' form.
3851 EXECUTE 'SELECT ' || normalizer.func || '(' ||
3852 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
3854 WHEN normalizer.param_count > 0
3855 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
3858 ')' INTO attr_value;
3862 -- Add the new value to the hstore
3863 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
-- Persist attributes: fresh insert on create/revive, in-place update otherwise.
3867 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivication
3868 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3869 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
3871 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
3877 -- Gather and insert the field entry data
3878 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
3880 -- Located URI magic
3881 IF TG_OP = 'INSERT' THEN
3882 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3884 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3887 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3889 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3893 -- (re)map metarecord-bib linking
3894 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
3895 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
3897 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3899 ELSE -- we're doing an update, and we're not deleted, remap
3900 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
3902 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3908 $func$ LANGUAGE PLPGSQL;
3910 SELECT evergreen.upgrade_deps_block_check('0792', :eg_version);
-- Upgrade 0792: repair permission 544, whose code was mistakenly seeded as
-- the literal string '544'; the guard on code = '544' makes this idempotent
-- and leaves locally-customized rows alone.
3912 UPDATE permission.perm_list SET code = 'URL_VERIFY_UPDATE_SETTINGS' WHERE id = 544 AND code = '544';
3915 SELECT evergreen.upgrade_deps_block_check('0793', :eg_version);
-- Upgrade 0793: adjust the three stock config.best_hold_order rows. Each
-- UPDATE is guarded by name plus (per the AND) additional column matches so
-- that locally-modified orderings are not clobbered.
-- NOTE(review): the SET clauses (original lines 3918-3927, 3937-3946,
-- 3958-3967) and the WHERE-clause continuations are missing from this
-- numbered dump -- the statements below are incomplete as shown; confirm
-- against the stock 0793 upgrade script.
3917 UPDATE config.best_hold_order
3928 WHERE name = 'Traditional' AND
3936 UPDATE config.best_hold_order
3947 WHERE name = 'Traditional with Holds-always-go-home' AND
3957 UPDATE config.best_hold_order
3968 WHERE name = 'Traditional with Holds-go-home' AND
3981 -- These are from 0789, and can and should be run outside of a transaction
-- Per-class full-text search configurations, each cloned from the
-- stopword-free 'english_nostop' configuration so that short/common words
-- remain searchable in titles, names, and identifiers.
3982 CREATE TEXT SEARCH CONFIGURATION title ( COPY = english_nostop );
3983 CREATE TEXT SEARCH CONFIGURATION author ( COPY = english_nostop );
3984 CREATE TEXT SEARCH CONFIGURATION subject ( COPY = english_nostop );
3985 CREATE TEXT SEARCH CONFIGURATION series ( COPY = english_nostop );
3986 CREATE TEXT SEARCH CONFIGURATION identifier ( COPY = english_nostop );
-- psql meta-command: remind the admin of the required follow-up script.
3988 \qecho Please run Open-ILS/src/sql/Pg/version-upgrade/2.3-2.4-supplemental.sh now, which contains additional required SQL to complete your Evergreen upgrade!