diff --git a/appveyor.yml b/appveyor.yml index acdea6078..56524ef64 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -33,12 +33,12 @@ environment: - SYS: MSVC JDK: 21 PG: 13 - - SYS: MSVC - JDK: 21 - PG: 12 - - SYS: MSVC - JDK: 11 - PG: 9.6 +# - SYS: MSVC +# JDK: 21 +# PG: 12 +# - SYS: MSVC +# JDK: 11 +# PG: 9.6 before_build: - ps: .appveyor/appveyor_download_java.ps1 - set JAVA_HOME=%ProgramFiles%\Java\jdk%JDK% diff --git a/pljava-api/src/main/java/module-info.java b/pljava-api/src/main/java/module-info.java index fbfcb8bd4..d501d86a9 100644 --- a/pljava-api/src/main/java/module-info.java +++ b/pljava-api/src/main/java/module-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2020-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -21,14 +21,21 @@ requires transitive java.compiler; exports org.postgresql.pljava; + exports org.postgresql.pljava.adt; + exports org.postgresql.pljava.adt.spi; exports org.postgresql.pljava.annotation; + exports org.postgresql.pljava.model; exports org.postgresql.pljava.sqlgen; exports org.postgresql.pljava.annotation.processing to org.postgresql.pljava.internal; + uses org.postgresql.pljava.Adapter.Service; + uses org.postgresql.pljava.Session; + uses org.postgresql.pljava.model.CatalogObject.Factory; + provides javax.annotation.processing.Processor with org.postgresql.pljava.annotation.processing.DDRProcessor; } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/Adapter.java b/pljava-api/src/main/java/org/postgresql/pljava/Adapter.java new file mode 100644 index 000000000..6783d7275 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/Adapter.java @@ -0,0 +1,2040 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles.Lookup; +import static java.lang.invoke.MethodHandles.collectArguments; +import static java.lang.invoke.MethodHandles.dropArguments; +import static java.lang.invoke.MethodHandles.lookup; +import static java.lang.invoke.MethodHandles.permuteArguments; +import java.lang.invoke.MethodType; +import static java.lang.invoke.MethodType.methodType; + +import static java.lang.reflect.Array.newInstance; +import java.lang.reflect.Method; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.TypeVariable; + +import java.security.Permission; +import java.security.PermissionCollection; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import java.util.Arrays; +import static java.util.Arrays.stream; +import static java.util.Collections.emptyEnumeration; +import static java.util.Collections.enumeration; +import java.util.Enumeration; +import java.util.List; +import static java.util.Objects.requireNonNull; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +import java.util.function.Consumer; +import java.util.function.Predicate; + +import org.postgresql.pljava.adt.spi.AbstractType; +import org.postgresql.pljava.adt.spi.AbstractType.Bindings; +import org.postgresql.pljava.adt.spi.AbstractType.MultiArray; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.adt.spi.TwosComplement; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.TupleTableSlot.Indexed; + +import org.postgresql.pljava.model.SlotTester.Visible; // temporary for test jig + +import static org.postgresql.pljava.adt.spi.AbstractType.erase; +import static org.postgresql.pljava.adt.spi.AbstractType.isSubtype; +import static org.postgresql.pljava.adt.spi.AbstractType.refine; +import static org.postgresql.pljava.adt.spi.AbstractType.specialization; +import static org.postgresql.pljava.adt.spi.AbstractType.substitute; + +/** + * Base for classes that implement data types over raw PostgreSQL datums. + *

+ * A PL/Java data type adapter is a concrete subclass of this class that knows + * the structure of one or more PostgreSQL data types and can convert between + * their raw {@code Datum} form and an appropriate Java class or primitive type + * for use in PL/Java code. It will use the {@code Via...} enum declared here + * (to indicate how it will access the PostgreSQL {@code Datum}), and extend + * an {@code As...} abstract class declared here (to indicate the supported + * Java reference or primitive type). + *

+ * An adapter should be stateless and thread-safe. There should be no need to + * instantiate more than one instance of an adapter for a given type mapping. + *

+ * An adapter has a "top" type T, indicating the type it will present to client + * code, and an "under" type U, which client code can generally wildcard and + * ignore; an implementing class that can be composed over another adapter uses + * U to indicate what that "under" adapter's "top" type must be. The Java + * compiler records enough information for both parameters to allow PL/Java to + * reconstruct the type relationships in a stack of composed adapters. + *

+ * An implementing leaf adapter (which will work directly on PostgreSQL Datum + * rather than being composed over another adapter) can declare {@code Void} + * for U by convention. An adapter meant to be composed over another, where the + * "under" adapter has a primitive type, can declare the primitive type's boxed + * counterpart as U. + *

+ * For a primitive-typed adapter, the "top" type is implicit in the class name + * {@code AsLong}, {@code AsInt}, and so on, and the "under" type follows as the + * parameter U. For ease of reading, the type parameters of the two-parameter + * classes like {@code As} are also in that order, T first. + *

+ * The precise meaning of the "top" type T depends on whether an adapter is + * an instance of {@code As} or of {@code Primitive}. In the + * {@code As} case, the top type is a reference type and is given by T directly. + * In the primitive case, T is the boxed counterpart of the actual top type. + *
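To make the T and U roles concrete, here is a sketch (not part of this changeset; the class name is invented, and the exact constructor and generic signatures are assumed from the declarations later in this file) of a composing adapter that presents an int-producing "under" adapter's value as a Java String:

import org.postgresql.pljava.Adapter;
import org.postgresql.pljava.model.Attribute;

/*
 * Illustrative sketch only: a non-leaf adapter with top type T = String
 * and under type U = Integer, the boxed form of the under adapter's int.
 */
public class AsDecimalString extends Adapter.As<String,Integer>
{
    // a null Via marks this as a composing (non-leaf) adapter
    private static final Adapter.Configuration s_config =
        Adapter.configure(AsDecimalString.class, null);

    public AsDecimalString(Adapter.AsInt<?> over)
    {
        super(s_config, over, String.class);
    }

    // found reflectively by its name and parameter types, as described below
    public String adapt(Attribute attribute, int value)
    {
        return Integer.toString(value);
    }
}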

+ * To preserve type safety, only recognized "leaf" adapters (those registered + * to {@link #configure configure} with a non-null {@link Via via}) + * will be able to manipulate raw {@code Datum}s. An adapter class + * should avoid leaking a {@code Datum} to other code. + */ +public abstract class Adapter implements Visible +{ + /** + * The full generic type returned by this adapter, as refined at the time + * of construction, making use of the type returned by an "under" adapter + * or array contract, if used. + */ + final Type m_topType; + + /** + * The erasure of the type to be returned. + */ + final Class m_topErased; + + /** + * The "under" adapter in the composed case; null in a leaf adapter. + */ + final Adapter m_underAdapter; + + /** + * Method handle constructed for this adapter's fetch operation. + */ + final MethodHandle m_fetchHandle; + + /** + * In this private constructor, witness is declared as + * {@code Type} rather than {@code Class}. + *

+ * It can be invoked that way from {@code As} for array adapters; otherwise, + * the subclass constructors all declare the parameter as {@code Class}. + *

+ * The adapter and contract here are raw types. The accessible subclass + * constructors will constrain their type arguments to be compatible. + */ + private Adapter( + Configuration configuration, Adapter over, Contract using, Type witness) + { + requireNonNull(configuration, + () -> getClass() + " instantiated without a Configuration object"); + if ( getClass() != configuration.m_class ) + throw new IllegalArgumentException( + getClass() + " instantiated with a Configuration object " + + "for the wrong class"); + + if ( configuration instanceof Configuration.Leaf ) + { + if ( null != over ) + throw new IllegalArgumentException( + getClass() + " instantiated with non-null 'over' but is " + + "a leaf adapter"); + + Configuration.Leaf leaf = (Configuration.Leaf)configuration; + + Type top = leaf.m_top; + /* + * If instantiated with a subclass of Contract, the type with + * which it specializes Contract may tell us more than our top + * type precomputed at configuration. + */ + if ( null != using ) + { + if ( witness instanceof TypeWrapper ) + { + top = ((TypeWrapper)witness).wrapped; + witness = null; + } + else + top = specialization(using.getClass(), Contract.class)[0]; + } + + MethodHandle mh = leaf.m_fetch.bindTo(this); + + @SuppressWarnings("unchecked") + Class erased = (Class)erase(top); + + if ( null == witness ) + { + if ( top instanceof TypeVariable + && 1 == ((TypeVariable)top).getBounds().length ) + top = erased; + } + else + { + if ( ! isSubtype(witness, erased) ) + throw new IllegalArgumentException( + "cannot instantiate " + getClass() + " as " + + "adapter producing " + witness); + top = witness; + mh = mh.asType(mh.type().changeReturnType(erase(witness))); + } + m_topType = top; + m_topErased = erased; + m_underAdapter = null; + m_fetchHandle = mh; + return; + } + + /* + * Very well then, it is not a leaf adapter. + */ + + requireNonNull(over, + getClass() + " instantiated with null 'over' but is " + + "a non-leaf adapter"); + if ( null != using ) + throw new IllegalArgumentException( + getClass() + " instantiated with non-null 'using' but is " + + "not a leaf adapter"); + + Configuration.NonLeaf nonLeaf = (Configuration.NonLeaf)configuration; + + Type[] refined = refine(over.m_topType, nonLeaf.m_under, nonLeaf.m_top); + Type under = refined[0]; + Type top = refined[1]; + + if ( null != witness ) + { + if ( ! isSubtype(witness, top) ) + throw new IllegalArgumentException( + "cannot instantiate " + getClass() + " as " + + "adapter producing " + witness); + top = witness; + } + + m_topType = top; + + @SuppressWarnings("unchecked") + Class erased = (Class)erase(top); + m_topErased = erased; + + /* + * 'over' was declared as a raw type to make this constructor also + * usable from the Array subclass constructor. Here, being an ordinary + * composing adapter, we reassert that 'over' is parameterized , as + * the ordinary subclass constructor will have ensured. + */ + @SuppressWarnings("unchecked") + Adapter underAdapter = over; + m_underAdapter = underAdapter; + + MethodHandle producer = nonLeaf.m_adapt.bindTo(this); + MethodHandle fetcher = over.m_fetchHandle; + + MethodType mt = producer + .type() + .changeReturnType(erased) + .changeParameterType(1, erase(under)); + + producer = producer.asType(mt); + fetcher = fetcher.asType( + fetcher.type().changeReturnType(mt.parameterType(1))); + + mt = fetcher + .type() // this is the expected type of a fetcher, but it needs + .changeReturnType(erased); // new return type. 
After collect we will + fetcher = collectArguments(producer, 1, fetcher); // need 1st arg twice + fetcher = permuteArguments(fetcher, mt, 0, 0, 1, 2, 3, 4); // so do that + + m_fetchHandle = fetcher; + } + + /** + * Specifies, for a leaf adapter (one not composed over a lower adapter), + * the form in which the value fetched from PostgreSQL will be presented to + * it (or how it will produce a value to be stored to PostgreSQL). + *
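For illustration, a sketch of a leaf adapter that asks for the datum via Via.INT32SX and presents it as a String (not part of this changeset; the class name is invented, the constructor and generic signatures are assumed from declarations elsewhere in this file, and RegType.INT4 is assumed to be one of the well-known instances in the model package):

import org.postgresql.pljava.Adapter;
import org.postgresql.pljava.model.Attribute;
import org.postgresql.pljava.model.RegType;

/*
 * Illustrative sketch of a leaf adapter: U is Void by convention, the datum
 * is fetched sign-extended as int, and the result is presented as a String.
 * Configuring a leaf adapter requires Adapter.Permission, described below.
 */
public class AsDigits extends Adapter.As<String,Void>
{
    private static final Adapter.Configuration s_config =
        Adapter.configure(AsDigits.class, Adapter.Via.INT32SX);

    public AsDigits()
    {
        super(s_config, null, String.class);
    }

    @Override
    public boolean canFetch(RegType pgType)
    {
        return RegType.INT4 == pgType; // assumes the well-known INT4 instance
    }

    // found reflectively; the int parameter matches Via.INT32SX
    public String fetch(Attribute attribute, int value)
    {
        return Integer.toString(value);
    }
}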

+ * At this level, an adapter is free to use {@code Via.CHAR} and treat + * {@code char} internally as a 16-bit unsigned integral type with no other + * special meaning. If an adapter will return an unsigned 16-bit + * type, it should extend either {@code AsShort.Unsigned} or {@code AsChar}, + * based on whether the value it returns represents UTF-16 character data. + */ + protected enum Via + { + DATUM ( Datum.Input.class, "getDatum"), + INT64SX ( long.class, "getLongSignExtended"), + INT64ZX ( long.class, "getLongZeroExtended"), + DOUBLE ( double.class, "getDouble"), + INT32SX ( int.class, "getIntSignExtended"), + INT32ZX ( int.class, "getIntZeroExtended"), + FLOAT ( float.class, "getFloat"), + SHORT ( short.class, "getShort"), + CHAR ( char.class, "getChar"), + BYTE ( byte.class, "getByte"), + BOOLEAN ( boolean.class, "getBoolean"); + + Via(Class type, String method) + { + try + { + MethodHandle h; + h = lookup().findVirtual(Datum.Accessor.class, method, + type.isPrimitive() + ? methodType( + type, Object.class, int.class) + : methodType( + type, Object.class, int.class, Attribute.class)); + + if ( type.isPrimitive() ) + h = dropArguments(h, 3, Attribute.class); + + m_handle = h; + } + catch ( ReflectiveOperationException e ) + { + throw wrapped(e); + } + } + + MethodHandle m_handle; + } + + @Override + public String toString() + { + Class c = getClass(); + Module m = c.getModule(); + return + c.getModule().getName() + "/" + + c.getCanonicalName().substring(1 + c.getPackageName().length() ) + + " to produce " + topType(); + } + + /** + * Method that a leaf {@code Adapter} must implement to indicate whether it + * is capable of fetching a given PostgreSQL type. + *

+ * In a composing adapter, this default implementation delegates to + * the adapter beneath. + * @throws UnsupportedOperationException if called in a leaf adapter + */ + public boolean canFetch(RegType pgType) + { + if ( null != m_underAdapter ) + return m_underAdapter.canFetch(pgType); + throw new UnsupportedOperationException( + toString() + " is a leaf adapter and does not override canFetch"); + } + + /** + * Method that an {@code Adapter} may override to indicate whether it + * is capable of fetching a given PostgreSQL attribute. + *

+ * If not overridden, this implementation delegates to the adapter beneath, + * if composed; in a leaf adapter, it delegates to + * {@link #canFetch(RegType) canFetch} for the attribute's declared + * PostgreSQL type. + */ + public boolean canFetch(Attribute attr) + { + if ( null != m_underAdapter ) + return m_underAdapter.canFetch(attr); + return canFetch(attr.type()); + } + + /** + * Method that an {@code Adapter} must implement to indicate whether it + * is capable of returning some usable representation of SQL null values. + *

+ * An {@code Adapter} that cannot do so should only be used with values that
+ * are known never to be null; it will throw an exception if asked to fetch
+ * a value that is null.
+ *
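A wrapper of that kind can be built by composition, as the next paragraph describes. Here is a sketch (invented names, assumed generic bounds; not part of this changeset) that maps SQL null to Optional.empty():

import java.util.Optional;

import org.postgresql.pljava.Adapter;
import org.postgresql.pljava.model.Attribute;

/*
 * Illustrative sketch: composes over another reference-typed adapter and
 * maps SQL null to Optional.empty() instead of Java null.
 */
public class AsOptionalString extends Adapter.As<Optional<String>,String>
{
    private static final Adapter.Configuration s_config =
        Adapter.configure(AsOptionalString.class, null);

    public AsOptionalString(Adapter.As<String,?> over)
    {
        super(s_config, over, null);
    }

    public Optional<String> adapt(Attribute attribute, String value)
    {
        return Optional.of(value);
    }

    @Override
    public Optional<String> fetchNull(Attribute attribute)
    {
        return Optional.empty();
    }
}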

+ * An adapter usable with null values can be formed by composing, for + * example, an adapter producing {@code Optional} over an adapter that + * cannot fetch nulls. + */ + public abstract boolean canFetchNull(); + + /** + * A static method to indicate the type returned by a given {@code Adapter} + * subclass, based only on the type information recorded for it by the Java + * compiler. + *

+ * The type returned could contain free type variables that may be given + * concrete values when the instance {@link #topType() topType} method is + * called on a particular instance of the class. + *

+ * When cls is a subclass of {@code Primitive}, this method + * returns the {@code Class} object for the actual primitive type, + * not the boxed type. + */ + public static Type topType(Class cls) + { + Type[] params = specialization(cls, Adapter.class); + if ( null == params ) + throw new IllegalArgumentException( + cls + " does not extend Adapter"); + Type top = params[0]; + if ( Primitive.class.isAssignableFrom(cls) ) + { + top = methodType((Class)top).unwrap().returnType(); + assert ((Class)top).isPrimitive(); + } + return top; + } + + /** + * The full generic {@link Type Type} this Adapter presents to Java. + *

+ * Unlike the static method, this instance method, on an adapter formed + * by composition, returns the actual type obtained by unifying + * the "under" adapter's top type with the top adapter's "under" type, then + * making the indicated substitutions in the top adapter's "top" type. + *

+ * Likewise, for an adapter constructed with an array contract and an + * adapter for the element type, the element adapter's "top" type is unified + * with the contract's element type, and this method returns the contract's + * result type with the same substitutions made. + */ + public Type topType() + { + return m_topType; + } + + /** + * A static method to indicate the "under" type expected by a given + * {@code Adapter} subclass that is intended for composition over another + * adapter, based only on the type information recorded for it by the Java + * compiler. + *

+ * The type returned could contain free type variables. + */ + public static Type underType(Class cls) + { + Type[] params = specialization(cls, Adapter.class); + if ( null == params ) + throw new IllegalArgumentException( + cls + " does not extend Adapter"); + return params[1]; + } + + /** + * A class that is returned by the {@link #configure configure} method, + * intended for use during an {@code Adapter} subclass's static + * initialization, and must be supplied to the constructor when instances + * of the class are created. + */ + protected static abstract class Configuration + { + final Class m_class; + /** + * In the case of a primitive-typed adapter, this will really be the + * primitive Class object, not the corresponding boxed class. + */ + final Type m_top; + + Configuration(Class cls, Type top) + { + m_class = cls; + m_top = top; + } + + static class Leaf extends Configuration + { + final MethodHandle m_fetch; + + Leaf(Class cls, Type top, MethodHandle fetcher) + { + super(cls, top); + m_fetch = fetcher; + } + } + + static class NonLeaf extends Configuration + { + /** + * For an adapter meant to compose over a primitive-typed one, this + * is the actual primitive class object for the under-adapter's + * expected return type, not the boxed counterpart. + */ + final Type m_under; + final MethodHandle m_adapt; + + NonLeaf( + Class cls, Type top, Type under, + MethodHandle fetcher) + { + super(cls, top); + m_under = under; + m_adapt = fetcher; + } + } + } + + /** + * Throws a security exception if permission to configure an adapter + * isn't held. + *

+ * For the time being, there is only Permission("*", "fetch"), so this needs + * no parameters and can use a static instance of the permission. + */ + @SuppressWarnings("removal") // JEP 411 + private static void checkAllowed() + { + Service.CHECKER.accept(Permission.INSTANCE); + } + + /** + * Method that must be called in static initialization of an {@code Adapter} + * subclass, producing a {@code Configuration} object that must be passed + * to the constructor when creating an instance. + *

+ * If the adapter class is in a named module, its containing package must be + * exported to at least {@code org.postgresql.pljava}. + *
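A hypothetical module descriptor satisfying that requirement (the module and package names are invented for illustration):

// module-info.java for a module that supplies adapter classes
module com.example.adapters {
    requires org.postgresql.pljava;
    exports com.example.adapters to org.postgresql.pljava;
}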

+ * When a leaf adapter (one that does not compose over some other adapter, + * but acts directly on PostgreSQL datums) is configured, the necessary + * {@link Permission Permission} is checked. + * @param cls The Adapter subclass being configured. + * @param via null for a composing (non-leaf) adapter; otherwise a value + * of the {@link Via} enumeration, indicating how the underlying PostgreSQL + * datum will be presented to the adapter. + * @throws SecurityException if the class being configured represents a leaf + * adapter and the necessary permission is not held. + */ + protected static Configuration configure( + Class cls, Via via) + { + Adapter.class.getModule().addReads(cls.getModule()); + Type top = topType(cls); + Type under = underType(cls); + Class topErased = erase(top); + Class underErased = erase(under); + + MethodHandle underFetcher = null; + String fetchName; + Predicate fetchPredicate; + + if ( Void.class == underErased ) + { + checkAllowed(); + requireNonNull(via, "a leaf Adapter must have a non-null Via"); + underFetcher = via.m_handle; + underErased = underFetcher.type().returnType(); + Class[] params = { Attribute.class, underErased }; + final String fn = fetchName = "fetch"; + fetchPredicate = m -> fn.equals(m.getName()) + && Arrays.equals(m.getParameterTypes(), params); + } + else + { + if ( null != via ) + throw new IllegalArgumentException( + "a non-leaf (U is not Void) adapter must have null Via"); + final String fn = fetchName = "adapt"; + MethodType mt = methodType(underErased); + if ( mt.hasWrappers() ) // Void, handled above, won't be seen here + { + Class underOrig = underErased; + Class underPrim = mt.unwrap().returnType(); + fetchPredicate = m -> + { + if ( ! fn.equals(m.getName()) ) + return false; + Class[] ptypes = m.getParameterTypes(); + return + 2 == ptypes.length && Attribute.class == ptypes[0] && + ( underOrig == ptypes[1] || underPrim == ptypes[1] ); + }; + } + else + { + Class[] params = { Attribute.class, underErased }; + fetchPredicate = m -> fn.equals(m.getName()) + && Arrays.equals(m.getParameterTypes(), params); + } + } + + Method[] fetchCandidates = stream(cls.getMethods()) + .filter(fetchPredicate).toArray(Method[]::new); + if ( 1 < fetchCandidates.length ) + fetchCandidates = stream(fetchCandidates) + .filter(m -> ! m.isBridge()).toArray(Method[]::new); + if ( 1 != fetchCandidates.length ) + throw new IllegalArgumentException( + cls + " lacks " + fetchName + " method with the " + + "expected signature"); + if ( ! topErased.isAssignableFrom(fetchCandidates[0].getReturnType()) ) + throw new IllegalArgumentException( + cls + " lacks " + fetchName + " method with the " + + "expected return type"); + + MethodHandle fetcher; + + try + { + fetcher = lookup().unreflect(fetchCandidates[0]); + } + catch ( IllegalAccessException e ) + { + throw new IllegalArgumentException( + cls + " has " + fetchName + " method that is inaccessible", + e); + } + + /* + * Adjust the return type. isAssignableFrom was already checked, so + * this can only be a no-op or a widening, to make sure the handle + * will fit invokeExact with the expected return type. 
+ */ + fetcher = fetcher.asType(fetcher.type().changeReturnType(topErased)); + + if ( null != via ) + { + fetcher = collectArguments(fetcher, 2, underFetcher); + return new Configuration.Leaf(cls, top, fetcher); + } + + // unbound virtual handle's type includes receiver; 2nd param is index 2 + Class asFound = fetcher.type().parameterType(2); + if ( asFound.isPrimitive() ) + under = underErased = asFound; + + return new Configuration.NonLeaf(cls, top, under, fetcher); + } + + /** + * Provided to serve as a superclass for a 'container' class that is used + * to group several related adapters without being instantiable + * as an adapter itself. + *

+ * By being technically a subclass of {@code Adapter}, the container class + * will have access to the protected {@code Configuration} class and + * {@code configure} method. + */ + public static abstract class Container extends Adapter + { + protected Container() + { + super(null, null, null, null); + } + } + + /** + * An {@code Adapter} that reports it can be used on any type, but cannot + * fetch anything. + *

+ * Can be useful when constructing a {@link Contract.Array Contract.Array} + * that will inspect metadata for an array (its element type or dimensions) + * without fetching any elements. + */ + public static final class Opaque extends As + { + /** + * Instance of the {@code Opaque} adapter. + */ + public static final Opaque INSTANCE; + + /** + * Returns true unconditionally, so the {@code Opaque} adapter can be + * applied to any type or when type is unknown. + *

+ * However, any actual attempt to fetch a non-null value + * using the {@code Opaque} adapter will incur + * an {@code UnsupportedOperationException}. + */ + @Override + public boolean canFetch(RegType pgType) + { + return true; + } + + private Void fetch( + Attribute a, Datum.Accessor acc, B buffer, int offset, + Attribute aa) + { + throw new UnsupportedOperationException( + "Adapter.Opaque cannot fetch anything"); + } + + private Opaque(Configuration c) + { + super(c, null, null); + } + + static + { + try + { + Lookup lup = lookup(); + MethodHandle fetcher = lup.findVirtual( + Opaque.class, "fetch", methodType(Void.class, + Attribute.class, Datum.Accessor.class, Object.class, + int.class, Attribute.class)); + Configuration c = + new Configuration.Leaf(Opaque.class, Void.class, fetcher); + + INSTANCE = new Opaque(c); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + } + + /** + * Superclass for adapters that fetch something and return it as a reference + * type T. + *

+ * The type variable U for the thing consumed gets no enforcement from + * the compiler, because any extending adapter class provides its own + * {@code T fetch(Attribute,something)} method, with no abstract version + * inherited from this class to constrain it. The method will be found + * reflectively by name and parameter types, so the "something" only has to + * match the type of the accessor method specified with {@code Via}, or the + * type returned by an underlying adapter that this one will be composed + * over. + *

+ * In particular, that means this is the class to extend even if using a
+ * primitive accessor method, or composing over an adapter that returns a
+ * primitive type, as long as this adapter will return a reference type T.
+ * Such an adapter simply declares that it extends {@code As<T,Void>} when
+ * based on a primitive accessor method, or {@code As<T,boxed-class>} when
+ * composed over another adapter of primitive type, where boxed-class is the
+ * boxed counterpart of the other adapter's primitive type.
+ *

+ * When Java's reflection methods on generic types are used to compute + * the (non-erased) result type of a stack of composed adapters, the type + * variable U can be used in relating the input to the output type of each. + */ + public abstract static class As + extends Adapter + implements ArrayProto + { + private final MethodHandle m_fetchHandleErased; + + /** + * Constructor for a simple leaf {@code Adapter}, or a composing + * (non-leaf) {@code Adapter} when passed another adapter over which + * it should be composed. + * @param c Configuration instance generated for this class + * @param over null for a leaf Adapter, otherwise another Adapter + * to compose this one over + * @param witness if not null, the top type the resulting + * adapter will produce, if a Class object can specify that more + * precisely than the default typing rules. + */ + protected As(Configuration c, Adapter over, Class witness) + { + super(c, over, null, witness); + + MethodHandle mh = m_fetchHandle; + m_fetchHandleErased = + mh.asType(mh.type().changeReturnType(Object.class)); + } + + /** + * Constructor for a leaf {@code Adapter} that is based on + * a {@code Contract}. + * @param using the scalar Contract that will be used to produce + * the value returned + * @param witness if not null, the top type the resulting + * adapter will produce, if a Class object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected As( + Contract.Scalar using, Class witness, Configuration c) + { + super(c, null, using, witness); + + MethodHandle mh = m_fetchHandle; + m_fetchHandleErased = + mh.asType(mh.type().changeReturnType(Object.class)); + } + + /** + * Used only by the {@code Array} subclass below. + *

+ * The contract and element adapter here are raw types. The accessible + * subclass constructors will permit only compatible combinations of + * parameterized types. + */ + private As( + Contract.Array using, Adapter adapter, Type witness, + Configuration c) + { + super(c, null, using, + witness != null ? witness : refinement(using, adapter)); + + MethodHandle mh = m_fetchHandle; + m_fetchHandleErased = + mh.asType(mh.type().changeReturnType(Object.class)); + } + + /** + * Returns the type that will be produced by the array contract + * using when applied to the element-type adapter + * adapter. + *

+ * Determined by unifying the contract's element type with + * the result type of adapter, then repeating any resulting + * substitutions in the contract's result type. + */ + private static Type refinement(Contract.Array using, Adapter adapter) + { + Type[] unrefined = + specialization(using.getClass(), Contract.Array.class); + Type result = unrefined[0]; + Type element = unrefined[1]; + /* + * A Contract that expects a primitive-typed adapter must already be + * specialized to one primitive type, so there is nothing to refine. + */ + if ( adapter instanceof Primitive ) + return result; + return refine(adapter.topType(), element, result)[1]; + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final T fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (T) + m_fetchHandleErased.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * A default implementation of {@code canFetchNull} that unconditionally + * returns true. + *

+ * An adapter that extends this class, if it does not override + * {@link #fetchNull fetchNull}, will simply map any SQL null value + * to a Java null. + */ + @Override + public boolean canFetchNull() + { + return true; + } + + /** + * Determines the value to which SQL null should be mapped. + *

+ * If not overridden, this implementation returns Java null. + */ + public T fetchNull(Attribute a) + { + return null; + } + + /** + * Allocate an array of the given length with this adapter's + * result type as its component type. + */ + @SuppressWarnings("unchecked") + public T[] arrayOf(int length) + { + return (T[])newInstance(m_topErased, length); + } + } + + /** + * Abstract supertype of array adapters. + *

+ * Instantiating an array adapter requires supplying an array contract + * and a compatible adapter for the element type, to be stored in the + * corresponding final fields here, which are declared with raw types. + * The several accessible constructors enforce the various compatible + * parameterizations for the two arguments. + */ + public abstract static class Array extends As + { + /** + * Returns an {@code Adapter.Array} that simply returns the element type + * of the fetched array. + *

+ * Can be used when the only statically-known type for an array + * is the polymorphic {@link RegType#ANYARRAY ANYARRAY} type, + * to determine the actual element type of a given array. A suitable + * {@code Adapter} for that type can then be chosen, and used + * to construct an array adapter that can access the content + * of the array. + */ + public static Array elementType() + { + return Service.INSTANCE.elementTypeAdapter(); + } + + /** + * The {@code Contract.Array} that this array adapter will use, + * together with the supplied element-type adapter. + *

+ * Declared here as the raw type. The accessible constructors enforce + * the compatibility requirements between this and the supplied + * element adapter. + */ + protected final Contract.Array m_contract; + + /** + * The {@code Adapter} that this array adapter will use for the array's + * element type, together with the supplied contract. + *

+ * Declared here as the raw type. The accessible constructors enforce + * the compatibility requirements between this and the supplied + * contract. + */ + protected final Adapter m_elementAdapter; + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a reference-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, As adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a long-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsLong adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a double-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsDouble adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and an int-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsInt adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a float-returning {@code Adapter} + * for the element type. 
+ * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsFloat adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a short-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsShort adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a char-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsChar adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a byte-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsByte adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a boolean-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. 
+ * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsBoolean adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + } + + /** + * Ancestor class for adapters that fetch something and return it as + * a Java primitive type. + *

+ * Subclasses for integral types, namely {@code AsLong}, {@code AsInt},
+ * and {@code AsShort}, cannot be extended directly, but only via their
+ * {@code Signed} or {@code Unsigned} nested subclasses, according to how
+ * the value is meant to be used. Nothing can change how Java treats the
+ * primitive types (always as signed), but the {@code Signed} and
+ * {@code Unsigned} subclasses here offer methods for the operations that
+ * differ, allowing the right behavior to be achieved if those methods
+ * are used.
+ *
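For example, a sketch of a leaf adapter extending AsInt.Signed (the class name is invented; the constructor and generic signatures are assumed from the declarations below, and RegType.INT4 is assumed to be a well-known instance in the model package):

import org.postgresql.pljava.Adapter;
import org.postgresql.pljava.model.Attribute;
import org.postgresql.pljava.model.RegType;

/*
 * Illustrative sketch: a signed 32-bit leaf adapter, fetching via
 * Via.INT32SX and returning the Java primitive int unchanged.
 */
public class AsPlainInt extends Adapter.AsInt.Signed<Void>
{
    private static final Adapter.Configuration s_config =
        Adapter.configure(AsPlainInt.class, Adapter.Via.INT32SX);

    public AsPlainInt()
    {
        super(s_config, null);
    }

    @Override
    public boolean canFetch(RegType pgType)
    {
        return RegType.INT4 == pgType; // assumed well-known instance
    }

    public int fetch(Attribute attribute, int value)
    {
        return value;
    }
}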

+ * Whether an adapter extends {@code AsShort.Unsigned} or {@code AsChar} + * (also an unsigned 16-bit type) should be determined based on whether + * the resulting value is meant to have a UTF-16 character meaning. + */ + public abstract static class Primitive + extends Adapter + implements ArrayProto + { + private > Primitive(Configuration c, A over) + { + super(c, over, null, null); + } + + /** + * Implementation of {@code canFetchNull} that unconditionally returns + * false, as primitive adapters have no reliably distinguishable values + * to which SQL null can be mapped. + */ + @Override + public boolean canFetchNull() + { + return false; + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code long} + * adapters. + */ + public abstract static class AsLong extends Primitive + implements TwosComplement + { + private > AsLong(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final long fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (long) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public long fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java long", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code long} adapters. + */ + public abstract static class Signed extends AsLong + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code long} adapters. + */ + public abstract static class Unsigned extends AsLong + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code double} adapters. + */ + public abstract static class AsDouble + extends Primitive + { + protected > AsDouble(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final double fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (double) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public double fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java double", "22002")); + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code int} + * adapters. + */ + public abstract static class AsInt extends Primitive + implements TwosComplement + { + private > AsInt(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final int fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (int) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public int fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java int", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code int} adapters. + */ + public abstract static class Signed extends AsInt + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code int} adapters. + */ + public abstract static class Unsigned extends AsInt + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code float} adapters. + */ + public abstract static class AsFloat extends Primitive + { + protected > AsFloat(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final float fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (float) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public float fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java float", "22002")); + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code short} + * adapters. + */ + public abstract static class AsShort extends Primitive + implements TwosComplement + { + private > AsShort(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final short fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (short) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public short fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java short", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code short} adapters. + */ + public abstract static class Signed extends AsShort + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code short} adapters. + */ + public abstract static class Unsigned extends AsShort + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code char} adapters. + */ + public abstract static class AsChar extends Primitive + { + protected > AsChar(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final char fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (char) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public char fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java char", "22002")); + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code byte} + * adapters. + */ + public abstract static class AsByte extends Primitive + implements TwosComplement + { + private > AsByte(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final byte fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (byte) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public byte fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java byte", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code byte} adapters. + */ + public abstract static class Signed extends AsByte + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code byte} adapters. + */ + public abstract static class Unsigned extends AsByte + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code boolean} adapters. + */ + public abstract static class AsBoolean + extends Primitive + { + protected > AsBoolean(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final boolean fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (boolean) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public boolean fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java boolean", "22002")); + } + } + + /** + * A marker interface to be extended by functional interfaces that + * serve as ADT contracts. + *

+ * It facilitates the declaration of "dispenser" interfaces by which + * one contract can rely on others. + * @param the type to be returned by an instance of the contract + */ + public interface Contract + { + /** + * Marker interface for contracts for simple scalar types. + */ + interface Scalar extends Contract + { + } + + /** + * Base for functional interfaces that serve as contracts + * for array-like types. + *

+ * The distinguishing feature is an associated {@code Adapter} handling + * the element type of the array-like type. This form of contract may + * be useful for range and multirange types as well as for arrays. + * @param the type to be returned by an instance of the contract. + * @param the type returned by an associated {@code Adapter} for + * the element type (or the boxed type, if the adapter returns + * a primitive type). + * @param The subtype of {@code Adapter} that the contract requires; + * reference-returning ({@code As}) and all of the primitive-returning + * types must be distinguished. + */ + public interface Array> extends Contract + { + /** + * Constructs a representation T representing + * a PostgreSQL array. + * @param nDims the number of array dimensions (always one half of + * {@code dimsAndBounds.length}, but passed separately for + * convenience) + * @param dimsAndBounds the first nDims elements + * represent the total number of valid indices for each dimension, + * and the next nDims elements represent the first valid index for each + * dimension. For example, if nDims is 3, dimsAndBounds[1] is 6, and + * dimsAndBounds[4] is -2, then the array's second dimension uses + * indices in [-2,4). The array is a copy and may be used freely. + * @param adapter an Adapter producing a representation of + * the array's element type. + * @param slot A TupleTableSlot with multiple components accessible + * by a (single, flat) index, all of the same type, described by + * a one-element TupleDescriptor. + */ + T construct( + int nDims, int[] dimsAndBounds, A adapter, Indexed slot) + throws SQLException; + } + } + + /** + * Functional interface able to dispense one instance of an ADT by passing + * its constituent values to a supplied {@code Contract} and returning + * whatever that returns. + */ + @FunctionalInterface + public interface Dispenser> + { + T get(U constructor); + } + + /** + * Functional interface able to dispense multiple instances of an ADT + * identified by a zero-based index, passing the its constituent values + * to a supplied {@code Contract} and returning whatever that returns. + */ + @FunctionalInterface + public interface PullDispenser> + { + T get(int index, U constructor); + } + + private static RuntimeException wrapped(Throwable t) + { + if ( t instanceof RuntimeException ) + return (RuntimeException)t; + if ( t instanceof Error ) + throw (Error)t; + return new AdapterException(t.getMessage(), t); + } + + /** + * A lightweight unchecked exception used to wrap checked ones + * (often {@link SQLException}) in settings where checked ones are a bother. + *
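Stepping back to the Contract.Array interface defined above: a sketch of an implementation that inspects only array metadata, as the Opaque discussion earlier suggests (the class name is invented and the generic parameters are assumed, since they are elided in this view of the file):

import java.util.Arrays;

import org.postgresql.pljava.Adapter;
import org.postgresql.pljava.model.TupleTableSlot.Indexed;

/*
 * Illustrative sketch: returns the per-dimension sizes of an array
 * without fetching any of its elements.
 */
public class Dimensions
    implements Adapter.Contract.Array<int[],Void,Adapter.Opaque>
{
    @Override
    public int[] construct(
        int nDims, int[] dimsAndBounds, Adapter.Opaque adapter, Indexed slot)
    {
        // the first nDims elements give the number of valid indices
        // in each dimension
        return Arrays.copyOfRange(dimsAndBounds, 0, nDims);
    }
}

Paired with Opaque.INSTANCE and one of the Array constructors above, such a contract could report an array's shape without touching its elements.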

+ * The idea may or may not be worth keeping, and either way, this particular + * exception might not be part of any final API. + */ + public static class AdapterException extends RuntimeException + { + AdapterException(String message, Throwable cause) + { + super(message, cause, true, false); + } + + /** + * Unwraps this wrapper's cause and returns it, if it is an instance of + * the exception type declared; otherwise, just throws this + * wrapper again. + */ + public X unwrap(Class declared) + { + Throwable t = getCause(); + if ( declared.isInstance(t) ) + return declared.cast(t); + throw this; + } + } + + /** + * A permission allowing the creation of a leaf {@code Adapter}. + *
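A usage sketch for the unwrap method defined above (the surrounding class, method, and Runnable parameter are invented for illustration):

import java.sql.SQLException;

import org.postgresql.pljava.Adapter;

public class UnwrapExample
{
    // doWork stands for any code that may throw Adapter.AdapterException
    // wrapping an SQLException.
    static void call(Runnable doWork) throws SQLException
    {
        try
        {
            doWork.run();
        }
        catch ( Adapter.AdapterException e )
        {
            throw e.unwrap(SQLException.class); // rethrows e itself otherwise
        }
    }
}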

+ * The proper spelling in a policy file is + * {@code org.postgresql.pljava.Adapter$Permission}. + *

+ * For the time being, only {@code "*"} is allowed as the name, + * and only {@code "fetch"} as the actions. + *

+ * Only a "leaf" adapter (one that will interact with PostgreSQL datum + * values directly) requires permission. Definition of composing adapters + * (those that can be applied over another adapter and transform the Java + * values somehow) is unrestricted. + */ + public static final class Permission extends java.security.Permission + { + private static final long serialVersionUID = 1L; + + /** + * An instance of this permission (not a singleton, merely one among + * possible others). + */ + static final Permission INSTANCE = new Permission("*", "fetch"); + + public Permission(String name, String actions) + { + super("*"); + if ( ! ( "*".equals(name) && "fetch".equals(actions) ) ) + throw new IllegalArgumentException( + "the only currently-allowed name and actions are " + + "* and fetch, not " + name + " and " + actions); + } + + @Override + public boolean equals(Object other) + { + return other instanceof Permission; + } + + @Override + public int hashCode() + { + return 131129; + } + + @Override + public String getActions() + { + return "fetch"; + } + + @Override + public PermissionCollection newPermissionCollection() + { + return new Collection(); + } + + @Override + public boolean implies(java.security.Permission p) + { + return p instanceof Permission; + } + + static class Collection extends PermissionCollection + { + private static final long serialVersionUID = 1L; + + Permission the_permission = null; + + @Override + public void add(java.security.Permission p) + { + if ( isReadOnly() ) + throw new SecurityException( + "attempt to add a Permission to a readonly " + + "PermissionCollection"); + + if ( ! (p instanceof Permission) ) + throw new IllegalArgumentException( + "invalid in homogeneous PermissionCollection: " + p); + + if ( null == the_permission ) + the_permission = (Permission) p; + } + + @Override + public boolean implies(java.security.Permission p) + { + if ( null == the_permission ) + return false; + return the_permission.implies(p); + } + + @Override + public Enumeration elements() + { + if ( null == the_permission ) + return emptyEnumeration(); + return enumeration(List.of(the_permission)); + } + } + } + + /** + * Specification of a service supplied by the internals module for certain + * operations, such as specially instantiating array adapters based on + * {@code ArrayBuilder}s constructed here. + */ + public static abstract class Service + { + static final Service INSTANCE; + static final Consumer CHECKER; + + static + { + INSTANCE = ServiceLoader.load( + Service.class.getModule().getLayer(), Service.class) + .findFirst().orElseThrow(() -> new ServiceConfigurationError( + "could not load PL/Java Adapter.Service")); + CHECKER = INSTANCE.permissionChecker(); + } + + static + Array buildArrayAdapter( + ArrayBuilder builder, TypeWrapper w) + { + return INSTANCE.buildArrayAdapterImpl(builder, w); + } + + /** + * Builds an array adapter, given an {@code ArrayBuilder} (which wraps + * this {@code Adapter} and can describe the resulting array type), and + * an {@code TypeWrapper}. + *

+ * The {@code TypeWrapper} is a contrivance so that the computed array + * type can be passed back up through the constructors in a non-racy + * way. + */ + protected abstract + Array buildArrayAdapterImpl( + ArrayBuilder builder, TypeWrapper w); + + /** + * Returns a permission checker appropriate to whether PL/Java is + * running with enforcement or not. + */ + protected abstract + Consumer permissionChecker(); + + protected abstract + Array elementTypeAdapter(); + + /** + * An upcall from the implementation layer to obtain the + * {@code MultiArray} from an {@code ArrayBuilder} without cluttering + * the latter's exposed API. + */ + protected MultiArray multiArray(ArrayBuilder builder) + { + return builder.multiArray(); + } + + /** + * An upcall from the implementation layer to obtain the + * {@code Adapter} wrapped by an {@code ArrayBuilder} without cluttering + * the latter's exposed API. + */ + protected Adapter adapter(ArrayBuilder builder) + { + return builder.m_adapter; + } + } + + /** + * A class that sneakily implements {@link Type} just so it can be passed + * up through the witness parameter of existing constructors, + * and carry the computed type of an array adapter to be constructed. + *

+ * Can only be instantiated here, to limit the ability for arbitrary code + * to supply computed (or miscomputed) types. + *

+ * The implementation layer will call {@link #setWrappedType setWrappedType} + * and then pass the wrapper to the appropriate adapter constructor. + * @hidden + */ + public static class TypeWrapper implements Type + { + @Override + public String getTypeName() + { + return "(a PL/Java TypeWrapper)"; + } + + private Type wrapped; + + private TypeWrapper() { } + + public void setWrappedType(Type t) + { + wrapped = t; + } + } + + /** + * Mixin allowing properly-typed array adapters of various dimensionalities + * to be derived from an adapter for the array component type. + *
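A sketch of the mixin in use; the element adapter here is hypothetical, and the generic type parameters (which do not survive in this rendering) are elided:

    // elementAdapter: some leaf Adapter, obtained elsewhere, that implements ArrayProto
    var oneDim   = elementAdapter.a1().build();        // adapter producing T[]
    var threeDim = elementAdapter.a2().a1().build();   // adapter producing T[][][]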

+ * If a is an adapter producing type T, then + * {@code a.a4().a2()} is an {@code ArrayBuilder} that can build a + * six-dimensional array adapter producing type T[][][][][][]. + * + * @param Type of a one-dimension array of the component type; the type + * a builder obtained with a1() would build. + */ + public interface ArrayProto + { + /** + * Returns a builder that will make an array adapter returning + * a one-dimension Java array of this {@code Adapter}'s Java type. + */ + default ArrayBuilder a1() + { + return new ArrayBuilder(this, 1); + } + + /** + * Returns a builder that will make an array adapter returning + * a two-dimension Java array of this {@code Adapter}'s Java type. + */ + default ArrayBuilder a2() + { + return new ArrayBuilder(this, 2); + } + + /** + * Returns a builder that will make an array adapter returning + * a four-dimension Java array of this {@code Adapter}'s Java type. + */ + default ArrayBuilder a4() + { + return new ArrayBuilder(this, 4); + } + } + + /** + * Builder to derive properly-typed array adapters of various + * dimensionalities, first obtained from an {@link ArrayProto}. + * + * @param The array type represented by this builder. a1() will produce + * a builder for TA[], and so on. + * @param The type of a one-dimension array of the original component + * type; remains unchanged by increases to the dimensionality of TA. + */ + @SuppressWarnings("unchecked") + public static final class ArrayBuilder + { + final Adapter m_adapter; + private int m_dimensions; + + /** + * Records the adapter for the component type (necessarily an instance + * of {@code Adapter} but here typed as {@code ArrayProto} to simplify + * call sites), and the dimensionality of array to be built. + */ + ArrayBuilder(ArrayProto adapter, int dimensions) + { + m_adapter = (Adapter)requireNonNull(adapter); + m_dimensions = dimensions; + } + + /** + * Returns an array adapter that will produce arrays with the chosen + * number of dimensions, and the original adapter's + * {@link #topType() topType} as the component type. + */ + public Array build() + { + return Service.buildArrayAdapter(this, new TypeWrapper()); + } + + MultiArray multiArray() + { + return new MultiArray(m_adapter.topType(), m_dimensions); + } + + /** + * Adds one to the result-array dimensions of the {@code Adapter} this + * builder will build. + * @return this builder, with dimensions increased, and a sneaky + * unchecked cast to the corresponding generic type. + */ + public ArrayBuilder a1() + { + m_dimensions += 1; + return (ArrayBuilder)this; + } + + /** + * Adds two to the result-array dimensions of the {@code Adapter} this + * builder will build. + * @return this builder, with dimensions increased, and a sneaky + * unchecked cast to the corresponding generic type. + */ + public ArrayBuilder a2() + { + m_dimensions += 2; + return (ArrayBuilder)this; + } + + /** + * Adds four to the result-array dimensions of the {@code Adapter} this + * builder will build. + * @return this builder, with dimensions increased, and a sneaky + * unchecked cast to the corresponding generic type. 
+ */ + public ArrayBuilder a4() + { + m_dimensions += 4; + return (ArrayBuilder)this; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/Lifespan.java b/pljava-api/src/main/java/org/postgresql/pljava/Lifespan.java new file mode 100644 index 000000000..672079d5c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/Lifespan.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import org.postgresql.pljava.model.MemoryContext; // javadoc +import org.postgresql.pljava.model.ResourceOwner; // javadoc + +/** + * Model of any notional object in PostgreSQL or PL/Java that has a definite + * temporal existence, with a detectable end, and so can be used to scope the + * lifetime of any PL/Java object that has corresponding native resources. + *

+ * A {@code Lifespan} generalizes over assorted classes that can play that role, + * such as PostgreSQL's {@link ResourceOwner ResourceOwner} and + * {@link MemoryContext MemoryContext}. {@code MemoryContext} may see the most + * use in PL/Java, as the typical reason to scope the lifetime of some PL/Java + * object is that it refers to some allocation of native memory. + *

+ * The invocation of a PL/Java function is also usefully treated as a resource + * owner. It is reasonable to depend on the objects passed in the function call + * to remain usable as long as the call is on the stack, if no other explicit + * lifespan applies. + *

+ * Java's incubating foreign function and memory API will bring a + * {@code ResourceScope} object for which some relation to a PL/Java + * {@code Lifespan} can probably be defined. + *

+ * The history of PostgreSQL MemoryContexts + * (the older mechanism, appearing in PostgreSQL 7.1), and ResourceOwners + * (introduced in 8.0) is interesting. As the latter's {@code README} puts it, + * "The design of the ResourceOwner API is modeled on our MemoryContext API, + * which has proven very flexible and successful ... It is tempting to consider + * unifying ResourceOwners and MemoryContexts into a single object type, but + * their usage patterns are sufficiently different ...." + *


+ * Only later, in PostgreSQL 9.5, did {@code MemoryContext} gain a callback + * mechanism for detecting reset or delete, with which it also becomes usable + * as a kind of lifespan under PL/Java's broadened view of the concept. + * While not unifying ResourceOwners and MemoryContexts into a single + * object type, PL/Java here makes them both available as subtypes of a + * common interface, so either can be chosen to place an appropriate temporal + * scope on a PL/Java object. + */ +public interface Lifespan +{ +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/PLJavaBasedLanguage.java b/pljava-api/src/main/java/org/postgresql/pljava/PLJavaBasedLanguage.java new file mode 100644 index 000000000..9cc0cd218 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/PLJavaBasedLanguage.java @@ -0,0 +1,792 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import java.util.List; + +import org.postgresql.pljava.model.ProceduralLanguage; // javadoc +import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased; +import org.postgresql.pljava.model.RegProcedure; +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Call.Context.TriggerData; // jvd +import org.postgresql.pljava.model.RegProcedure.Lookup; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.Transform; +import org.postgresql.pljava.model.Trigger; +import org.postgresql.pljava.model.Trigger.ForTrigger; +import org.postgresql.pljava.model.TupleTableSlot; // javadoc + +import org.postgresql.pljava.annotation.Trigger.Called; // javadoc +import org.postgresql.pljava.annotation.Trigger.Event; // javadoc +import org.postgresql.pljava.annotation.Trigger.Scope; // javadoc + +/** + * Interface for a procedural language on PL/Java infrastructure. + *

+ * An implementing class does not implement this interface directly, but rather + * implements one or both of the subinterfaces {@link InlineBlocks InlineBlocks} + * and {@link Routines Routines}. A language that implements {@code Routines} + * may also implement one or more of: {@link ReturningSets ReturningSets}, + * {@link Triggers Triggers}, {@link UsingTransforms UsingTransforms}. + * The implementing class must have a public constructor with a + * {@link ProceduralLanguage ProceduralLanguage} parameter, which it may ignore, + * or use to determine the name, oid, accessibility, or other details of the + * declared PostgreSQL language the handler class has been instantiated for. + */ +public interface PLJavaBasedLanguage +{ + /** + * To be implemented by a language that supports routines (that is, + * functions and/or procedures). + *
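A minimal sketch of the expected handler-class shape; the class name is hypothetical, and InlineBlocks (defined further below) is chosen only because it is the smallest subinterface to implement:

    import java.sql.SQLException;
    import org.postgresql.pljava.PLJavaBasedLanguage;
    import org.postgresql.pljava.model.ProceduralLanguage;

    public class TrivialLanguage implements PLJavaBasedLanguage.InlineBlocks
    {
        // the public constructor PL/Java requires; the parameter may be ignored
        public TrivialLanguage(ProceduralLanguage pl)
        {
        }

        @Override
        public void execute(String source_text, boolean atomic) throws SQLException
        {
            // a DO block in this trivial language is accepted and does nothing
        }
    }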

+ * Whether a routine is a function or procedure can be determined at + * validation time ({@code subject.}{@link RegProcedure#kind() kind()} in + * {@link #essentialChecks essentialChecks} or + * {@link #additionalChecks additionalChecks}) or at + * {@link #prepare prepare} time + * ({@code target.}{@link RegProcedure#kind() kind()}). + * A procedure can also be distinguished from a function in that, + * at {@link Routine#call Routine.call(fcinfo)} time, if a procedure is + * being called, {@code fcinfo.}{@link Call#context() context()} returns + * an instance of {@link Call.Context.CallContext CallContext}. + *

+ * Transaction control

+ *

+ * A function is always called within an existing transaction; while it may + * use subtransactions / savepoints, it can never commit, roll back, or + * start a new top-level transaction. + *

+ * A procedure is allowed to start, commit, and roll back top-level + * transactions, provided it was not called inside an existing explicit + * transaction. That condition can be checked by consulting + * {@link Call.Context.CallContext#atomic() CallContext.atomic()} when + * {@code fcinfo.context()} returns an instance of {@code CallContext}. + * When {@code atomic()} returns {@code true}, transaction control is not + * allowed. (If {@code fcinfo.context()} returns anything other than an + * instance of {@code CallContext}, this is not a procedure call, and + * transaction control is never allowed.) + *
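A sketch of that check as it might appear in handler code; fcinfo is the RegProcedure.Call passed to Routine.call, and the pattern-matching instanceof form assumes Java 16 or later:

    boolean transactionControlOk = false;
    if ( fcinfo.context() instanceof Call.Context.CallContext ctx )
        transactionControlOk = ! ctx.atomic();  // a procedure call, possibly nonatomic
    // otherwise this is not a procedure call and transaction control is never allowed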

+ * A handler may use this information to impose its own (for example, + * compile-time) limits on a routine's access to transaction-control + * operations. Any use of SPI by the routine will be appropriately limited + * with no need for attention from the handler, as PL/Java propagates the + * atomic/nonatomic flag to SPI always. + */ + public interface Routines extends PLJavaBasedLanguage + { + /** + * Performs the essential validation checks on a proposed + * PL/Java-based routine. + *

+ * This method should check (when checkBody is true) all the + * essential conditions that {@link #prepare prepare} may assume have + * been checked. Because there is no guarantee that validation at + * routine-creation time always occurred, PL/Java's dispatcher will not + * only call this method at validation time, but also will never call + * {@code prepare} without making sure this method (passing true for + * checkBody) has been called first. + *

+ * This method should throw an informative exception for any check that + * fails, otherwise returning normally. Unless there is a more-specific + * choice, {@link SQLSyntaxErrorException} with {@code SQLState} + * {@code 42P13} corresponds to PostgreSQL's + * {@code invalid_function_definition}. + *
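For example, an essential check might reject a routine like this (the condition tested is hypothetical):

    if ( routineBodyIsMissing )                   // hypothetical condition
        throw new SQLSyntaxErrorException(
            "no routine body supplied", "42P13"); // invalid_function_definition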

+ * Checks that are helpful at routine-creation time, but not essential + * to correctness of {@code prepare}, can be made in + * {@link #additionalChecks additionalChecks}. + *

+ * The dispatcher will never invoke this method for a subject + * with {@link RegProcedure#returnsSet returnsSet()} true, so this + * method may assume that property is false, unless the language also + * implements {@link ReturningSets ReturningSets} and the + * {@link ReturningSets#essentialSRFChecks essentialSRFChecks} method + * delegates to this one (as its default implementation does). + *

+ * If checkBody is false, less-thorough checks may be + * needed. The details are left to the language implementation; + * in general, basic checks of syntax, matching parameter counts, and + * so on are ok, while checks that load or compile user code or depend + * on other database state may be better avoided. The validator may be + * invoked with checkBody false at times when not all + * expected state may be in place, such as during {@code pg_restore} + * or {@code pg_upgrade}. + *

+ * This method is invoked with checkBody false only if the + * JVM has been started and PL/Java has already loaded and instantiated + * this language-handler class, or succeeds in doing so. If not, and + * checkBody is false, PL/Java simply treats the validation + * as successful. + *

+ * This default implementation checks nothing. + */ + default void essentialChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + } + + /** + * Performs additional validation checks on a proposed PL/Java-based + * routine. + *

+ * This method should be used for checks that may give helpful feedback + * at routine-creation time, but can be skipped at run time because the + * correct behavior of {@link #prepare prepare} does not depend on them. + * PL/Java calls this method only at routine-creation time, just after + * {@link #essentialChecks essentialChecks} has completed normally. + *

+ * This method should throw an informative exception for any check that + * fails, otherwise returning normally. Unless there is a more-specific + * choice, {@link SQLSyntaxErrorException} with {@code SQLState} + * {@code 42P13} corresponds to PostgreSQL's + * {@code invalid_function_definition}. + *

+ * Checks of conditions essential to correctness of {@code prepare} + * must be made in {@code essentialChecks}. + *

+ * The dispatcher will never invoke this method for a subject + * with {@link RegProcedure#returnsSet returnsSet()} true, so this + * method may assume that property is false, unless the language also + * implements {@link ReturningSets ReturningSets} and the + * {@link ReturningSets#additionalSRFChecks additionalSRFChecks} method + * delegates to this one (as its default implementation does). + *

+ * If checkBody is false, less-thorough checks may be + * needed. The details are left to the language implementation; + * in general, basic checks of syntax, matching parameter counts, and + * so on are ok, while checks that load or compile user code or depend + * on other database state may be better avoided. The validator may be + * invoked with checkBody false at times when not all + * expected state may be in place, such as during {@code pg_restore} + * or {@code pg_upgrade}. + *

+ * This default implementation checks nothing. + */ + default void additionalChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + } + + /** + * Prepares a template for a call of the routine target. + *

+ * This method is never called without + * {@link #essentialChecks essentialChecks} having been called + * immediately prior and completing normally. + *

+ * The information available at this stage comes from the system + * catalogs, reflecting the static declaration of the target routine. + * The methods of target can be used to examine that + * catalog information; the {@link PLJavaBased PLJavaBased} + * memo holds additional derived information, + * including tuple descriptors for the inputs and outputs. + * (All routines, including those treated by PostgreSQL as + * returning a scalar result, are presented to a PL/Java handler with + * the inputs and outputs represented by {@link TupleTableSlot}.) + * The tuple descriptors seen at this stage may include attributes with + * polymorphic types, not resolvable to specific types until the + * {@code Template} instance this method returns is later applied at + * an actual call site. + *

+ * This method is never called for a target with + * {@link RegProcedure#returnsSet returnsSet()} true. If the language + * also implements {@link ReturningSets ReturningSets}, any such + * target will be passed to the + * {@link ReturningSets#prepareSRF prepareSRF} method instead; + * otherwise, it will incur an exception stating the language does not + * support returning sets. + *

+ * This method should return a {@link Template Template}, which may + * encapsulate any useful precomputed values based on the catalog + * information this method consulted. + *

+ * The template, when its {@link Template#specialize specialize} method + * is invoked on an actual {@link Lookup Lookup} instance, should return + * a {@link Routine Routine} able to apply the target function's logic + * when invoked any number of times on {@link Call Call} instances + * associated with the same {@code Lookup}. + *

+ * When there is no polymorphic or variadic-"any" funny business in + * target's declaration, this method may return a + * {@code Template} that ignores its argument and always returns the + * same {@code Routine}. It could even do so in all cases, if + * implementing a language where those dynamic details are left to user + * code. + */ + Template prepare(RegProcedure target, PLJavaBased memo) + throws SQLException; + } + + /** + * To be implemented by a language that can be used to write functions + * returning sets (that is, more than a single result or row). + */ + public interface ReturningSets extends PLJavaBasedLanguage + { + /** + * Performs the essential validation checks on a proposed + * PL/Java-based set-returning function. + *
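A sketch of a prepare implementation for that simple case, where the Template ignores its Lookup argument and always hands back the same Routine; generic parameters (stripped in this rendering) are elided and the Routine body is only indicated by a comment:

    @Override
    public Template prepare(RegProcedure target, PLJavaBased memo)
    throws SQLException
    {
        // precompute whatever is useful from target and memo here
        Routine r = fcinfo ->
        {
            // apply the routine's logic to fcinfo's arguments and results
        };
        return flinfo -> r;  // a Template returning the same Routine for every call site
    }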

+ * See {@link Routines#essentialChecks essentialChecks} for + * the explanation of what to consider 'essential' checks. + *

+ * This default implementation simply delegates to the + * {@link Routines#essentialChecks essentialChecks} method, which must + * therefore be prepared for subject to have either value of + * {@link RegProcedure#returnsSet returnsSet()}. + */ + default void essentialSRFChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + /* + * A cast, because the alternative of having SetReturning extend + * Routines would allow Routines to be omitted from the implements + * clause of a language handler, which I would rather not encourage + * as a matter of style. + */ + ((Routines)this).essentialChecks(subject, memo, checkBody); + } + + /** + * Performs additional validation checks on a proposed + * PL/Java-based set-returning function. + *

+ * See {@link Routines#additionalChecks additionalChecks} for + * the explanation of what to consider 'additional' checks. + *

+ * This default implementation simply delegates to the + * {@link Routines#additionalChecks additionalChecks} method, which must + * therefore be prepared for subject to have either value of + * {@link RegProcedure#returnsSet returnsSet()}. + */ + default void additionalSRFChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + ((Routines)this).additionalChecks(subject, memo, checkBody); + } + + /** + * Prepares a template for a call of the set-returning function + * target. + *

+ * This method is never called without + * {@link #essentialSRFChecks essentialSRFChecks} having been called + * immediately prior and completing normally. + *

+ * This method is analogous to the + * {@link Routines#prepare prepare} method, but is called only for + * a target with {@link RegProcedure#returnsSet returnsSet()} + * true, and must return {@link SRFTemplate SRFTemplate} rather than + * {@link Template Template}. + *

+ * The documentation of the {@link Routines#prepare prepare} method + * further describes what is expected of an implementation. + */ + SRFTemplate prepareSRF(RegProcedure target, PLJavaBased memo) + throws SQLException; + } + + /** + * To be implemented by a language that supports triggers. + *

+ * The methods of this interface will be called, instead of those declared + * in {@link Routines Routines}, for any function declared with return type + * {@link RegType#TRIGGER TRIGGER}. If a language does not implement + * this interface, any attempt to validate or use such a function will incur + * an exception. + */ + public interface Triggers extends PLJavaBasedLanguage + { + /** + * Performs the essential validation checks on a proposed + * trigger function. + *

+ * See {@link Routines#essentialChecks Routines.essentialChecks} for + * details on what to check here. + *

+ * Any subject passed to this method is already known to be + * a function with no declared parameters and a non-set return type of + * {@link RegType#TRIGGER TRIGGER}. + *

+ * This default implementation checks nothing further. + */ + default void essentialTriggerChecks( + RegProcedure subject, PLJavaBased memo, + boolean checkBody) + throws SQLException + { + } + + /** + * Performs additional validation checks on a proposed trigger function. + *

+ * See {@link Routines#additionalChecks Routines.additionalChecks} for + * details on what to check here. + *

+ * Any subject passed to this method is already known to be + * a function with no declared parameters and a non-set return type of + * {@link RegType#TRIGGER TRIGGER}, and to have passed the + * {@link #essentialTriggerChecks essentialTriggerChecks}. + *

+ * This default implementation checks nothing further. + */ + default void additionalTriggerChecks( + RegProcedure subject, PLJavaBased memo, + boolean checkBody) + throws SQLException + { + } + + /** + * Prepares a template for a call of the trigger function + * target. + *

+ * This method is never called without + * {@link #essentialTriggerChecks essentialTriggerChecks} having + * been called immediately prior and completing normally. + *

+ * See {@link Routines#prepare Routines.prepare} for background on + * what to do here. + *

+ * This method should return a {@link TriggerTemplate TriggerTemplate}, + * which may encapsulate any useful precomputed values based on + * the catalog information this method consulted. + *

+ * Any target passed to this method is already known to be + * a function with no declared parameters and a non-set return type of + * {@link RegType#TRIGGER TRIGGER}, and to have passed the + * {@link #essentialTriggerChecks essentialTriggerChecks}. + *

+ * The template, when its {@link TriggerTemplate#specialize specialize} + * method is invoked on a {@link Trigger Trigger} instance, should + * return a {@link TriggerFunction TriggerFunction} that can be invoked + * on a {@link TriggerData TriggerData} instance. + *

+ * The template may generate a {@code TriggerFunction} that encapsulates + * specifics of the {@code Trigger} such as its target table, name, + * arguments, enabled events, scope, and columns of interest. + * A {@code TriggerFunction} will not be invoked for any trigger except + * the one passed to the {@code specialize} call that returned it. + */ + TriggerTemplate prepareTrigger( + RegProcedure target, PLJavaBased memo) + throws SQLException; + } + + /** + * To be implemented by a language that supports routines declared with + * {@code TRANSFORM FOR TYPE}. + *
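A sketch of the smallest possible trigger handler, one that suppresses whatever row operation fired it (meaningful only for a BEFORE row trigger, per the rules above); generic parameters are elided:

    @Override
    public TriggerTemplate prepareTrigger(RegProcedure target, PLJavaBased memo)
    throws SQLException
    {
        return trigger -> triggerData -> null;  // returning null suppresses the operation
    }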

+ * In addition to implementing the abstract method declared here, a language + * that implements this interface takes up full responsibility for doing + * whatever must be done to give effect to any such transforms declared on + * routines that use the language. PostgreSQL itself provides nothing but + * a way to declare transforms and associate them with routine declarations. + *

+ * PL/Java will reject, at validation time when possible, any routine + * declared with {@code TRANSFORM FOR TYPE} if the language does not + * implement this interface. + *

+ * A language that does implement this interface can learn + * what transforms are to be applied by calling + * {@link PLJavaBased#transforms() memo.transforms()} in its + * {@link Routines#prepare prepare} and/or + * {@link Triggers#prepareTrigger prepareTrigger} methods, and perhaps + * also in its validation methods to detect configuration issues as early + * as possible. + */ + public interface UsingTransforms extends PLJavaBasedLanguage + { + /** + * Performs validation checks on a {@link Transform} that purports to be + * usable with this language. + *

+ * PL/Java will already have checked that t's + * {@link Transform#language() language()} refers to this language. + * This method should use best effort to make sure that t's + * {@link Transform#fromSQL() fromSQL()} and + * {@link Transform#toSQL() toSQL()} functions are, in fact, functions + * that this language implementation can use to transform values between + * t's target PostgreSQL {@link Transform#type() type()} and + * a data type available to this language. See documentation of the + * {@link Transform#fromSQL() fromSQL()} and + * {@link Transform#toSQL() toSQL()} methods for more detail on what may + * need to be checked. + *

+ * It is possible for {@link Transform#fromSQL() fromSQL()} + * or {@link Transform#toSQL() toSQL()} to return + * a {@code RegProcedure} instance for which + * {@link RegProcedure#isValid() isValid()} is false, which indicates + * that this language's default from-SQL or to-SQL handling, + * respectively, is to be used for the transform's + * {@linkplain Transform#type() type}. In such cases, this method should + * check that this language has a usable default conversion in the + * indicated direction for that type. + *

+ * This method should return normally on success, otherwise throwing + * an informative exception. Unless there is a more-specific + * choice, {@link SQLSyntaxErrorException} with {@code SQLState} + * {@code 42P17} corresponds to PostgreSQL's + * {@code invalid_object_definition}. + */ + void essentialTransformChecks(Transform t) throws SQLException; + } + + /** + * To be implemented by a language that supports inline code blocks. + *
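A sketch for a hypothetical language that has no default conversions at all, so any transform relying on default handling in either direction is rejected:

    @Override
    public void essentialTransformChecks(Transform t) throws SQLException
    {
        if ( ! t.fromSQL().isValid()  ||  ! t.toSQL().isValid() )
            throw new SQLSyntaxErrorException(
                "no default conversion for type " + t.type(), "42P17");
        // checks that fromSQL()/toSQL() are functions this language can
        // actually use would follow here
    }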

+ * Transaction control

+ *

+ * A {@code DO} block is allowed to start, commit, and roll back top-level + * transactions, as long as it was not invoked inside an existing explicit + * transaction. The atomic parameter passed to + * {@link #execute execute} will be {@code true} if transaction control + * is disallowed. + *

+ * A handler may use this information to impose its own (for example, + * compile-time) limits on the availability of transaction-control + * operations. Any use of SPI by the code block will be appropriately + * limited with no need for attention from the handler, as PL/Java + * propagates the atomic/nonatomic flag to SPI always. + */ + public interface InlineBlocks extends PLJavaBasedLanguage + { + /** + * Parses and executes an inline code block. + * @param source_text the inline code to be parsed and executed + * @param atomic true if transaction control actions must be disallowed + * within the code block + */ + void execute(String source_text, boolean atomic) throws SQLException; + } + + /** + * The result of a {@link Template#specialize specialize} call on + * a {@link Template Template}. + *

+ * An instance can incorporate whatever can be precomputed based on the + * resolved parameter types and other information available to + * {@code specialize}. Its {@link #call call} method will then be invoked to + * supply the arguments and produce the results for each call made + * at that call site. + */ + @FunctionalInterface + public interface Routine + { + /** + * Actually executes the prepared and specialized {@code Routine}, using + * the arguments and other call-specific information passed in + * {@code fcinfo}. + *

+ * Various special cases of routine calls (triggers, procedure calls, + * and so on) can be distinguished by the specific subtypes of + * {@link Call.Context} that may be returned by + * {@code fcinfo.}{@link Call#context() context()}. + */ + void call(Call fcinfo) throws SQLException; + } + + /** + * The result of a {@link TriggerTemplate#specialize specialize} call on + * a {@link TriggerTemplate TriggerTemplate}. + *

+ * An instance can incorporate whatever can be precomputed based on the + * specific {@link Trigger Trigger} that was passed to {@code specialize}. + * Its {@link #apply apply} method will then be invoked to act on + * the {@link TriggerData TriggerData} and produce the results each time + * that trigger fires. + */ + @FunctionalInterface + public interface TriggerFunction + { + /** + * Actually executes the prepared and specialized + * {@code TriggerFunction}, with the triggering data available in + * triggerData. + *

+ * The return value, ignored for an {@link Called#AFTER AFTER} trigger, + * and restricted to null for any + * {@link Called#BEFORE BEFORE} {@link Scope#STATEMENT STATEMENT} + * trigger, can influence the triggering operation for other types + * of triggers. To permit the operation with no changes by the trigger, + * return exactly {@link TriggerData#triggerTuple triggerTuple} (for + * a trigger on {@link Event#INSERT INSERT} or + * {@link Event#DELETE DELETE}), or exactly + * {@link TriggerData#newTuple newTuple} (for a trigger on + * {@link Event#UPDATE UPDATE}). To suppress the triggering operation, + * return null. + * @return a TupleTableSlot, or null + */ + TupleTableSlot apply(TriggerData triggerData) throws SQLException; + } + + /** + * The result of a {@link Routines#prepare prepare} call on a PL/Java-based + * routine. + *

+ * An instance should depend only on the static catalog information for the + * routine as passed to {@code prepare}, and may encapsulate any values that + * can be precomputed from that information alone. Its + * {@link #specialize specialize} method will be called, passing information + * specific to a call site, to obtain a {@link Routine Routine}. + */ + @FunctionalInterface + public interface Template + { + /** + * Given the information present at a particular call site, specialize + * this template into a {@link Routine Routine} that will handle calls + * through this call site. + *

+ * Typical activities for {@code specialize} would be to consult + * flinfo's {@link Lookup#inputsDescriptor inputsDescriptor} + * and {@link Lookup#outputsDescriptor outputsDescriptor} for the number + * and types of the expected input and output parameters, though it is + * unnecessary if the tuple descriptors obtained at + * {@link Routines#prepare prepare} time included no unresolved types. + * The {@link Lookup#inputsAreSpread inputsAreSpread} method should be + * consulted if the routine has a variadic parameter of the wildcard + * {@code "any"} type. + */ + Routine specialize(Lookup flinfo) throws SQLException; + } + + /** + * Superinterface for the result of a + * {@link ReturningSets#prepareSRF prepareSRF} call on a PL/Java-based + * set-returning function. + *

+ * An instance returned by {@link ReturningSets#prepareSRF prepareSRF} must + * implement at least one of the member subinterfaces. If it implements + * more than one, it will need to override the {@link #negotiate negotiate} + * method to select the behavior to be used at a given call site. + */ + public interface SRFTemplate + { + /** + * Returns the index of a preferred subinterface of {@code SRFTemplate} + * among a list of those the caller supports. + *

+ * The list is ordered with a caller's more-preferred choices early. + *

+ * An implementation could simply return the first index of an + * allowed class C such that + * {@code this instanceof C} to use the caller's preferred method + * always, or could make a choice informed by characteristics of + * the template. + * @return the index within allowed of the interface to be + * used at this call site, or -1 if no interface in allowed + * is supported. + */ + int negotiate(List> allowed); + + /** + * An {@code SRFTemplate} subinterface that can generate + * a specialization returning the set result materialized in + * a {@code Tuplestore}. + */ + interface Materialize extends SRFTemplate + { + /** + * {@inheritDoc} + *
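A template that implements more than one subinterface might override negotiate along these lines; the generic signature shown is an assumption, since type parameters do not survive in this rendering:

    @Override
    public int negotiate(List<Class<? extends SRFTemplate>> allowed)
    {
        for ( int i = 0 ; i < allowed.size() ; ++ i )
            if ( allowed.get(i).isInstance(this) )
                return i;   // the caller's most-preferred supported protocol
        return -1;          // nothing in allowed is supported
    }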

+ * This default implementation simply returns + * {@code allowed.indexOf(Materialize.class)}. + */ + @Override + default int negotiate(List> allowed) + { + return allowed.indexOf(Materialize.class); + } + } + + /** + * An {@code SRFTemplate} subinterface that can generate + * a specialization returning the set result in a series of calls + * each returning one value or row. + */ + interface ValuePerCall extends SRFTemplate + { + /** + * {@inheritDoc} + *

+ * This default implementation simply returns + * {@code allowed.indexOf(ValuePerCall.class)}. + */ + @Override + default int negotiate(List> allowed) + { + return allowed.indexOf(ValuePerCall.class); + } + + SRFFirst specializeValuePerCall(Lookup flinfo) throws SQLException; + } + } + + /** + * The result of a {@link SRFTemplate.ValuePerCall#specializeValuePerCall + * specializeValuePerCall} call on an {@link SRFTemplate SRFTemplate}. + *

+ * An instance can incorporate whatever can be precomputed based on the + * resolved parameter types and other information available to + * {@code specializeValuePerCall}. Its {@link #firstCall firstCall} method + * will then be invoked, for each call made at that call site, to supply the + * arguments and obtain an instance of {@link SRFNext SRFNext} whose + * {@link SRFNext#nextResult nextResult} method will be called, as many + * times as needed, to retrieve all rows of the result. + */ + @FunctionalInterface + public interface SRFFirst + { + /** + * Executes the prepared and specialized {@code SRFFirst} code, using + * the arguments and other call-specific information passed in + * {@code fcinfo} and returns an instance of {@link SRFNext SRFNext} + * to produce a result set row by row. + *

+ * This method should not access fcinfo's + * {@link RegProcedure.Call#result result} or + * {@link RegProcedure.Call#isNull isNull} methods to return any value, + * but should return an instance of {@code SRFNext} that will do so. + */ + SRFNext firstCall(Call fcinfo) throws SQLException; + } + + /** + * The result of a {@link SRFFirst#firstCall firstCall} call on an instance + * of {@link SRFFirst SRFFirst}. + *

+ * The {@link #nextResult nextResult} method will be called repeatedly + * as long as its return value indicates another row may follow, unless + * PostgreSQL earlier determines no more rows are needed. + *

+ * The {@link #close close} method will be called after the last call of + * {@code nextResult}, whether because all rows have been read or because + * PostgreSQL has read all it needs. It is not called, however, + * if {@code nextResult} has returned {@link Result#SINGLE Result.SINGLE}. + * + */ + public interface SRFNext extends AutoCloseable + { + /** + * Called when PostgreSQL will be making no further calls of + * {@link #nextResult nextResult} for this result set, which may be + * before all results have been fetched. + *

+ * When a degenerate single-row set is returned (as indicated by + * {@link #nextResult nextResult} returning + * {@link Result#SINGLE Result.SINGLE}), this method is not called. + */ + void close(); + + /** + * Called to return a single result. + *

+ * As with non-set-returning routines, this method should store result + * values into {@link RegProcedure.Call#result fcinfo.result()} or set + * {@link RegProcedure.Call#isNull fcinfo.isNull(true)} (which, in this + * context, produces a row of all nulls). If there is no result + * to store, the method should return {@link Result#END Result.END}: + * no row will be produced, and the result set is considered complete. + *

+ * If the method has exactly one row to return, it may store the values + * and return {@link Result#SINGLE Result.SINGLE}: the result will be + * considered to be just that one row. None of the rest of the + * set-returning protocol will be involved, and + * {@link SRFNext#close close()} will not be called. + *

+ * Otherwise, the method should return + * {@link Result#MULTIPLE Result.MULTIPLE} after storing each row, and + * conclude by returning {@link Result#END Result.END} from the final + * call (without storing anything). + *
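A sketch of an SRFNext driven by a precomputed iterator of rows; the iterator is assumed to have been captured in firstCall, and storing each row through fcinfo is indicated only by a comment:

    SRFNext next = new SRFNext()
    {
        @Override
        public Result nextResult(Call fcinfo) throws SQLException
        {
            if ( ! rows.hasNext() )       // rows: an Iterator captured in firstCall
                return Result.END;
            rows.next();                  // store this row via fcinfo.result() here
            return Result.MULTIPLE;
        }

        @Override
        public void close()
        {
            // release any per-result-set state here
        }
    };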

+ * It is a protocol violation to return + * {@link Result#SINGLE Result.SINGLE} from any but the very first call. + *

+ * The arguments in + * {@link RegProcedure.Call#arguments fcinfo.arguments()} will not be + * changing as the rows of a single result are retrieved. Any argument + * values that will be referred to repeatedly may be worth fetching once + * in the {@link SRFFirst#firstCall firstCall} method and their Java + * representations captured in this object, rather than fetching and + * converting them repeatedly. + */ + Result nextResult(Call fcinfo) throws SQLException; + + /** + * Used to indicate the state of the result sequence on return from + * a single call in the {@code ValuePerCall} protocol. + */ + enum Result + { + /** + * There is exactly one row and this call has returned it. + *

+ * None of the rest of the set-returning protocol will be involved, + * and {@link SRFNext#close close()} will not be called. + */ + SINGLE, + + /** + * This call has returned one of possibly multiple rows, and + * another call should be made to retrieve the next row if any. + */ + MULTIPLE, + + /** + * This call has no row to return and the result sequence + * is complete. + */ + END + } + } + + /** + * The result of a {@link Triggers#prepareTrigger prepareTrigger} call on + * a PL/Java-based trigger function. + *

+ * An instance should depend only on the static catalog information for the + * function as passed to {@code prepareTrigger}, and may encapsulate any + * values that can be precomputed from that information alone. Its + * {@link #specialize specialize} method will be called, passing information + * specific to one trigger, to obtain a + * {@link TriggerFunction TriggerFunction}. + */ + @FunctionalInterface + public interface TriggerTemplate + { + /** + * Given the specifics of one {@link Trigger Trigger}, specialize + * this template into a {@link TriggerFunction TriggerFunction} that + * will handle calls through this trigger. + *

+ * Typical activities for {@code specialize} would be to consult + * trigger's {@link Trigger#name name}, + * {@link Trigger#relation relation}, {@link Trigger#called called}, + * {@link Trigger#events events}, {@link Trigger#scope scope}, + * {@link Trigger#arguments arguments}, and + * {@link Trigger#columns columns} to + * determine the kind of trigger it is, and fold those values into + * the returned {@code TriggerFunction}. + *

+ * This stage is well suited for checking that the characteristics of + * the trigger (events, scope, when called, arguments, column types of + * the target table) conform to what the trigger function can handle. + */ + TriggerFunction specialize(Trigger trigger) throws SQLException; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/RolePrincipal.java b/pljava-api/src/main/java/org/postgresql/pljava/RolePrincipal.java new file mode 100644 index 000000000..cd68813de --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/RolePrincipal.java @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.io.InvalidObjectException; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; + +import java.nio.file.attribute.GroupPrincipal; + +import java.util.function.Function; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Pseudo; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +public abstract class RolePrincipal extends BasePrincipal +{ + private static final long serialVersionUID = 5650953533699613976L; + + RolePrincipal(String name) + { + super(name); + constrain(IllegalArgumentException::new); + } + + RolePrincipal(Simple name) + { + super(name); + constrain(IllegalArgumentException::new); + /* + * Ensure the subclasses' PUBLIC singletons really are, by rejecting the + * Pseudo.PUBLIC identifier in this constructor. The subclasses use + * private constructors that call the specialized one below when + * initializing their singletons. + */ + if ( s_public == name ) + throw new IllegalArgumentException( + "attempt to create non-singleton PUBLIC RolePrincipal"); + } + + RolePrincipal(Pseudo name) + { + super(name); + constrain(IllegalArgumentException::new); + } + + private final void constrain(Function exc) + throws E + { + Class c = getClass(); + if ( c != Authenticated.class && c != Session.class + && c != Outer.class && c != Current.class ) + throw exc.apply( + "forbidden to create unknown RolePrincipal subclass: " + + c.getName()); + + /* + * Unlike many cases where a delimited identifier can be used whose + * regular-identifier form is a reserved word, PostgreSQL in fact + * forbids giving any role a name that the regular identifier public + * would match, even if the name is quoted. + */ + if ( ( "public".equals(m_name.nonFolded()) + || "public".equals(m_name.pgFolded()) ) && m_name != s_public ) + throw exc.apply( + "forbidden to create a RolePrincipal with name " + + "that matches \"public\" by PostgreSQL rules"); + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + constrain(InvalidObjectException::new); + } + + static final Pseudo s_public = Pseudo.PUBLIC; + + /** + * Compare two {@code RolePrincipal}s for equality, with special treatment + * for the {@code PUBLIC} ones. + *

+ * Each concrete subclass of {@code RolePrincipal} has a singleton + * {@code PUBLIC} instance, which will only compare equal to itself (this + * method is not the place to say everything matches {@code PUBLIC}, because + * {@code equals} should be symmetric, and security checks should not be). + * Otherwise, the result is that of + * {@link Identifier#equals(Object) Identifier.equals}. + *

+ * Note that these {@code PUBLIC} instances are distinct from the wild-card + * principal names that can appear in the Java policy file: those are + * handled without ever instantiating the class, and simply match any + * principal with the identically-spelled class name. + */ + @Override + public final boolean equals(Object other) + { + if ( this == other ) + return true; + /* + * Because the pseudo "PUBLIC" instances are restricted to being + * singletons (one per RolePrincipal subclass), the above test will have + * already handled the matching case for those. Below, if either one is + * a PUBLIC instance, its m_name won't match anything else, which is ok + * because of the PostgreSQL rule that no role can have a potentially + * matching name anyway. + */ + if ( ! getClass().isInstance(other) ) + return false; + RolePrincipal o = (RolePrincipal)other; + return m_name.equals(o.m_name); + } + + public static final class Authenticated extends RolePrincipal + { + private static final long serialVersionUID = -4558155344619605758L; + + public static final Authenticated PUBLIC = new Authenticated(s_public); + + public Authenticated(String name) + { + super(name); + } + + public Authenticated(Simple name) + { + super(name); + } + + private Authenticated(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? PUBLIC : this; + } + } + + public static final class Session extends RolePrincipal + { + private static final long serialVersionUID = -598305505864518470L; + + public static final Session PUBLIC = new Session(s_public); + + public Session(String name) + { + super(name); + } + + public Session(Simple name) + { + super(name); + } + + private Session(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? PUBLIC : this; + } + } + + public static final class Outer extends RolePrincipal + { + private static final long serialVersionUID = 2177159367185354785L; + + public static final Outer PUBLIC = new Outer(s_public); + + public Outer(String name) + { + super(name); + } + + public Outer(Simple name) + { + super(name); + } + + private Outer(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? PUBLIC : this; + } + } + + public static final class Current extends RolePrincipal + implements GroupPrincipal + { + private static final long serialVersionUID = 2816051825662188997L; + + public static final Current PUBLIC = new Current(s_public); + + public Current(String name) + { + super(name); + } + + public Current(Simple name) + { + super(name); + } + + private Current(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? PUBLIC : this; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/TargetList.java b/pljava-api/src/main/java/org/postgresql/pljava/TargetList.java new file mode 100644 index 000000000..5e643a2af --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/TargetList.java @@ -0,0 +1,971 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.sql.SQLException; // for javadoc +import java.sql.SQLXML; // for javadoc + +import java.util.Arrays; +import java.util.BitSet; +import java.util.Iterator; +import java.util.List; + +import java.util.stream.Stream; + +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsShort; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.Portal; // for javadoc +import org.postgresql.pljava.model.TupleDescriptor; // for javadoc +import org.postgresql.pljava.model.TupleTableSlot; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Identifies attributes to be retrieved from a set of tuples. + *

+ * {@code TargetList} is more general than {@link Projection Projection}: in a + * {@code Projection}, no attribute can appear more than once, but repetition + * is possible in a {@code TargetList}. + *

+ * In general, it will be more efficient, if processing logic requires more than + * one copy of some attribute's value, to simply mention the attribute once in a + * {@code Projection}, and have the Java logic then copy the value, rather than + * fetching and converting it twice from the database native form. But there + * may be cases where that isn't workable, such as when the value is needed in + * different Java representations from different {@link Adapter}s, or when the + * Java representation is a type like {@link SQLXML} that can only be used once. + * Such cases call for a {@code TargetList} in which the attribute is mentioned + * more than once, to be separately fetched. + *

+ * Given a {@code TargetList}, query results can be processed by supplying a + * lambda body to {@link #applyOver(Iterable,Cursor.Function) applyOver}. The + * lambda will be supplied a {@link Cursor Cursor} whose {@code apply} methods + * can be used to break out the wanted values on each row, in the + * {@code TargetList} order. + */ +public interface TargetList extends List +{ + /** + * A {@code TargetList} in which no one attribute may appear more than once. + *

+ * The prime example of a {@code Projection} is a {@link TupleDescriptor} as + * obtained, for example, from the {@link Portal} for a query result. + *

+ * To preserve the "no attribute appears more than once" property, the only + * new {@code Projection}s derivable from an existing one involve selecting + * a subset of its attributes, and possibly changing their order. The + * {@code project} methods taking attribute names, attribute indices, or the + * attributes themselves can be used to do so, as can the {@code subList} + * method. + */ + interface Projection extends TargetList + { + /** + * From this {@code Projection}, returns a {@code Projection} containing + * only the attributes matching the supplied names and in the + * order of the argument list. + * @throws IllegalArgumentException if more names are supplied than this + * Projection has attributes, or if any remain unmatched after matching + * each attribute in this Projection at most once. + */ + Projection project(Simple... names); + + /** + * From this {@code Projection}, returns a {@code Projection} containing + * only the attributes matching the supplied names and in the + * order of the argument list. + *

+ * The names will be converted to {@link Simple Identifier.Simple} by + * its {@link Simple#fromJava fromJava} method before comparison. + * @throws IllegalArgumentException if more names are supplied than this + * Projection has attributes, or if any remain unmatched after matching + * each attribute in this Projection at most once. + */ + default Projection project(CharSequence... names) + { + return project( + Arrays.stream(names) + .map(CharSequence::toString) + .map(Simple::fromJava) + .toArray(Simple[]::new) + ); + } + + /** + * Returns a {@code Projection} containing only the attributes found + * at the supplied indices in this {@code Projection}, and in + * the order of the argument list. + *
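For example, given a Projection obtained from a query result's TupleDescriptor (the column names and indices here are hypothetical):

    Projection idAndName = tupleDescriptor.project("id", "name");  // by name
    Projection firstTwo  = tupleDescriptor.project(0, 1);          // by 0-based index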

+ * The index of the first attribute is zero. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is negative or beyond + * the last index in this Projection, or if any index appears more than + * once. + */ + Projection project(int... indices); + + /** + * Returns a {@code Projection} containing only the attributes found + * at the supplied indices in this {@code Projection}, and in + * the order of the argument list. + *

+ * The index of the first attribute is zero. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is negative or beyond + * the last index in this Projection, or if any index appears more than + * once. + */ + Projection project(short... indices); + + /** + * Returns a {@code Projection} containing only the attributes whose + * indices in this {@code Projection} are set (true) in the supplied + * {@link BitSet BitSet}, and in the same order. + *

+ * The index of the first attribute is zero. + * @throws IllegalArgumentException if more bits are set than this + * Projection has attributes, or if any bit is set beyond the last index + * in this Projection. + */ + Projection project(BitSet indices); + + /** + * Returns a {@code Projection} containing only the attributes whose + * indices in this {@code Projection} are set (true) in the supplied + * {@link BitSet BitSet}, and in the same order. + *

+ * The index of the first attribute is one. + * @throws IllegalArgumentException if more bits are set than this + * Projection has attributes, or if any bit is set before the first or + * beyond the last corresponding to a 1-based index in this Projection. + */ + Projection sqlProject(BitSet indices); + + /** + * Like {@link #project(int...) project(int...)} but using SQL's 1-based + * indexing convention. + *

+ * The index of the first attribute is 1. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is nonpositive or beyond + * the last 1-based index in this Projection, or if any index appears + * more than once. + */ + Projection sqlProject(int... indices); + + /** + * Like {@link #project(int...) project(int...)} but using SQL's 1-based + * indexing convention. + *

+ * The index of the first attribute is 1. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is nonpositive or beyond + * the last 1-based index in this Projection, or if any index appears + * more than once. + */ + Projection sqlProject(short... indices); + + /** + * Returns a {@code Projection} containing only attributes + * and in the order of the argument list. + *

+ * The attributes must be found in this {@code Projection} by exact + * reference identity. + * @throws IllegalArgumentException if more attributes are supplied than + * this Projection has, or if any remain unmatched after matching + * each attribute in this Projection at most once. + */ + Projection project(Attribute... attributes); + + @Override + Projection subList(int fromIndex, int toIndex); + } + + @Override + TargetList subList(int fromIndex, int toIndex); + + /** + * Like {@link #get(int) get} but following the SQL convention where the + * first element has index 1. + */ + default Attribute sqlGet(int oneBasedIndex) + { + try + { + return get(oneBasedIndex - 1); + } + catch ( IndexOutOfBoundsException e ) + { + throw (IndexOutOfBoundsException) + new IndexOutOfBoundsException(String.format( + "sqlGet() one-based index %d should be > 0 and <= %d", + oneBasedIndex, size() + )) + .initCause(e); + } + } + + /** + * Executes the function f, once, supplying a + * {@link Cursor Cursor} that can be iterated over the supplied + * tuples and used to process each tuple. + * @return whatever f returns. + */ + R applyOver( + Iterable tuples, Cursor.Function f) + throws X, SQLException; + + /** + * Executes the function f, once, supplying a + * {@link Cursor Cursor} that can be used to process the tuple. + *

+ * The {@code Cursor} can be iterated, just as if a one-row + * {@code Iterable} had been passed to + * {@link #applyOver(Iterable,Cursor.Function) applyOver(tuples, f)}, but it + * need not be; it will already have the single supplied tuple as + * its current row, ready for its {@code apply} methods to be used. + * @return whatever f returns. + */ + R applyOver( + TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException; + + /** + * A {@code TargetList} that has been bound to a source of tuples and can + * execute code with the wanted attribute values available. + *
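A non-curried sketch of applyOver; the projection, the tuple source, and the two adapters (both assumed to produce reference types, since mixing reference and primitive adapters requires the curried style shown in the example further below) are placeholders:

    List<String> labels = projection.applyOver(tuples, c ->
    {
        List<String> result = new ArrayList<>();
        for ( Cursor unused : c )          // iteration mutates c itself
            c.apply(idAdapter, nameAdapter,
                ( id, name ) -> result.add(id + ": " + name));
        return result;
    });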

+ * Being derived from a {@link TargetList}, a {@code Cursor} serves directly + * as an {@code Iterator}, supplying the attributes in the + * {@code TargetList} order. + *

+ * Being bound to a source of tuples, a {@code Cursor} also implements + * {@code Iterable}, and can supply an iterator over the bound tuples in + * order. The {@code Cursor} is mutated during the iteration, having a + * current row that becomes each tuple in turn. The object returned by that + * iterator is the {@code Cursor} itself, so the caller has no need for the + * iteration variable, and can use the "unnamed variable" {@code _} for it, + * in Java versions including that feature (which appears in Java 21 but + * only with {@code --enable-preview}). In older Java versions it can be + * given some other obviously throwaway name. + *

+ * When a {@code Cursor} has a current row, its {@code apply} methods can be + * used to execute a lambda body with its parameters mapped to the row's + * values, in {@code TargetList} order, or to a prefix of those, should + * a lambda with fewer parameters be supplied. + *

+ * Each overload of {@code apply} takes some number of + * {@link Adapter Adapter} instances, each of which must be suited to the + * PostgreSQL type at its corresponding position, followed by a lambda body + * with the same number of parameters, each of which will receive the value + * from the corresponding {@code Adapter}, and have an inferred type + * matching what that {@code Adapter} produces. + *

+ * Within a lambda body with fewer parameters than the length of the + * {@code TargetList}, the {@code Cursor}'s attribute iterator has been + * advanced by the number of columns consumed. It can be used again to apply + * an inner lambda body to remaining columns. This "curried" style can be + * useful when the number or types of values to be processed will not + * directly fit any available {@code apply} signature. + *

+	 *  overall_result = targetlist.applyOver(tuples, c ->
+	 *  {
+	 *      var resultCollector = ...;
+	 *      for ( Cursor _ : c )
+	 *      {
+	 *          var oneResult = c.apply(
+	 *              adap0, adap1,
+	 *             ( val0,  val1 ) -> c.apply(
+	 *                  adap2, adap3,
+	 *                 ( val2,  val3 ) -> process(val0, val1, val2, val3)));
+     *          resultCollector.collect(oneResult);
+	 *      }
+	 *      return resultCollector;
+	 *  });
+	 *
+ *

+ * As the {@code apply} overloads for reference-typed values and those for + * primitive values are separate, currying must be used when processing a + * mix of reference and primitive types. + *
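+	 * For example (a sketch only; txtAdapter is assumed to be a
+	 * reference-typed As adapter and cntAdapter a primitive AsInt adapter,
+	 * each suited to its column's PostgreSQL type):
+	 *
+	 *  var labeled = c.apply(
+	 *      txtAdapter,
+	 *     ( label ) -> c.apply(
+	 *          cntAdapter,
+	 *         ( count ) -> label + ": " + count));
+	 *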

+ * The {@code Cursor}'s attribute iterator is reset each time the tuple + * iterator moves to a new tuple. It is also reset on return (normal or + * exceptional) from an outermost {@code apply}, in case another function + * should then be applied to the row. + *

+ * The attribute iterator is not reset on return from an inner (curried) + * {@code apply}. Therefore, it is possible to process a tuple having + * repeating groups of attributes with matching types, reusing an inner + * lambda and its matching adapters for each occurrence of the group. + *
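+	 * For example (a sketch only; nameAdapter and countAdapter are
+	 * hypothetical adapters for a row laid out as two consecutive
+	 * name/count groups), using the zero-parameter {@code apply} overload
+	 * described just below as the outermost:
+	 *
+	 *  var groups = c.apply(() -> List.of(
+	 *      c.apply(nameAdapter, n -> c.apply(countAdapter, k -> n + "=" + k)),
+	 *      c.apply(nameAdapter, n -> c.apply(countAdapter, k -> n + "=" + k))));
+	 *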

+ * If the tuple is nothing but repeating groups, the effect can still be + * achieved by using the zero-parameter {@code apply} overload as the + * outermost. + */ + interface Cursor extends Iterator, Iterable + { + /** + * Returns an {@link Iterator} that will return this {@code Cursor} + * instance itself, repeatedly, mutated each time to represent the next + * of the bound list of tuples. + *

+ * Because the {@code Iterator} will produce the same {@code Cursor} + * instance on each iteration, and the instance is mutated, saving + * values the iterator returns will not have effects one might expect, + * and no more than one iteration should be in progress at a time. + *

+ * The {@code Iterator} that this {@code Cursor} represents + * will be reset to the first attribute each time a new tuple is + * presented by the {@code Iterator}. + * @throws IllegalStateException within the code body passed to any + * {@code apply} method. Within any such code body, the cursor simply + * represents its current tuple. Only outside of any {@code apply()} may + * {@code iterator()} be called. + */ + @Override // Iterable + Iterator iterator(); + + /** + * Returns a {@link Stream} that will present this {@code Cursor} + * instance itself, repeatedly, mutated each time to represent the next + * of the bound list of tuples. + *

+ * The stream should be used within the scope of the + * {@link #applyOver(Iterable,Function) applyOver} that has made + * this {@code Cursor} available. + *

+ * Because the {@code Stream} will produce the same {@code Cursor} + * instance repeatedly, and the instance is mutated, saving instances + * will not have effects one might expect, and no more than one + * stream should be in progress at a time. Stateful operations such as + * {@code distinct} or {@code sorted} will make no sense applied to + * these instances. Naturally, this method does not return a parallel + * {@code Stream}. + *

+ * These restrictions do not satisfy all expectations of a + * {@code Stream}, and may be topics for future work as this API is + * refined. + *
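+	 * For example (a sketch only; c is this Cursor within the applyOver
+	 * that supplied it, and nonNullAdapter is a hypothetical
+	 * reference-typed adapter for the first column):
+	 *
+	 *  long nonNullRows = c.stream()
+	 *      .filter(cur -> cur.apply(nonNullAdapter, v -> null != v))
+	 *      .count();
+	 *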

+ * The {@code Iterator} that this {@code Cursor} represents + * will be reset to the first attribute each time a new tuple is + * presented by the {@code Stream}. + * @throws IllegalStateException within the code body passed to any + * {@code apply} method. Within any such code body, the cursor simply + * represents its current tuple. Only outside of any {@code apply()} may + * {@code stream()} be called. + */ + Stream stream(); + + R apply( + L0 f) + throws X; + + R apply( + As a0, + L1 f) + throws X; + + R apply( + As a0, As a1, + L2 f) + throws X; + + R apply( + As a0, As a1, As a2, + L3 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + L4 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, + L5 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, + L6 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, + L7 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + L8 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + As a8, As a9, As aa, As ab, + As ac, As ad, As ae, As af, + L16 f) + throws X; + + R apply( + AsLong a0, + J1 f) + throws X; + + R apply( + AsLong a0, AsLong a1, + J2 f) + throws X; + + R apply( + AsLong a0, AsLong a1, AsLong a2, + J3 f) + throws X; + + R apply( + AsLong a0, AsLong a1, AsLong a2, AsLong a3, + J4 f) + throws X; + + R apply( + AsDouble a0, + D1 f) + throws X; + + R apply( + AsDouble a0, AsDouble a1, + D2 f) + throws X; + + R apply( + AsDouble a0, AsDouble a1, AsDouble a2, + D3 f) + throws X; + + R apply( + AsDouble a0, AsDouble a1, AsDouble a2, AsDouble a3, + D4 f) + throws X; + + R apply( + AsInt a0, + I1 f) + throws X; + + R apply( + AsInt a0, AsInt a1, + I2 f) + throws X; + + R apply( + AsInt a0, AsInt a1, AsInt a2, + I3 f) + throws X; + + R apply( + AsInt a0, AsInt a1, AsInt a2, AsInt a3, + I4 f) + throws X; + + R apply( + AsFloat a0, + F1 f) + throws X; + + R apply( + AsFloat a0, AsFloat a1, + F2 f) + throws X; + + R apply( + AsFloat a0, AsFloat a1, AsFloat a2, + F3 f) + throws X; + + R apply( + AsFloat a0, AsFloat a1, AsFloat a2, AsFloat a3, + F4 f) + throws X; + + R apply( + AsShort a0, + S1 f) + throws X; + + R apply( + AsShort a0, AsShort a1, + S2 f) + throws X; + + R apply( + AsShort a0, AsShort a1, AsShort a2, + S3 f) + throws X; + + R apply( + AsShort a0, AsShort a1, AsShort a2, AsShort a3, + S4 f) + throws X; + + R apply( + AsChar a0, + C1 f) + throws X; + + R apply( + AsChar a0, AsChar a1, + C2 f) + throws X; + + R apply( + AsChar a0, AsChar a1, AsChar a2, + C3 f) + throws X; + + R apply( + AsChar a0, AsChar a1, AsChar a2, AsChar a3, + C4 f) + throws X; + + R apply( + AsByte a0, + B1 f) + throws X; + + R apply( + AsByte a0, AsByte a1, + B2 f) + throws X; + + R apply( + AsByte a0, AsByte a1, AsByte a2, + B3 f) + throws X; + + R apply( + AsByte a0, AsByte a1, AsByte a2, AsByte a3, + B4 f) + throws X; + + R apply( + AsBoolean a0, + Z1 f) + throws X; + + R apply( + AsBoolean a0, AsBoolean a1, + Z2 f) + throws X; + + R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, + Z3 f) + throws X; + + R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, AsBoolean a3, + Z4 f) + throws X; + + @FunctionalInterface + interface Function + { + R apply(Cursor c); + } + + @FunctionalInterface + interface L0 + { + R apply() throws X; + } + + @FunctionalInterface + interface L1 + { + R apply(A v0) throws X; + } + + @FunctionalInterface + interface L2 + { + R apply(A v0, B v1) throws X; + } + + /** + * @hidden + */ + 
@FunctionalInterface + interface L3 + { + R apply(A v0, B v1, C v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L4 + { + R apply(A v0, B v1, C v2, D v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L5 + { + R apply(A v0, B v1, C v2, D v3, E v4) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L6 + { + R apply(A v0, B v1, C v2, D v3, E v4, F v5) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L7 + { + R apply(A v0, B v1, C v2, D v3, E v4, F v5, G v6) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L8 + { + R apply(A v0, B v1, C v2, D v3, E v4, F v5, G v6, H v7) + throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L16 + { + R apply( + A v0, B v1, C v2, D v3, E v4, F v5, G v6, H v7, + I v8, J v9, K va, L vb, M vc, N vd, O ve, P vf) + throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J1 + { + R apply(long v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J2 + { + R apply(long v0, long v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J3 + { + R apply(long v0, long v1, long v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J4 + { + R apply(long v0, long v1, long v2, long v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D1 + { + R apply(double v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D2 + { + R apply(double v0, double v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D3 + { + R apply(double v0, double v1, double v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D4 + { + R apply(double v0, double v1, double v2, double v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I1 + { + R apply(int v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I2 + { + R apply(int v0, int v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I3 + { + R apply(int v0, int v1, int v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I4 + { + R apply(int v0, int v1, int v2, int v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface F1 + { + R apply(float v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface F2 + { + R apply(float v0, float v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface F3 + { + R apply(float v0, float v1, float v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface F4 + { + R apply(float v0, float v1, float v2, float v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S1 + { + R apply(short v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S2 + { + R apply(short v0, short v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S3 + { + R apply(short v0, short v1, short v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S4 + { + R apply(short v0, short v1, short v2, short v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface C1 + { + R apply(char v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface C2 + { + R apply(char v0, char v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface C3 + { + R apply(char v0, char v1, char v2) throws X; + } + + /** + * 
@hidden + */ + @FunctionalInterface + interface C4 + { + R apply(char v0, char v1, char v2, char v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B1 + { + R apply(byte v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B2 + { + R apply(byte v0, byte v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B3 + { + R apply(byte v0, byte v1, byte v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B4 + { + R apply(byte v0, byte v1, byte v2, byte v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z1 + { + R apply(boolean v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z2 + { + R apply(boolean v0, boolean v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z3 + { + R apply(boolean v0, boolean v1, boolean v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z4 + { + R apply(boolean v0, boolean v1, boolean v2, boolean v3) throws X; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Array.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Array.java new file mode 100644 index 000000000..63cee9dbc --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Array.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.sql.SQLException; + +import java.util.List; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.Contract; + +import org.postgresql.pljava.model.TupleTableSlot.Indexed; + +/** + * Container for functional interfaces presenting a PostgreSQL array. + */ +public interface Array +{ + /** + * A contract whereby an array is returned flattened into a Java list, + * with no attention to its specified dimensionality or index bounds. + */ + @FunctionalInterface + interface AsFlatList extends Contract.Array,E,Adapter.As> + { + /** + * Shorthand for a cast of a suitable method reference to this + * functional interface type. + */ + static AsFlatList of(AsFlatList instance) + { + return instance; + } + + /** + * An implementation that produces a Java list eagerly copied from the + * PostgreSQL array, which is then no longer needed; null elements in + * the array are included in the list. + */ + static List nullsIncludedCopy( + int nDims, int[] dimsAndBounds, Adapter.As adapter, + Indexed slot) + throws SQLException + { + int n = slot.elements(); + E[] result = adapter.arrayOf(n); + for ( int i = 0; i < n; ++ i ) + result[i] = slot.get(i, adapter); + return List.of(result); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Bitstring.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Bitstring.java new file mode 100644 index 000000000..8b43b3b5a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Bitstring.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.nio.ByteBuffer; + +import java.util.OptionalInt; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code BITSTRING} type category. + */ +public interface Bitstring +{ + /** + * The {@code BIT} and {@code VARBIT} types' PostgreSQL semantics: the + * number of bits, and the sequence of bytes they're packed into. + */ + @FunctionalInterface + public interface Bit extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param nBits the actual number of bits in the value, not necessarily + * a multiple of 8. For type BIT, must equal the modifier nBits if + * specified; for VARBIT, must be equal or smaller. + * @param bytes a buffer of ceiling(nBits/8) bytes, not aliasing any + * internal storage, so safely readable (and writable, if useful for + * format conversion). Before accessing it in wider units, its byte + * order should be explicitly set. Within each byte, the logical order + * of the bits is from MSB to LSB; beware that this within-byte bit + * order is the reverse of what java.util.BitSet.valueOf(...) expects. + * When nBits is not a multiple of 8, the unused low-order bits of + * the final byte must be zero. + */ + T construct(int nBits, ByteBuffer bytes); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Bit} function possibly tailored ("curried") + * with the values from a PostgreSQL type modifier on the type. + * @param nBits for the BIT type, the exact number of bits the + * value must have; for VARBIT, the maximum. When not specified, + * the meaning is 1 for BIT, and unlimited for VARBIT. + */ + Bit modify(OptionalInt nBits); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Datetime.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Datetime.java new file mode 100644 index 000000000..0ac42e41d --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Datetime.java @@ -0,0 +1,596 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneOffset; + +import static java.time.ZoneOffset.UTC; +import static java.time.temporal.ChronoUnit.DAYS; +import static java.time.temporal.ChronoUnit.MICROS; +import static java.time.temporal.JulianFields.JULIAN_DAY; + +import java.util.OptionalInt; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code DATETIME} type category. 
+ */ +public interface Datetime +{ + /** + * PostgreSQL "infinitely early" date, as a value of what would otherwise be + * days from the PostgreSQL epoch. + */ + int DATEVAL_NOBEGIN = Integer.MIN_VALUE; + + /** + * PostgreSQL "infinitely late" date, as a value of what would otherwise be + * days from the PostgreSQL epoch. + */ + int DATEVAL_NOEND = Integer.MAX_VALUE; + + /** + * PostgreSQL "infinitely early" timestamp, as a value of what would + * otherwise be microseconds from the PostgreSQL epoch. + */ + long DT_NOBEGIN = Long.MIN_VALUE; + + /** + * PostgreSQL "infinitely late" timestamp, as a value of what would + * otherwise be microseconds from the PostgreSQL epoch. + */ + long DT_NOEND = Long.MAX_VALUE; + + /** + * The PostgreSQL "epoch", 1 January 2000, as a Julian day; the date + * represented by a {@code DATE}, {@code TIMESTAMP}, or {@code TIMESTAMPTZ} + * with a stored value of zero. + */ + int POSTGRES_EPOCH_JDATE = 2451545; + + /** + * Maximum value allowed for a type modifier specifying the seconds digits + * to the right of the decimal point for a {@code TIME} or {@code TIMETZ}. + */ + int MAX_TIME_PRECISION = 6; + + /** + * Maximum value allowed for a type modifier specifying the seconds digits + * to the right of the decimal point for a {@code TIMESTAMP} or + * {@code TIMESTAMPTZ}. + */ + int MAX_TIMESTAMP_PRECISION = 6; + + /** + * The maximum allowed value, inclusive, for a {@code TIME} or the time + * portion of a {@code TIMETZ}. + *

+ * The limit is inclusive; PostgreSQL officially accepts 24:00:00 + * as a valid time value. + */ + long USECS_PER_DAY = 86400000000L; + + /** + * The {@code DATE} type's PostgreSQL semantics: a signed number of days + * since the "Postgres epoch". + */ + @FunctionalInterface + public interface Date extends Contract.Scalar + { + /** + * The PostgreSQL "epoch" as a {@code java.time.LocalDate}. + */ + LocalDate POSTGRES_EPOCH = + LocalDate.EPOCH.with(JULIAN_DAY, POSTGRES_EPOCH_JDATE); + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents days since + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}, unless it is one of + * the special values {@link #DATEVAL_NOBEGIN DATEVAL_NOBEGIN} or + * {@link #DATEVAL_NOEND DATEVAL_NOEND}. + *

+ * When constructing a representation that lacks notions of positive or + * negative "infinity", one option is to simply map the above special + * values no differently than ordinary ones, and remember the two + * resulting representations as the "infinite" ones. If that is done + * without wraparound, the resulting "-infinity" value will precede all + * other PostgreSQL-representable dates and the resulting "+infinity" + * will follow them. + *
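+	 * For instance (a sketch only), an implementation mapping to
+	 * {@code LocalDate} that prefers distinguished "infinite" values
+	 * could write its {@code construct} as:
+	 *
+	 *  if ( DATEVAL_NOBEGIN == daysSincePostgresEpoch )
+	 *      return LocalDate.MIN;
+	 *  if ( DATEVAL_NOEND == daysSincePostgresEpoch )
+	 *      return LocalDate.MAX;
+	 *  return POSTGRES_EPOCH.plusDays(daysSincePostgresEpoch);
+	 *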

+ * The older {@code java.util.Date} cannot represent those values + * without wraparound; the two resulting values can still be saved as + * representing -infinity and +infinity, but will not have the expected + * ordering with respect to other values. They will both be quite far + * from the present. + */ + T construct(int daysSincePostgresEpoch); + + /** + * A reference implementation that maps to {@link LocalDate LocalDate}. + *

+ * The PostgreSQL "-infinity" and "+infinity" values are mapped to + * {@code LocalDate} instances matching (by {@code equals}) the special + * instances {@code NOBEGIN} and {@code NOEND} here, respectively. + */ + static class AsLocalDate implements Date + { + private AsLocalDate() // I am a singleton + { + } + + public static final AsLocalDate INSTANCE = new AsLocalDate(); + + /** + * {@code LocalDate} representing PostgreSQL's "infinitely early" + * date. + */ + public static final LocalDate NOBEGIN = + INSTANCE.construct(DATEVAL_NOBEGIN); + + /** + * {@code LocalDate} representing PostgreSQL's "infinitely late" + * date. + */ + public static final LocalDate NOEND = + INSTANCE.construct(DATEVAL_NOEND); + + @Override + public LocalDate construct(int daysSincePostgresEpoch) + { + return POSTGRES_EPOCH.plusDays(daysSincePostgresEpoch); + } + + public T store(LocalDate d, Date f) throws SQLException + { + if ( NOBEGIN.isAfter(d) || d.isAfter(NOEND) ) + throw new SQLDataException(String.format( + "date out of range: \"%s\"", d), "22008"); + + return f.construct((int)POSTGRES_EPOCH.until(d, DAYS)); + } + } + } + + /** + * The {@code TIME} type's PostgreSQL semantics: microseconds since + * midnight. + */ + @FunctionalInterface + public interface Time extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents microseconds since midnight, nonnegative + * and not exceeding {@code USECS_PER_DAY}. + *

+ * PostgreSQL does allow the value to exactly equal + * {@code USECS_PER_DAY}. 24:00:00 is considered a valid value. That + * may need extra attention if the representation to be constructed + * doesn't allow that. + */ + T construct(long microsecondsSinceMidnight); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Time} function possibly tailored ("curried") + * with the values from a PostgreSQL type modifier on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIME_PRECISION}. + */ + Time modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to {@link LocalTime LocalTime}. + *

+ * While PostgreSQL allows 24:00:00 as a valid time, {@code LocalTime} + * maxes out at the preceding nanosecond. That is still a value that + * can be distinguished, because PostgreSQL's time resolution is only + * to microseconds, so the PostgreSQL 24:00:00 value will be mapped + * to that. + *

+ * In the other direction, nanoseconds will be rounded to microseconds, + * so any value within the half-microsecond preceding {@code HOUR24} + * will become the PostgreSQL 24:00:00 value. + */ + static class AsLocalTime implements Time + { + private AsLocalTime() // I am a singleton + { + } + + public static final AsLocalTime INSTANCE = new AsLocalTime(); + + /** + * {@code LocalTime} representing the 24:00:00 time that PostgreSQL + * accepts but {@code LocalTime} does not. + *

+ * This {@code LocalTime} represents the immediately preceding + * nanosecond. That is still distinguishable from any other + * PostgreSQL time, because those have only microsecond + * resolution. + */ + public static final LocalTime HOUR24 = + LocalTime.ofNanoOfDay(1000L * USECS_PER_DAY - 1L); + + @Override + public LocalTime construct(long microsecondsSinceMidnight) + { + if ( USECS_PER_DAY == microsecondsSinceMidnight ) + return HOUR24; + + return LocalTime.ofNanoOfDay(1000L * microsecondsSinceMidnight); + } + + public T store(LocalTime t, Time f) + { + long nanos = t.toNanoOfDay(); + + return f.construct((500L + nanos) / 1000L); + } + } + } + + /** + * The {@code TIMETZ} type's PostgreSQL semantics: microseconds since + * midnight, accompanied by a time zone offset expressed in seconds. + */ + @FunctionalInterface + public interface TimeTZ extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The first argument represents microseconds since midnight, + * nonnegative and not exceeding {@code USECS_PER_DAY}, and + * the second is a time zone offset expressed in seconds, positive + * for locations west of the prime meridian. + *

+ * It should be noted that other common conventions, such as ISO 8601 + * and {@code java.time.ZoneOffset}, use positive offsets for locations + * east of the prime meridian, requiring a sign flip. + *

+ * Also noteworthy, as with {@link Time Time}, is that the first + * argument may exactly equal {@code USECS_PER_DAY}; 24:00:00 + * is a valid value to PostgreSQL. That may need extra attention if + * the representation to be constructed doesn't allow that. + * @param microsecondsSinceMidnight the time of day, in the zone + * indicated by the second argument + * @param secondsWestOfPrimeMeridian note that the sign of this time + * zone offset will be the opposite of that used in other common systems + * using positive values for offsets east of the prime meridian. + */ + T construct( + long microsecondsSinceMidnight, int secondsWestOfPrimeMeridian); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code TimeTZ} function possibly tailored ("curried") + * with the values from a PostgreSQL type modifier on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIME_PRECISION}. + */ + TimeTZ modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to + * {@link OffsetTime OffsetTime}. + *

+ * While PostgreSQL allows 24:00:00 as a valid time, Java's rules + * max out at the preceding nanosecond. That is still a value that + * can be distinguished, because PostgreSQL's time resolution is only + * to microseconds, so the PostgreSQL 24:00:00 value will be mapped + * to a value whose {@code LocalTime} component matches (with + * {@code equals}) {@link Time.AsLocalTime#HOUR24 AsLocalTime.HOUR24}, + * which is really one nanosecond shy of 24 hours. + *

+ * In the other direction, nanoseconds will be rounded to microseconds, + * so any value within the half-microsecond preceding {@code HOUR24} + * will become the PostgreSQL 24:00:00 value. + */ + static class AsOffsetTime implements TimeTZ + { + private AsOffsetTime() // I am a singleton + { + } + + public static final AsOffsetTime INSTANCE = new AsOffsetTime(); + + @Override + public OffsetTime construct( + long microsecondsSinceMidnight, int secondsWestOfPrimeMeridian) + { + ZoneOffset offset = + ZoneOffset.ofTotalSeconds( - secondsWestOfPrimeMeridian); + + LocalTime local = Time.AsLocalTime.INSTANCE + .construct(microsecondsSinceMidnight); + + return OffsetTime.of(local, offset); + } + + public T store(OffsetTime t, TimeTZ f) + { + int secondsWest = - t.getOffset().getTotalSeconds(); + + LocalTime local = t.toLocalTime(); + + return Time.AsLocalTime.INSTANCE + .store(local, micros -> f.construct(micros, secondsWest)); + } + } + } + + /** + * The {@code TIMESTAMP} type's PostgreSQL semantics: microseconds since + * midnight of the PostgreSQL epoch, without an assumed time zone. + */ + @FunctionalInterface + public interface Timestamp extends Contract.Scalar + { + /** + * The PostgreSQL "epoch" as a {@code java.time.LocalDateTime}. + */ + LocalDateTime POSTGRES_EPOCH = Date.POSTGRES_EPOCH.atStartOfDay(); + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents microseconds since midnight on + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}. + *

+ * Because no particular time zone is understood to apply, the exact + * corresponding point on a standard timeline cannot be identified, + * absent outside information. It is typically used to represent + * a timestamp in the local zone, whatever that is. + *
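+	 * For example (a sketch only; micros stands for the argument), a
+	 * caller wanting a zoned value could attach a zone afterward:
+	 *
+	 *  LocalDateTime ldt = AsLocalDateTime.INSTANCE.construct(micros);
+	 *  ZonedDateTime zdt = ldt.atZone(ZoneId.systemDefault());
+	 *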

+ * The argument represents microseconds since + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}, unless it is one of + * the special values {@link #DT_NOBEGIN DT_NOBEGIN} or + * {@link #DT_NOEND DT_NOEND}. + *

+ * When constructing a representation that lacks notions of positive or + * negative "infinity", one option is to simply map the above special + * values no differently than ordinary ones, and remember the two + * resulting representations as the "infinite" ones. If that is done + * without wraparound, the resulting "-infinity" value will precede all + * other PostgreSQL-representable dates and the resulting "+infinity" + * will follow them. + *

+ * The older {@code java.util.Date} cannot represent those values + * without wraparound; the two resulting values can still be saved as + * representing -infinity and +infinity, but will not have the expected + * ordering with respect to other values. They will both be quite far + * from the present. + */ + T construct(long microsecondsSincePostgresEpoch); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Timestamp} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIMESTAMP_PRECISION}. + */ + Timestamp modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to + * {@link LocalDateTime LocalDateTime}. + *

+ * The PostgreSQL "-infinity" and "+infinity" values are mapped to + * {@code LocalDateTime} instances matching (by {@code equals}) + * the special instances {@code NOBEGIN} and {@code NOEND} here, + * respectively. + */ + static class AsLocalDateTime implements Timestamp + { + private AsLocalDateTime() // I am a singleton + { + } + + public static final AsLocalDateTime INSTANCE = + new AsLocalDateTime(); + + /** + * {@code LocalDateTime} representing PostgreSQL's "infinitely + * early" timestamp. + */ + public static final LocalDateTime NOBEGIN = + INSTANCE.construct(DT_NOBEGIN); + + /** + * {@code LocalDateTime} representing PostgreSQL's "infinitely + * late" timestamp. + */ + public static final LocalDateTime NOEND = + INSTANCE.construct(DT_NOEND); + + @Override + public LocalDateTime construct(long microsecondsSincePostgresEpoch) + { + return + POSTGRES_EPOCH.plus(microsecondsSincePostgresEpoch, MICROS); + } + + public T store(LocalDateTime d, Timestamp f) + throws SQLException + { + try + { + return f.construct(POSTGRES_EPOCH.until(d, MICROS)); + } + catch ( ArithmeticException e ) + { + throw new SQLDataException(String.format( + "timestamp out of range: \"%s\"", d), "22008", e); + } + } + } + } + + /** + * The {@code TIMESTAMPTZ} type's PostgreSQL semantics: microseconds since + * midnight UTC of the PostgreSQL epoch. + */ + @FunctionalInterface + public interface TimestampTZ extends Contract.Scalar + { + /** + * The PostgreSQL "epoch" as a {@code java.time.OffsetDateTime}. + */ + OffsetDateTime POSTGRES_EPOCH = + OffsetDateTime.of(Timestamp.POSTGRES_EPOCH, UTC); + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents microseconds since midnight UTC on + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}. + *

+ * Given any desired local time zone, conversion to/from this value + * is possible if the rules for that time zone as of the represented + * date are available. + *
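+	 * For example (a sketch only; micros stands for the argument, and the
+	 * zone chosen here is arbitrary):
+	 *
+	 *  OffsetDateTime utc = AsOffsetDateTime.INSTANCE.construct(micros);
+	 *  ZonedDateTime local =
+	 *      utc.atZoneSameInstant(ZoneId.of("Europe/Stockholm"));
+	 *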

+ * The argument represents microseconds since + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}, unless it is one of + * the special values {@link #DT_NOBEGIN DT_NOBEGIN} or + * {@link #DT_NOEND DT_NOEND}. + *

+ * When constructing a representation that lacks notions of positive or + * negative "infinity", one option is to simply map the above special + * values no differently than ordinary ones, and remember the two + * resulting representations as the "infinite" ones. If that is done + * without wraparound, the resulting "-infinity" value will precede all + * other PostgreSQL-representable dates and the resulting "+infinity" + * will follow them. + *

+ * The older {@code java.util.Date} cannot represent those values + * without wraparound; the two resulting values can still be saved as + * representing -infinity and +infinity, but will not have the expected + * ordering with respect to other values. They will both be quite far + * from the present. + */ + T construct(long microsecondsSincePostgresEpochUTC); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code TimestampTZ} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIMESTAMP_PRECISION}. + */ + TimestampTZ modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to + * {@link OffsetDateTime OffsetDateTime}. + *

+ * A value from PostgreSQL is always understood to be at UTC, and + * will be mapped always to an {@code OffsetDateTime} with UTC as + * its offset. + *

+ * A value from Java is adjusted by its offset so that PostgreSQL will + * always be passed {@code microsecondsSincePostgresEpochUTC}. + *

+ * The PostgreSQL "-infinity" and "+infinity" values are mapped to + * instances whose corresponding {@code LocalDateTime} at UTC will match + * (by {@code equals}) the constants {@code NOBEGIN} and {@code NOEND} + * of {@code AsLocalDateTime}, respectively. + */ + static class AsOffsetDateTime implements TimestampTZ + { + private AsOffsetDateTime() // I am a singleton + { + } + + public static final AsOffsetDateTime INSTANCE = + new AsOffsetDateTime(); + + @Override + public OffsetDateTime construct(long microsecondsSincePostgresEpoch) + { + return + POSTGRES_EPOCH.plus(microsecondsSincePostgresEpoch, MICROS); + } + + public T store(OffsetDateTime d, TimestampTZ f) + throws SQLException + { + try + { + return f.construct(POSTGRES_EPOCH.until(d, MICROS)); + } + catch ( ArithmeticException e ) + { + throw new SQLDataException(String.format( + "timestamp out of range: \"%s\"", d), "22008", e); + } + } + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Geometric.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Geometric.java new file mode 100644 index 000000000..a07052d6b --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Geometric.java @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import org.postgresql.pljava.Adapter.Contract; +import org.postgresql.pljava.Adapter.Dispenser; +import org.postgresql.pljava.Adapter.PullDispenser; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code GEOMETRIC} type category. + */ +public interface Geometric +{ + /** + * The {@code POINT} type's PostgreSQL semantics: a pair of {@code float8} + * coordinates. + */ + @FunctionalInterface + public interface Point extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + */ + T construct(double x, double y); + } + + /** + * The {@code LSEG} type's PostgreSQL semantics: two endpoints. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. + */ + @FunctionalInterface + public interface LSeg extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param endpoints a dispenser that will dispense a {@code Point} for + * index 0 and index 1. + */ + T construct(PullDispenser> endpoints); + } + + /** + * The {@code PATH} type's PostgreSQL semantics: vertex points and whether + * closed. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. + */ + @FunctionalInterface + public interface Path extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. 
+ * @param nPoints the number of points on the path + * @param closed whether the path should be understood to include + * a segment joining the last point to the first one. + * @param points a dispenser that will dispense a {@code Point} for + * each index 0 through nPoint - 1. + */ + T construct( + int nPoints, boolean closed, PullDispenser> points); + } + + /** + * The {@code LINE} type's PostgreSQL semantics: coefficients of its + * general equation Ax+By+C=0. + */ + @FunctionalInterface + public interface Line extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + */ + T construct(double A, double B, double C); + } + + /** + * The {@code BOX} type's PostgreSQL semantics: two corner points. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. + */ + @FunctionalInterface + public interface Box extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * As stored, the corner point at index 0 is never below or to the left + * of that at index 1. This may be achieved by permuting the points + * or their coordinates obtained as input, in any way that preserves + * the box. + * @param corners a dispenser that will dispense a {@code Point} for + * index 0 and at index 1. + */ + T construct(PullDispenser> corners); + } + + /** + * The {@code POLYGON} type's PostgreSQL semantics: vertex points and + * a bounding box. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the boundingBox dispenser used within + * the implementing body. + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the vertices dispenser used within + * the implementing body. + */ + @FunctionalInterface + public interface Polygon extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param nVertices the number of vertices in the polygon + * @param boundingBox a dispenser from which the bounding box may be + * obtained. + * @param vertices a dispenser from which a vertex {@code Point} may be + * obtained for each index 0 through nVertices - 1. + */ + T construct( + int nVertices, Dispenser> boundingBox, + PullDispenser> vertices); + } + + /** + * The {@code CIRCLE} type's PostgreSQL semantics: center point and radius. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. + */ + @FunctionalInterface + public interface Circle extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + */ + T construct(Dispenser> center, double radius); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Internal.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Internal.java new file mode 100644 index 000000000..49e344471 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Internal.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces, not quite exactly + * corresponding to PostgreSQL's {@code INTERNAL} category; there are some + * fairly "internal" types that ended up in the {@code USER} category too, + * for whatever reason. + */ +public interface Internal +{ + /** + * The {@code tid} type's PostgreSQL semantics: a block ID and + * a row index within that block. + */ + @FunctionalInterface + public interface Tid extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. 
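+	 * For example (a sketch only), an implementation producing a textual
+	 * form like PostgreSQL's could write its {@code construct} as:
+	 *
+	 *  return String.format("(%d,%d)",
+	 *      Integer.toUnsignedLong(blockId),
+	 *      Short.toUnsignedInt(offsetNumber));
+	 *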
+ * @param blockId (treat as unsigned) identifies the block in a table + * containing the target row + * @param offsetNumber (treat as unsigned) the index of the target row + * within the identified block + */ + T construct(int blockId, short offsetNumber); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Money.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Money.java new file mode 100644 index 000000000..d4f35ef19 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Money.java @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.math.BigDecimal; // javadoc + +import java.text.NumberFormat; // javadoc + +import java.util.Currency; // javadoc +import java.util.Locale; // javadoc + +import org.postgresql.pljava.Adapter.Contract; + +/** + * The {@code MONEY} type's PostgreSQL semantics: an integer value, whose + * scaling, display format, and currency are all determined by a + * user-settable configuration setting. + *

+ * This type is a strange duck in PostgreSQL. It is stored + * as a (64 bit) integer, and must have a scaling applied on input and + * output to the appropriate number of decimal places. + *

+ * The appropriate scaling, the symbols for decimal point and grouping + * separators, how the sign is shown, and even what currency it + * represents and the currency symbol to use, are all determined + * from the locale specified by the {@code lc_monetary} configuration + * setting, which can be changed within any session with no special + * privilege at any time. That may make {@code MONEY} the only data type + * in PostgreSQL where a person can use a single {@code SET} command to + * instantly change what an entire table of data means. + *

+ * For example, this little catalog of products: + *

+ * => SELECT * FROM products;
+ *  product |       price
+ * ---------+--------------------
+ *  widget  |             $19.00
+ *  tokamak | $19,000,000,000.00
+ *
+ *

+ * can be instantly marked down by about 12 percent (at the exchange + * rates looked up at this writing): + *

+ * => SET lc_monetary TO 'ja_JP';
+ * SET
+ * => SELECT * FROM products;
+ *  product |        price
+ * ---------+---------------------
+ *  widget  |             ￥1,900
+ *  tokamak | ￥1,900,000,000,000
+ *
+ *

+ * or marked up by roughly the same amount: + *

+ * => SET lc_monetary TO 'de_DE@euro';
+ * SET
+ * => SELECT * FROM products;
+ *  product |        price
+ * ---------+---------------------
+ *  widget  |             19,00 €
+ *  tokamak | 19.000.000.000,00 €
+ *
+ *

+ * or marked up even further (as of this writing, 26%): + *

+ * => SET lc_monetary TO 'en_GB';
+ * SET
+ * => SELECT * FROM products;
+ *  product |       price
+ * ---------+--------------------
+ *  widget  |             £19.00
+ *  tokamak | £19,000,000,000.00
+ *
+ *

+ * Obtaining the locale information in Java + *

+ * Before the integer value provided here can be correctly scaled or + * interpreted, the locale-dependent information must be obtained. + * In Java, that can be done in six steps: + *

    + *
  1. Obtain the string value of PostgreSQL's {@code lc_monetary} + * configuration setting. + *
  2. Let's not talk about step 2 just yet. + *
  3. Obtain a {@code Locale} object by passing the BCP 47 tag to + * {@link Locale#forLanguageTag Locale.forLanguageTag}. + *
  4. Pass the {@code Locale} object to + * {@link NumberFormat#getCurrencyInstance(Locale) + NumberFormat.getCurrencyInstance}. + *
  5. From that, obtain an actual instance of {@code Currency} with + * {@link NumberFormat#getCurrency NumberFormat.getCurrency}. + *
  6. Obtain the correct power of ten for scaling from + * {@link Currency#getDefaultFractionDigits + Currency.getDefaultFractionDigits}. + *
+ *

+ * The {@code NumberFormat} obtained in step 4 knows all the appropriate + * formatting details, but will not automatically scale the integer + * value here by the proper power of ten. That must be done explicitly, + * and to avoid compromising the precision objectives of the + * {@code MONEY} type, should be done with something like a + * {@link BigDecimal BigDecimal}. If fmt was obtained + * in step 4 above and scale is the value from step 6: + *

+ * BigDecimal bd =
+ *     BigDecimal.valueOf(scaledToInteger).movePointLeft(scale);
+ * String s = fmt.format(bd);
+ *
+ *

+ * would produce the correctly-formatted value, where + * scaledToInteger is the parameter supplied to this interface + * method. + *
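+ * The fmt and scale used above could be obtained (a sketch,
+ * assuming the BCP 47 tag from step 2 is already in hand as tag) with:
+ *
+ * Locale locale = Locale.forLanguageTag(tag);                   // step 3
+ * NumberFormat fmt = NumberFormat.getCurrencyInstance(locale);  // step 4
+ * Currency currency = fmt.getCurrency();                        // step 5
+ * int scale = currency.getDefaultFractionDigits();              // step 6
+ *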

+ * If the format is not needed, the scale can be obtained in fewer steps + * by passing the {@code Locale} from step 3 directly to + * {@link Currency#getInstance(Locale) Currency.getInstance}. + * That would be enough to build a simple reference implementation for + * this data type that would return a {@code BigDecimal} with its point + * moved left by the scale. + *
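+ * With scale obtained that way, such a reference implementation's
+ * construct could be sketched simply as:
+ *
+ * return BigDecimal.valueOf(scaledToInteger).movePointLeft(scale);
+ *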

+ * Now let's talk about step 2. + *

+ * Java's locale support is based on BCP 47, a format for identifiers + * standardized by + * IETF to ensure that they are reliable and specific. + *

+ * The string obtained from the {@code lc_monetary} setting in step 1 + * above is, most often, a string that makes sense to the underlying + * operating system's C library, using some syntax that predated BCP 47, + * and likely demonstrates all of the problems BCP 47 was created to + * overcome. + *

+ * From a first glance at a few simple examples, it can appear that + * replacing some underscores with hyphens could turn some simple + * OS-library strings into BCP 47 tags, but that is far from the general + * case, which is full of nonobvious rules, special cases, and + * grandfather clauses. + *

+ * A C library, {@code liblangtag}, is available to perform exactly that + * mapping, and weighs in at about two and a half megabytes. The library + * might be present on the system where PostgreSQL is running, in which + * case it could be used in step 2, at the cost of a native call. + *

+ * If PostgreSQL was built with ICU, a native method could accomplish + * (as nearly as practical) the same thing by calling + * {@code uloc_canonicalize} followed by {@code uloc_toLanguageTag}; or, + * if the ICU4J Java library is available, + * {@code ULocale.createCanonical} could be used to the same effect. + *

+ * It might be simplest to just use a native call to obtain the + * scaling and other needed details from the underlying operating system + * library. + *

+ * Because of step 2's complexity, PL/Java does not here supply the + * simple reference implementation to {@code BigDecimal} proposed above. + */ +@FunctionalInterface +public interface Money extends Contract.Scalar +{ + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * It might be necessary to extend this interface with extra parameters + * (or to use the {@code Modifier} mechanism) to receive the needed + * scaling and currency details, and require the corresponding + * {@code Adapter} (which could no longer be pure Java) to make + * the needed native calls to obtain those. + * @param scaledToInteger integer value that must be scaled according + * to the setting of the lc_monetary configuration setting, + * and represents a value in the currency also determined by that + * setting. + */ + T construct(long scaledToInteger); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Network.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Network.java new file mode 100644 index 000000000..9bfb2e542 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Network.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.net.StandardProtocolFamily; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code NETWORK} type category (and MAC addresses, which, for arcane reasons, + * are not in that category). + */ +public interface Network +{ + /** + * The {@code INET} and {@code CIDR} types' PostgreSQL semantics: the + * family ({@code INET} or {@code INET6}), the number of network prefix + * bits, and the address bytes in network byte order. + */ + @FunctionalInterface + public interface Inet extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param addressFamily INET or INET6 + * @param networkPrefixBits nonnegative, not greater than 32 for INET + * or 128 for INET6 (either maximum value indicates the address is for + * a single host rather than a network) + * @param networkOrderAddress the address bytes in network order. When + * the type is CIDR, only the leftmost networkPrefixBits bits are + * allowed to be nonzero. The array does not alias any internal storage + * and may be used as desired. + */ + T construct( + StandardProtocolFamily addressFamily, int networkPrefixBits, + byte[] networkOrderAddress); + } + + /** + * The {@code macaddr} and {@code macaddr8} types' PostgreSQL semantics: + * a byte array (6 or 8 bytes, respectively)., of which byte 0 is the one + * appearing first in the text representation (and stored in the member + * named a of the C struct). + */ + @FunctionalInterface + public interface MAC extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param address array of 6 (macaddr) or 8 (macaddr8) bytes, of which + * byte 0 is the one appearing first in the text representation (and + * stored in the member named a of the C struct). The array + * does not alias any internal storage and may be used as desired. 
+ */ + T construct(byte[] address); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Numeric.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Numeric.java new file mode 100644 index 000000000..4abaa6eaf --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Numeric.java @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import static java.lang.Math.multiplyExact; + +import java.math.BigDecimal; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * The {@code NUMERIC} type's PostgreSQL semantics: a sign (or indication + * that the value is NaN, + infinity, or - infinity), a display scale, + * a weight, and zero or more base-ten-thousand digits. + *

+ * This data type can have a type modifier that specifies a maximum + * precision (total number of base-ten digits to retain) and a maximum scale + * (how many of those base-ten digits are right of the decimal point). + *

+ * A curious feature of the type is that, when a type modifier is specified, + * the value becomes "anchored" to the decimal point: all of its decimal + * digits must be within precision places of the decimal point, + * or an error is reported. This rules out the kind of values that can crop + * up in physics, for example, where there might be ten digits of precision + * but those are twenty places away from the decimal point. This limitation + * apparently follows from the ISO SQL definitions of the precision and + * scale. + *

+ * However, when PostgreSQL {@code NUMERIC} is used with no type modifier, + * such values are not rejected, and are stored efficiently, just as you + * would expect, keeping only the digits that are needed and adjusting + * weight for the distance to the decimal point. + *

+ * In mapping to and from a Java representation, extra care may be needed + * if that capability is to be preserved. + */ +@FunctionalInterface +public interface Numeric extends Contract.Scalar +{ + /** + * The maximum precision that may be specified in a {@code numeric} type + * modifier. + *

+ * Without a modifier, the type is subject only to its implementation + * limits, which are much larger. + */ + int NUMERIC_MAX_PRECISION = 1000; + + /** + * The minimum 'scale' that may be specified in a {@code numeric} type + * modifier in PostgreSQL 15 or later. + *

+ * Negative scale indicates rounding left of the decimal point. A scale of + * -1000 indicates rounding to a multiple of 10<sup>1000</sup>. + *

+ * Prior to PostgreSQL 15, a type modifier did not allow a negative + * scale. + *

+ * Without a modifier, the type is subject only to its implementation + * limits. + */ + int NUMERIC_MIN_SCALE = -1000; + + /** + * The maximum 'scale' that may be specified in a {@code numeric} type + * modifier in PostgreSQL 15 or later. + *

+ * When scale is positive, the digits string represents a value smaller by + * the indicated power of ten. When scale exceeds precision, the digits + * string represents digits that appear following (scale - precision) zeros + * to the right of the decimal point. + *

+ * Prior to PostgreSQL 15, a type modifier did not allow a scale greater + * than the specified precision. + *

+ * Without a modifier, the type is subject only to its implementation + * limits. + */ + int NUMERIC_MAX_SCALE = 1000; + + /** + * The base of the 'digit' elements supplied by PostgreSQL. + *

+ * This is also built into the parameter name base10000Digits and + * is highly unlikely to change; a comment in the PostgreSQL code since 2015 + * confirms "values of {@code NBASE} other than 10000 are considered of + * historical interest only and are no longer supported in any sense". + */ + int NBASE = 10000; + + /** + * Decimal digits per {@code NBASE} digit. + */ + int DEC_DIGITS = 4; + + /** + * Label to distinguish positive, negative, and three kinds of special + * values. + */ + enum Kind { POSITIVE, NEGATIVE, NAN, POSINFINITY, NEGINFINITY } + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * A note about displayScale: when positive, it is information, + * stored with the PostgreSQL value, that conveys how far (right of the + * units place) the least significant decimal digit of the intended value + * falls. + *

+ * An apparentScale can also be computed: + *

+	 *  apparentScale = (1 + weight - base10000Digits.length) * (- DEC_DIGITS)
+	 *
+ * This computation has a simple meaning, and gives the distance, right of + * the units place, of the least-significant decimal digit in the stored + * representation. When negative, of course, it means that least stored + * digit falls left of the units place. + *

+ * Because of the {@code DEC_DIGITS} factor, apparentScale + * computed this way will always be a multiple of four, the next such (in + * the direction of more significant digits) from the position of the + * actual least significant digit in the value. So apparentScale + * may exceed displayScale by as much as three, and, if so, + * displayScale should be used in preference, to avoid + * overstating the value's significant figures. + *
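As a minimal sketch (not part of this patch; the helper name is hypothetical), the rule just described can be written directly against the construct parameters, mirroring the interface's DEC_DIGITS constant:

    // Hypothetical helper: derive apparentScale as above and pick the scale to
    // trust. When displayScale is clamped to zero and apparentScale is negative,
    // this falls back to apparentScale, accepting it may overstate by up to 3.
    static int effectiveScale(int displayScale, int weight, short[] base10000Digits)
    {
        final int DEC_DIGITS = 4; // decimal digits per base-ten-thousand digit
        int apparentScale = (1 + weight - base10000Digits.length) * -DEC_DIGITS;
        if ( displayScale > 0 || apparentScale > displayScale )
            return displayScale;
        return apparentScale;
    }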

+ * Likewise, if displayScale is positive, it should be used even + * if it exceeds apparentScale. In that case, it conveys that + * PostgreSQL knows additional digits are significant, even though they were + * zero and it did not store them. + *

+ * However, the situation when displayScale is zero is less + * clear-cut, because PostgreSQL simply disallows it ever to be negative. + * This clamping of displayScale loses information, such that a + * value with displayScale zero and apparentScale + * negative may represent any of: + *

+ *  • A limited-precision value with non-significant trailing zeros (from -apparentScale to as many as -apparentScale+3 of them)
+ *  • A precise integer, all of whose -apparentScale non-stored significant digits just happened to be zeros
+ *  • or anything in between.
+ *

+ * That these cases can't be distinguished is inherent in PostgreSQL's + * representation of the type, and any implementation of this interface will + * need to make and document a choice of how to proceed. If the choice is + * to rely on apparentScale, then the fact that it is a multiple + * of four and may overstate, by up to three, the number of significant + * digits (as known, perhaps, to a human who assigned the value) has to be + * lived with; when displayScale is clamped to zero there simply + * isn't enough information to do better. + *

+ * For example, consider this adapter applied to the result of: + *

+	 * SELECT 6.62607015e-34 AS planck, 6.02214076e23 AS avogadro;
+	 *
+ *

+ * Planck's constant (a small number defined with nine significant places) + * will be presented with displayScale=42, weight=-9, + * and base10000Digits=[662, 6070, 1500]. + * Because apparentScale works out to 44 (placing the least + * stored digit 44 places right of the decimal point, a multiple of 4) but + * displayScale is only 42, it is clear that the two trailing + * zeroes in the last element are non-significant, and the value has not + * eleven but only nine significant figures. + *

+ * In contrast, Avogadro's number (a large one, defined also with nine + * significant places) will arrive with weight=5 and + * base10000Digits=[6022, 1407, 6000], but + * displayScale will not be -15; it is clamped to zero instead. + * If an implementation of this contract chooses to compute + * apparentScale, that will be -12 (the next larger multiple of + * four) and the value will seem to have gained three extra significant + * figures. On the other hand, in an implementation that takes the + * clamped-to-zero displayScale at face value, the number will + * seem to have gained fifteen extra significant figures. + * @param kind POSITIVE, NEGATIVE, POSINFINITY, NEGINFINITY, or NAN + * @param displayScale nominal precision, nonnegative; the number of + * base ten digits right of the decimal point. If this exceeds + * the number of right-of-decimal digits determined by the stored value, + * the excess represents a number of trailing decimal zeroes that are + * significant but trimmed from storage. + * @param weight indicates the power of ten thousand which the first + * base ten-thousand digit is taken is taken to represent. If the array + * base10000Digits has length one, and that one digit has the + * value 3, and weight is zero, the value is 3. If + * weight is 1, the value is 30000, and if weight + * is -1, the value is 0.0003. + * @param base10000Digits each array element is a nonnegative value not + * above 9999, representing a single digit of a base-ten-thousand + * number. The element at index zero is the most significant. The caller + * may pass a zero-length array, but may not pass null. The array is + * unshared and may be used as desired. + */ + T construct(Kind kind, int displayScale, int weight, + short[] base10000Digits) throws SQLException; + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Numeric} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * on the type. + *

+ * If specified, precision must be at least one and + * not greater than {@code NUMERIC_MAX_PRECISION}, and scale + * must be not less than {@code NUMERIC_MIN_SCALE} nor more than + * {@code NUMERIC_MAX_SCALE}. + * @param specified true if a type modifier was specified, false if + * omitted + * @param precision the maximum number of base-ten digits to be + * retained, counting those on both sides of the decimal point + * @param scale maximum number of base-ten digits to be retained + * to the right of the decimal point. + */ + Numeric<T> modify(boolean specified, int precision, int scale); + } + + /** + * A reference implementation that maps to {@link BigDecimal BigDecimal} + * (but cannot represent all possible values). + *

+ * A Java {@code BigDecimal} cannot represent the not-a-number or positive + * or negative infinity values possible for a PostgreSQL {@code NUMERIC}. + */ + static class AsBigDecimal implements Numeric<BigDecimal> + { + private AsBigDecimal() // I am a singleton + { + } + + public static final AsBigDecimal INSTANCE = new AsBigDecimal(); + + /** + * Produces a {@link BigDecimal} representation of the {@code NUMERIC} + * value, or throws an exception if the value is not-a-number or + * positive or negative infinity. + *

+ * In resolving the ambiguity when displayScale is zero, + * this implementation constructs a {@code BigDecimal} with significant + * figures inferred from the base10000Digits array's length, + * where decimal digits are grouped in fours, so the + * {@code BigDecimal}'s {@link BigDecimal#scale() scale} method will + * always return a multiple of four in such cases. Therefore, from the + * query

+		 * SELECT 6.62607015e-34 AS planck, 6.02214076e23 AS avogadro;
+		 *
+ * this conversion will produce the {@code BigDecimal} 6.62607015E-34 + * for planck ({@code scale} will return 42, as expected), + * but will produce 6.02214076000E+23 for avogadro, showing + * three unexpected trailing zeros; {@code scale()} will not return -15 + * as expected, but the next larger multiple of four, -12. + * @throws SQLException 22000 if the value is NaN or +/- infinity. + */ + @Override + public BigDecimal construct( + Kind kind, int displayScale, int weight, short[] base10000Digits) + throws SQLException + { + switch ( kind ) + { + case NAN: + case POSINFINITY: + case NEGINFINITY: + throw new SQLDataException( + "cannot represent PostgreSQL numeric " + kind + + " as Java BigDecimal", "22000"); + default: + } + + int scale = multiplyExact(weight, - DEC_DIGITS); + + if ( 0 == base10000Digits.length ) + return BigDecimal.valueOf(0L, scale); + + // check that the final value also won't wrap around + multiplyExact(1 + weight - base10000Digits.length, - DEC_DIGITS); + + BigDecimal bd = BigDecimal.valueOf(base10000Digits[0], scale); + + for ( int i = 1 ; i < base10000Digits.length ; ++ i ) + { + scale += DEC_DIGITS; + bd = bd.add(BigDecimal.valueOf(base10000Digits[i], scale)); + } + + /* + * The final value of scale from the loop above is + * (1 + weight - base10000Digits.length) * (- DEC_DIGITS), so + * will always be a multiple of DEC_DIGITS (i.e. 4). It's also + * the scale of the BigDecimal constructed so far, and represents + * the position, right of the decimal point, of the least stored + * digit. Because of that DEC-DIGITS granularity, though, it may + * reflect up to three trailing zeros from the last element of + * base10000Digits that are not really significant. When scale and + * displayScale are positive (the value extends right of the decimal + * point), we can use displayScale to correct the scale of the + * BigDecimal. (This 'correction' applies even when displayScale + * is greater than scale; that means PostgreSQL knows even more + * trailing zeros are significant, and simply avoided storing them.) + * + * When scale ends up negative, though (the least stored digit falls + * somewhere left of the units place), and displayScale is zero, + * we get no such help, because PostgreSQL simply clamps that value + * to zero. We are on our own to decide whether we are looking at + * + * a) a value of limited precision, with (- scale) non-significant + * trailing zeros (and possibly up to three more) + * b) a precise integer value, all of whose (- scale) trailing + * digits happen to be zero (figure the odds...) + * c) anything in between. + * + * The Java BigDecimal will believe whatever we tell it and use the + * corresponding amount of memory, so on efficiency as well as + * plausibility grounds, we'll tell it (a). The scale will still be + * that multiple of four, though, so we may still have bestowed + * significance upon up to three trailing zeros, compared to what a + * human who assigned the value might think. That cannot affect + * roundtripping of the value back to PostgreSQL, because indeed the + * corresponding PostgreSQL forms are identical, so PostgreSQL can't + * notice any difference; that's how we got into this mess. + */ + if ( displayScale > 0 || scale > displayScale ) + { + assert displayScale >= 1 + scale - DEC_DIGITS; + bd = bd.setScale(displayScale); + } + + return Kind.POSITIVE == kind ? 
bd : bd.negate(); + } + + public T store(BigDecimal bd, Numeric f) + throws SQLException + { + throw new UnsupportedOperationException( + "no BigDecimal->NUMERIC store for now"); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Timespan.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Timespan.java new file mode 100644 index 000000000..862f8319a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Timespan.java @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.util.EnumSet; +import static java.util.Collections.unmodifiableSet; +import static java.util.EnumSet.of; +import static java.util.EnumSet.noneOf; +import static java.util.EnumSet.range; +import java.util.OptionalInt; +import java.util.Set; + +import org.postgresql.pljava.Adapter.Contract; + +/* + * For the javadoc: + */ +import java.time.Duration; +import java.time.Period; +import java.time.chrono.ChronoPeriod; +import java.time.temporal.ChronoUnit; +import java.time.temporal.TemporalAmount; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code TIMESPAN} type category (which, at present, includes the single + * type {@code INTERVAL}). + */ +public interface Timespan +{ + /** + * The {@code INTERVAL} type's PostgreSQL semantics: separate microseconds, + * days, and months components, independently signed. + *

+ * A type modifier can specify field-presence bits, and precision (number of + * seconds digits to the right of the decimal point). An empty fields set + * indicates that fields were not specified. + *

Infinitely negative or positive intervals

+ *

+ * Starting with PostgreSQL 17, intervals whose three components are + * {@code (Long.MIN_VALUE, Integer.MIN_VALUE, Integer.MIN_VALUE)} or + * {@code (Long.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE)} have the + * semantics of infinitely negative or positive intervals, respectively. + * In PostgreSQL versions before 17, they are simply the most negative or + * positive representable finite intervals. + *
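For illustration only (not part of this patch; the helper names are hypothetical), an implementation targeting PostgreSQL 17 or later might recognize those sentinels along these lines; on earlier servers the same component values are just ordinary finite extremes:

    // Hypothetical helpers recognizing the PostgreSQL 17 infinity encodings.
    static boolean isNegativeInfinity(long microseconds, int days, int months)
    {
        return microseconds == Long.MIN_VALUE
            && days == Integer.MIN_VALUE && months == Integer.MIN_VALUE;
    }

    static boolean isPositiveInfinity(long microseconds, int days, int months)
    {
        return microseconds == Long.MAX_VALUE
            && days == Integer.MAX_VALUE && months == Integer.MAX_VALUE;
    }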

Why no reference implementation?

+ *

+ * The types in the {@link Datetime Datetime} interface come with reference + * implementations returning Java's JSR310 {@code java.time} types. + *

+ * For PostgreSQL {@code INTERVAL}, there are two candidate JSR310 concrete + * types, {@link Period Period} and {@link Duration Duration}, each of which + * would be appropriate for a different subset of PostgreSQL + * {@code INTERVAL} values. + *

+ * {@code Period} is appropriate for the months and days components. + * A {@code Period} treats the length of a day as subject to daylight + * adjustments following time zone rules, as does PostgreSQL. + *

+ * {@code Duration} is suitable for the sub-day components. It also allows + * access to a "day" field, but treats that field, unlike PostgreSQL, as + * having invariant 24-hour width. + *
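As a sketch only (not part of this patch; the class and field names are hypothetical), an application could pair the two types, assigning months and days to the Period and the microseconds to the Duration:

    import java.time.Duration;
    import java.time.Period;
    import java.time.temporal.ChronoUnit;
    import java.util.Map;

    import org.postgresql.pljava.adt.Timespan.Interval;

    class IntervalExamples
    {
        // Split the three stored components into the two JSR310 types.
        static final Interval<Map.Entry<Period,Duration>> AS_PERIOD_AND_DURATION =
            (microseconds, days, months) -> Map.entry(
                Period.of(0, months, days),                    // calendar-rule part
                Duration.of(microseconds, ChronoUnit.MICROS)); // fixed-width part
    }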

+ * Both share the superinterface {@link TemporalAmount TemporalAmount}. That + * interface itself is described as "a framework-level interface that should + * not be widely used in application code", with the recommendation that new + * concrete types implementing it be created instead. + *

+ * PostgreSQL's {@code INTERVAL} could be represented by a concrete type + * that implements {@code TemporalAmount} or, preferably (because its days + * and months components are subject to rules of a chronology), its + * subinterface {@link ChronoPeriod ChronoPeriod}. The most natural such + * implementation would have {@link TemporalAmount#getUnits getUnits} return + * {@link ChronoUnit#MONTHS MONTHS}, {@link ChronoUnit#DAYS DAYS}, and + * {@link ChronoUnit#MICROS MICROS}, except for instances representing the + * infinitely negative or positive intervals, which would instead use the unit + * {@link ChronoUnit#FOREVER FOREVER} with a negative or positive value. + *
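A minimal sketch of such a carrier (again not part of this patch, implementing only TemporalAmount rather than ChronoPeriod, ignoring the infinity sentinels, and with all names hypothetical) could look like:

    import java.time.temporal.ChronoUnit;
    import java.time.temporal.Temporal;
    import java.time.temporal.TemporalAmount;
    import java.time.temporal.TemporalUnit;
    import java.time.temporal.UnsupportedTemporalTypeException;
    import java.util.List;

    final class PGIntervalAmount implements TemporalAmount
    {
        private final long micros;
        private final int days;
        private final int months;

        PGIntervalAmount(long micros, int days, int months)
        {
            this.micros = micros;
            this.days = days;
            this.months = months;
        }

        @Override
        public List<TemporalUnit> getUnits()
        {
            return List.of(ChronoUnit.MONTHS, ChronoUnit.DAYS, ChronoUnit.MICROS);
        }

        @Override
        public long get(TemporalUnit unit)
        {
            if ( ChronoUnit.MONTHS == unit )
                return months;
            if ( ChronoUnit.DAYS == unit )
                return days;
            if ( ChronoUnit.MICROS == unit )
                return micros;
            throw new UnsupportedTemporalTypeException("unit: " + unit);
        }

        @Override
        public Temporal addTo(Temporal t)
        {
            return t.plus(months, ChronoUnit.MONTHS)
                    .plus(days, ChronoUnit.DAYS)
                    .plus(micros, ChronoUnit.MICROS);
        }

        @Override
        public Temporal subtractFrom(Temporal t)
        {
            return t.minus(micros, ChronoUnit.MICROS)
                    .minus(days, ChronoUnit.DAYS)
                    .minus(months, ChronoUnit.MONTHS);
        }
    }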

+ * In the datatype library that comes with the PGJDBC-NG driver, there is + * a class {@code com.impossibl.postgres.api.data.Interval} that does + * implement {@code TemporalAmount} (but not {@code ChronoPeriod}) and + * internally segregates the PostgreSQL {@code INTERVAL} components into + * a {@code Period} and a {@code Duration}. An application with that library + * available could use an implementation of this functional interface that + * would return instances of that class. As of PGJDBC-NG 0.8.9, the class + * does not seem to have a representation for the PostgreSQL 17 infinite + * intervals. Its {@code getUnits} method returns a longer list of units + * than needed to naturally represent the PostgreSQL type. + *

+ * The PGJDBC driver includes the {@code org.postgresql.util.PGInterval} + * class for the same purpose; that one does not derive from any JSR310 + * type. As of PGJDBC 42.7.5, it does not explicitly represent infinite + * intervals, and also has an internal state split into more units than the + * natural representation would require. + *

Related notes from the ISO SQL/XML specification

+ *

+ * SQL/XML specifies how to map SQL {@code INTERVAL} types and values to + * the XML Schema types {@code xs:yearMonthDuration} and + * {@code xs:dayTimeDuration}, which were added in XML Schema 1.1 as + * distinct subtypes of the broader {@code xs:duration} type from XML Schema + * 1.0. That Schema 1.0 supertype has a corresponding class in the standard + * Java library, + * {@link javax.xml.datatype.Duration javax.xml.datatype.Duration}, so + * an implementation of this functional interface to return that type would + * also be easy. It would not, however, represent PostgreSQL 17 infinitely + * negative or positive intervals. + *

+ * These XML Schema types do not perfectly align with the PostgreSQL + * {@code INTERVAL} type, because they group the day with the sub-day + * components and treat it as having invariant width. (The only time zone + * designations supported in XML Schema are fixed offsets, for which no + * daylight rules apply). The XML Schema types allow one overall sign, + * positive or negative, but do not allow the individual components to have + * signs that differ, as PostgreSQL does. + *

+ * Java's JSR310 types can be used with equal convenience in the PostgreSQL + * way (by assigning days to the {@code Period} and the smaller + * components to the {@code Duration}) or in the XML Schema way (by storing + * days in the {@code Duration} along with the smaller + * components), but of course those choices have different implications. + *

+ * A related consideration is, in a scheme like SQL/XML's where the SQL + * {@code INTERVAL} can be mapped to a choice of types, whether that choice + * is made statically (i.e. by looking at the declared type modifier such as + * {@code YEAR TO MONTH} or {@code HOUR TO SECOND} for a column) or + * per-value (by looking at which fields are nonzero in each value + * encountered). + *

+ * The SQL/XML rule is to choose a static mapping at analysis time according + * to the type modifier. {@code YEAR}, {@code MONTH}, or + * {@code YEAR TO MONTH} call for a mapping to {@code xs:yearMonthDuration}, + * while any of the finer modifiers call for mapping to + * {@code xs:dayTimeDuration}, and no mapping is defined for an + * {@code INTERVAL} lacking a type modifier to constrain its fields in one + * of those ways. Again, those specified mappings assume that days are not + * subject to daylight rules, contrary to the behavior of the PostgreSQL + * type. + *
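For illustration (not part of this patch; the class and method names are hypothetical), the SQL/XML-style static choice can be expressed against the field-set constants defined below; the empty set, as noted, has no defined mapping:

    import java.util.Set;

    import org.postgresql.pljava.adt.Timespan.Interval;
    import org.postgresql.pljava.adt.Timespan.Interval.Field;

    class IntervalMappingChoice
    {
        // True for YEAR, MONTH, or YEAR TO MONTH modifiers (xs:yearMonthDuration);
        // false for the finer modifiers (xs:dayTimeDuration). An empty set has no
        // defined SQL/XML mapping and is left to the application.
        static boolean mapsToYearMonthDuration(Set<Field> fields)
        {
            return Interval.YEAR.equals(fields)
                || Interval.MONTH.equals(fields)
                || Interval.YEAR_TO_MONTH.equals(fields);
        }
    }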

+ * In view of those considerations, there seems to be no single mapping of + * PostgreSQL {@code INTERVAL} to a common Java type that is sufficiently + * free of caveats to stand as a reference implementation. An application + * ought to choose an implementation of this functional interface to create + * whatever representation of an {@code INTERVAL} will suit that + * application's purposes. + */ + @FunctionalInterface + public interface Interval<T> extends Contract.Scalar<T> + { + enum Field + { + YEAR, MONTH, DAY, HOUR, MINUTE, SECOND + } + + Set<Field> YEAR = unmodifiableSet(of(Field.YEAR)); + Set<Field> MONTH = unmodifiableSet(of(Field.MONTH)); + Set<Field> DAY = unmodifiableSet(of(Field.DAY)); + Set<Field> HOUR = unmodifiableSet(of(Field.HOUR)); + Set<Field> MINUTE = unmodifiableSet(of(Field.MINUTE)); + Set<Field> SECOND = unmodifiableSet(of(Field.SECOND)); + + Set<Field> YEAR_TO_MONTH = + unmodifiableSet(range(Field.YEAR, Field.MONTH)); + Set<Field> DAY_TO_HOUR = + unmodifiableSet(range(Field.DAY, Field.HOUR)); + Set<Field> DAY_TO_MINUTE = + unmodifiableSet(range(Field.DAY, Field.MINUTE)); + Set<Field> DAY_TO_SECOND = + unmodifiableSet(range(Field.DAY, Field.SECOND)); + Set<Field> HOUR_TO_MINUTE = + unmodifiableSet(range(Field.HOUR, Field.MINUTE)); + Set<Field> HOUR_TO_SECOND = + unmodifiableSet(range(Field.HOUR, Field.SECOND)); + Set<Field> MINUTE_TO_SECOND = + unmodifiableSet(range(Field.MINUTE, Field.SECOND)); + + Set<Set<Field>> ALLOWED_FIELDS = + Set.of( + unmodifiableSet(noneOf(Field.class)), + YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, + YEAR_TO_MONTH, DAY_TO_HOUR, DAY_TO_MINUTE, DAY_TO_SECOND, + HOUR_TO_MINUTE, HOUR_TO_SECOND, MINUTE_TO_SECOND); + + int MAX_INTERVAL_PRECISION = 6; + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * PostgreSQL allows the three components to have independent signs. + * They are stored separately because the results of combining them with + * a date or a timestamp cannot be precomputed without knowing the other + * operand. + *

+ * In arithmetic involving an interval and a timestamp, the width of one + * unit in days can depend on the other operand if a timezone + * applies and has daylight savings rules: + *

+		 * SELECT (t + i) - t
+		 * FROM (VALUES (interval '1' DAY)) AS s(i),
+		 * (VALUES (timestamptz '12 mar 2022'), ('13 mar 2022'), ('6 nov 2022')) AS v(t);
+		 * ----------------
+		 *  1 day
+		 *  23:00:00
+		 *  1 day 01:00:00
+		 *
+ *

+ * In arithmetic involving an interval and a date or timestamp, the + * width of one unit in months can depend on the calendar + * month of the other operand, as well as on timezone shifts as for + * days: + *

+		 * SELECT (t + i) - t
+		 * FROM (VALUES (interval '1' MONTH)) AS s(i),
+		 * (VALUES (timestamptz '1 feb 2022'), ('1 mar 2022'), ('1 nov 2022')) AS v(t);
+		 * ------------------
+		 *  28 days
+		 *  30 days 23:00:00
+		 *  30 days 01:00:00
+		 *
+ */ + T construct(long microseconds, int days, int months); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns an {@code Interval} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * applied to the type. + *

+ * The notional fields to be present in the interval are indicated + * by fields; the SQL standard defines more than three of + * these, which PostgreSQL combines into the three components + * actually stored. In a valid type modifier, the fields + * set must equal one of the members of {@code ALLOWED_FIELDS}: one + * of the named constants in this interface or the empty set. If it + * is empty, the type modifier does not constrain the fields that + * may be present. In practice, it is the finest field allowed in + * the type modifier that matters; PostgreSQL rounds away portions + * of an interval finer than that, but applies no special treatment + * based on the coarsest field the type modifier mentions. + *

+ * The desired number of seconds digits to the right of the decimal + * point is indicated by precision if present, which must + * be between 0 and {@code MAX_INTERVAL_PRECISION} inclusive. In + * a valid type modifier, when this is specified, fields + * must either include {@code SECONDS}, or be unspecified. + */ + Interval modify(Set fields, OptionalInt precision); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/package-info.java new file mode 100644 index 000000000..d97b70340 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/package-info.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Package containing functional interfaces that document and present + * PostgreSQL data types abstractly, but clearly enough for faithful mapping. + *

+ * Interfaces in this package are meant to occupy a level between a PL/Java + * {@link Adapter Adapter} (responsible for PostgreSQL internal details that + * properly remain encapsulated) and some intended Java representation class + * (which may encapsulate details of its own). + *

Example

+ *

+ * Suppose an application would like to manipulate + * a PostgreSQL {@code TIME WITH TIME ZONE} in the form of a Java + * {@link OffsetTime OffsetTime} instance. + *

+ * The application selects a PL/Java {@link Adapter Adapter} that handles the + * PostgreSQL {@code TIME WITH TIME ZONE} type and presents it via the + * functional interface {@link Datetime.TimeTZ Datetime.TimeTZ} in this package. + *

+ * The application can instantiate that {@code Adapter} with some implementation + * (possibly just a lambda) of that functional interface, which will construct + * an {@code OffsetTime} instance. That {@code Adapter} instance now maps + * {@code TIME WITH TIME ZONE} to {@code OffsetTime}, as desired. + *

+ * The PostgreSQL internal details are handled by the {@code Adapter}. The + * internal details of {@code OffsetTime} are {@code OffsetTime}'s business. + * In between those two sits the {@link Datetime.TimeTZ Datetime.TimeTZ} + * interface in this package, with its one simple role: it presents the value + * in a clear, documented form as consisting of: + *

+ *  • microseconds since midnight, and
+ *  • a time zone offset in seconds west of the prime meridian
+ *

+ * It serves as a contract for the {@code Adapter} and as a clear starting point + * for constructing the wanted Java representation. + *

+ * It is important that the interfaces here serve as documentation as + * well as code, as it turns out that {@code OffsetTime} expects its + * time zone offsets to be positive east of the prime meridian, + * so a sign flip is needed. Interfaces in this package must be + * documented with enough detail to allow a developer to make correct + * use of the exposed values. + *
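As a sketch of the example above (not the packaged reference implementation itself; the parameter order and names here are assumptions taken from the description of microseconds since midnight and seconds west):

    import java.time.LocalTime;
    import java.time.OffsetTime;
    import java.time.ZoneOffset;

    import org.postgresql.pljava.adt.Datetime;

    class TimeTZExample
    {
        // Assumed functional shape: (microseconds since midnight, seconds west).
        static final Datetime.TimeTZ<OffsetTime> AS_OFFSET_TIME =
            (microsecondsSinceMidnight, secondsWest) ->
                OffsetTime.of(
                    LocalTime.ofNanoOfDay(1_000L * microsecondsSinceMidnight),
                    ZoneOffset.ofTotalSeconds(-secondsWest)); // flip west to east
    }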

+ * The division of labor between what is exposed in these interfaces and what + * is encapsulated within {@code Adapter}s calls for a judgment of which + * details are semantically significant. If PostgreSQL somehow changes the + * internal details needed to retrieve a {@code timetz} value, it should be the + * {@code Adapter}'s job to make that transparent. If PostgreSQL ever changes + * the fact that a {@code timetz} is microseconds since midnight with + * seconds-west as a zone offset, that would require versioning the + * corresponding interface here; it is something a developer would need to know. + *

Reference implementations

+ * A few simple reference implementations (including the + * {@code timetz}-as-{@code OffsetTime} used as the example) can also be found + * in this package, and {@code Adapter} instances using them are available, + * so an application would not really have to follow the steps of the example + * to obtain one. + * @author Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.time.OffsetTime; + +import org.postgresql.pljava.Adapter; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/AbstractType.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/AbstractType.java new file mode 100644 index 000000000..0feece20c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/AbstractType.java @@ -0,0 +1,1168 @@ +/* + * Copyright (c) 2020-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import static java.lang.System.identityHashCode; + +import java.lang.reflect.Array; +import java.lang.reflect.Type; +import java.lang.reflect.GenericArrayType; +import java.lang.reflect.GenericDeclaration; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.TypeVariable; +import java.lang.reflect.WildcardType; + +import static java.util.Arrays.stream; +import static java.util.Collections.addAll; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import static java.util.Objects.requireNonNull; + +import static java.util.stream.Stream.concat; +import static java.util.stream.Collectors.joining; + +/** + * Custom implementations of Java's {@link Type Type} interfaces, with methods + * for a couple useful manipulations. + *

+ * The implementations returned from Java reflection methods are internal, with + * no way to instantiate arbitrary new ones to represent the results of + * computations with them. + *

+ * Note: the implementations here do not override {@code equals} and + * {@code hashCode} inherited from {@code Object}. The JDK internal ones do, + * but not with documented behaviors, so it didn't seem worthwhile to try + * to match them. (The API specifies an {@code equals} behavior only for + * {@code ParameterizedType}, and no corresponding {@code hashCode} even for + * that, so good luck matching it.) Results from methods in this class can + * include new objects (instances of these classes) and original ones + * constructed by Java; don't assume anything sane will happen using + * {@code equals} or {@code hashCode} between them. There is a + * {@code typesEqual} static method defined here to do that job. + */ +public abstract class AbstractType implements Type +{ + enum TypeKind + { + ARRAY(GenericArrayType.class), + PT(ParameterizedType.class), + TV(TypeVariable.class), + WILDCARD(WildcardType.class), + CLASS(Class.class); + + private Class m_class; + + TypeKind(Class cls) + { + m_class = cls; + } + + static TypeKind of(Class cls) + { + for ( TypeKind k : values() ) + if ( k.m_class.isAssignableFrom(cls) ) + return k; + throw new AssertionError("TypeKind nonexhaustive: " + cls); + } + } + + /** + * Compare two Types for equality without relying on their own + * {@code equals} methods. + */ + static boolean typesEqual(Type a, Type b) + { + if ( a == b ) + return true; + + if ( null == a || null == b ) + return false; + + TypeKind ak = TypeKind.of(a.getClass()); + TypeKind bk = TypeKind.of(b.getClass()); + + if ( ak != bk ) + return false; + + switch ( ak ) + { + case ARRAY: + GenericArrayType gaa = (GenericArrayType)a; + GenericArrayType gab = (GenericArrayType)b; + return typesEqual(gaa, gab); + case PT: + ParameterizedType pta = (ParameterizedType)a; + ParameterizedType ptb = (ParameterizedType)b; + if ( ! typesEqual(pta.getRawType(), ptb.getRawType()) ) + return false; + Type[] taa = pta.getActualTypeArguments(); + Type[] tab = ptb.getActualTypeArguments(); + if ( taa.length != tab.length ) + return false; + for ( int i = 0; i < taa.length; ++ i ) + if ( ! typesEqual(taa[i], tab[i]) ) + return false; + return true; + case TV: + TypeVariable tva = (TypeVariable)a; + TypeVariable tvb = (TypeVariable)b; + return tva.getGenericDeclaration() == tvb.getGenericDeclaration() + && tva.getName().equals(tvb.getName()); + case WILDCARD: + WildcardType wa = (WildcardType)a; + WildcardType wb = (WildcardType)b; + Type[] ua = wa.getUpperBounds(); + Type[] ub = wb.getUpperBounds(); + Type[] la = wa.getLowerBounds(); + Type[] lb = wb.getLowerBounds(); + if ( ua.length != ub.length || la.length != lb.length ) + return false; + for ( int i = 0; i < ua.length; ++ i ) + if ( ! typesEqual(ua[i], ub[i]) ) + return false; + for ( int i = 0; i < la.length; ++ i ) + if ( ! typesEqual(la[i], lb[i]) ) + return false; + return true; + case CLASS: + return false; // they failed the == test at the very top + } + + return false; // unreachable, but tell that to javac + } + + /** + * Refines some {@code Type}s in by unifying the first of them + * with using. + *

+ * The variadic array of in arguments is returned, modified + * in place. + *

+ * The type using is unified with {@code in[0]} and then used to + * replace {@code in[0]}, while any variable substitutions made in + * the unification are repeated in the remaining in elements. + */ + public static Type[] refine(Type using, Type... in) + { + Map bindings = new HashMap<>(); + unify(bindings, using, in[0]); + + TypeVariable[] vars = new TypeVariable[bindings.size()]; + Type [] args = new Type [bindings.size()]; + + int i = 0; + for ( Map.Entry e : bindings.entrySet() ) + { + vars[i] = e.getKey().get(); + args[i] = e.getValue(); + ++ i; + } + Bindings b = new Bindings(vars, args); + + in[0] = using; + for ( i = 1; i < in.length; ++ i ) + in[i] = substitute(b, in[i]); + + return in; + } + + /** + * A simpleminded unify that assumes one argument is always + * the more-specific one, should resolve type variables found in the other, + * and that this can be done for cases of interest without generating and + * then solving constraints. + */ + static void unify(Map bindings, Type specific, Type general) + { + Type element1; + Type element2; + + while ( null != (element1 = toElementIfArray(specific)) + && null != (element2 = toElementIfArray(general)) ) + { + specific = element1; + general = element2; + } + + if ( general instanceof TypeVariable ) + { + // XXX verify here that specific satisfies the variable's bounds + Type wasBound = + bindings.put(new VKey((TypeVariable)general), specific); + if ( null != wasBound && ! typesEqual(specific, wasBound) ) + throw new UnsupportedOperationException( + "unimplemented case in AbstractType.unify: binding again"); + return; + } + + if ( general instanceof ParameterizedType ) + { + ParameterizedType t = (ParameterizedType)general; + Type[] oldActuals = t.getActualTypeArguments(); + Class raw = (Class)t.getRawType(); + Type[] newActuals = specialization(specific, raw); + if ( null != newActuals ) + { + for ( int i = 0; i < oldActuals.length; ++ i ) + unify(bindings, newActuals[i], oldActuals[i]); + return; + } + } + else if ( general instanceof Class ) + { + Class c = (Class)general; + TypeVariable[] formals = c.getTypeParameters(); + Type[] actuals = specialization(specific, c); + if ( null != actuals ) + { + for ( int i = 0; i < formals.length; ++ i ) + unify(bindings, actuals[i], formals[i]); + return; + } + } + + throw new IllegalArgumentException( + "failed to unify " + specific + " with " + general); + } + + /** + * Returns the component type of either a {@code GenericArrayType} or + * an array {@code Class}, otherwise null. + */ + private static Type toElementIfArray(Type possibleArray) + { + if ( possibleArray instanceof GenericArrayType ) + return ((GenericArrayType)possibleArray).getGenericComponentType(); + if ( ! (possibleArray instanceof Class) ) + return null; + return ((Class)possibleArray).getComponentType(); // null if !array + } + + /** + * Needed: test whether sub is a subtype of sup. + *

+ * XXX For the time being, this is nothing but a test of + * erased subtyping, hastily implemented by requiring that + * {@code specialization(sub, erase(sup))} does not return null. + *

+ * This must sooner or later be replaced with an implementation of + * the subtyping rules from Java Language Specification 4.10, taking + * also type parameterization into account. + */ + public static boolean isSubtype(Type sub, Type sup) + { + return null != specialization(sub, erase(sup)); + } + + /** + * Equivalent to {@code specialization(candidate, expected, null)}. + */ + public static Type[] specialization(Type candidate, Class expected) + { + return specialization(candidate, expected, null); + } + + /** + * Test whether the type {@code candidate} is, directly or indirectly, + * a specialization of generic type {@code expected}. + *

+ * For example, the Java type T of a particular adapter A that extends + * {@code Adapter.As} can be retrieved with + * {@code specialization(A.class, As.class)[0]}. + *
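For instance (an illustrative sketch, not code from this patch; the TypeReference class here is hypothetical), the same call unpacks a Gafter-style super type token, as discussed next:

    import java.lang.reflect.Type;
    import java.util.List;

    import org.postgresql.pljava.adt.spi.AbstractType;

    class TypeTokenExample
    {
        abstract static class TypeReference<T> { }

        public static void main(String[] args)
        {
            TypeReference<List<String>> token =
                new TypeReference<List<String>>() { };
            Type[] actual = AbstractType.specialization(
                token.getClass(), TypeReference.class);
            System.out.println(actual[0]); // roughly: java.util.List<java.lang.String>
        }
    }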

+ * More generally, this method can retrieve the generic type information + * from any "super type token", as first proposed by Neal Gafter in 2006, + * where a super type token is generally an instance of an anonymous + * subclass that specializes a certain generic type. Although the idea has + * been often used, the usages have not settled on one agreed name for the + * generic type. This method will work with any of them, by supplying the + * expected generic type itself as the second parameter. For example, a + * super type token {@code foo} derived from Gafter's suggested class + * {@code TypeReference} can be unpacked with + * {@code specialization(foo.getClass(), TypeReference.class)}. + * @param candidate a type to be checked + * @param expected known (normally generic) type to check for + * @param rtype array to receive (if non-null) the corresponding + * (parameterized or raw) type if the result is non-null. + * @return null if candidate does not extend expected, + * otherwise the array of type arguments with which it specializes + * expected + * @throws IllegalArgumentException if passed a Type that is not a + * Class or a ParameterizedType + * @throws NullPointerException if either argument is null + * @throws UnsupportedOperationException if candidate does extend + * expected but does not carry the needed parameter bindings (such as + * when the raw expected Class itself is passed) + */ + public static Type[] specialization( + Type candidate, Class expected, Type[] rtype) + { + Type t = requireNonNull(candidate, "candidate is null"); + requireNonNull(expected, "expected is null"); + boolean superinterfaces = expected.isInterface(); + Class c; + ParameterizedType pt = null; + Bindings latestBindings = null; + boolean ptFound = false; + boolean rawTypeFound = false; + + if ( t instanceof Class ) + { + c = (Class)t; + if ( ! expected.isAssignableFrom(c) ) + return null; + if ( expected == c ) + rawTypeFound = true; + else + latestBindings = // trivial, non-null initial value + new Bindings(new TypeVariable[0], new Type[0]); + } + else if ( t instanceof ParameterizedType ) + { + pt = (ParameterizedType)t; + c = (Class)pt.getRawType(); + if ( ! expected.isAssignableFrom(c) ) + return null; + if ( expected == c ) + ptFound = true; + else + latestBindings = new Bindings(latestBindings, pt); + } + else + throw new IllegalArgumentException( + "expected Class or ParameterizedType, got: " + t); + + if ( ! ptFound && ! rawTypeFound ) + { + List pending = new LinkedList<>(); + pending.add(c.getGenericSuperclass()); + if ( superinterfaces ) + addAll(pending, c.getGenericInterfaces()); + + while ( ! pending.isEmpty() ) + { + t = pending.remove(0); + if ( null == t ) + continue; + if ( t instanceof Class ) + { + c = (Class)t; + if ( expected == c ) + { + rawTypeFound = true; + break; + } + if ( ! expected.isAssignableFrom(c) ) + continue; + pending.add(latestBindings); + } + else if ( t instanceof ParameterizedType ) + { + pt = (ParameterizedType)t; + c = (Class)pt.getRawType(); + if ( expected == c ) + { + ptFound = true; + break; + } + if ( ! 
expected.isAssignableFrom(c) ) + continue; + pending.add(new Bindings(latestBindings, pt)); + } + else if ( t instanceof Bindings ) + { + latestBindings = (Bindings)t; + continue; + } + else + throw new AssertionError( + "expected Class or ParameterizedType, got: " + t); + + pending.add(c.getGenericSuperclass()); + if ( superinterfaces ) + addAll(pending, c.getGenericInterfaces()); + } + } + + Type[] actualArgs = null; + + if ( ptFound ) + { + if ( null != latestBindings ) + pt = (ParameterizedType) + AbstractType.substitute(latestBindings, pt); + actualArgs = pt.getActualTypeArguments(); + if ( null != rtype ) + rtype[0] = pt; + } + else if ( rawTypeFound ) + { + actualArgs = new Type[0]; + if ( null != rtype ) + rtype[0] = expected; + } + + if ( null == actualArgs + || actualArgs.length != expected.getTypeParameters().length ) + throw new UnsupportedOperationException( + "failed checking whether " + candidate + + " specializes " + expected); + + return actualArgs; + } + + /** + * Returns the erasure of a type. + *

+ * If t is a {@code Class}, it is returned unchanged. + */ + public static Class erase(Type t) + { + if ( t instanceof Class ) + { + return (Class)t; + } + else if ( t instanceof GenericArrayType ) + { + int dims = 0; + do + { + ++ dims; + GenericArrayType a = (GenericArrayType)t; + t = a.getGenericComponentType(); + } while ( t instanceof GenericArrayType ); + Class c = (Class)erase(t); + // in Java 12+ see TypeDescriptor.ofField.arrayType(int) + return Array.newInstance(c, new int [ dims ]).getClass(); + } + else if ( t instanceof ParameterizedType ) + { + return (Class)((ParameterizedType)t).getRawType(); + } + else if ( t instanceof WildcardType ) + { + throw new UnsupportedOperationException("erase on wildcard type"); + /* + * Probably just resolve all the lower and/or upper bounds, as long + * as b is known to be the right set of bindings for the type that + * contains the member declaration, but I'm not convinced at present + * that wouldn't require more work keeping track of bindings. + */ + } + else if ( t instanceof TypeVariable ) + { + return erase(((TypeVariable)t).getBounds()[0]); + } + else + throw new UnsupportedOperationException( + "erase on unknown Type " + t.getClass()); + } + + /** + * Recursively descend t substituting any occurrence of a type variable + * found in b, returning a new object, or t unchanged if no substitutions + * were made. + *

+ * Currently throws {@code UnsupportedOperationException} if t is + * a wildcard, as that case shouldn't be needed for the analysis of + * class/interface inheritance hierarchies that {@code specialization} + * is concerned with. + *

+ */ + public static Type substitute(Bindings b, Type t) + { + if ( t instanceof GenericArrayType ) + { + GenericArrayType a = (GenericArrayType)t; + Type oc = a.getGenericComponentType(); + Type nc = substitute(b, oc); + if ( nc == oc ) + return t; + return new GenericArray(nc); + } + else if ( t instanceof ParameterizedType ) + { + ParameterizedType p = (ParameterizedType)t; + Type[] as = p.getActualTypeArguments(); + Type oown = p.getOwnerType(); + Type oraw = p.getRawType(); + assert oraw instanceof Class; + + boolean changed = substituted(b, as); + + if ( null != oown ) + { + Type nown = substitute(b, oown); + if ( nown != oown ) + { + oown = nown; + changed = true; + } + } + + if ( changed ) + return new Parameterized(as, oown, oraw); + return t; + } + else if ( t instanceof WildcardType ) + { + WildcardType w = (WildcardType)t; + Type[] lbs = w.getLowerBounds(); + Type[] ubs = w.getUpperBounds(); + + boolean changed = substituted(b, lbs) | substituted(b, ubs); + + if ( changed ) + return new Wildcard(lbs, ubs); + return t; + } + else if ( t instanceof TypeVariable ) + { + /* + * First the bad news: there isn't a reimplementation of + * TypeVariable here, to handle returning a changed version with + * substitutions in its bounds. Doesn't seem worth the effort, as + * the classes that hold/supply TypeVariables are Class/Method/ + * Constructor, and we're not going to be reimplementing *them*. + * + * Next the good news: TypeVariable bounds are the places where + * a good story for terminating recursion would be needed, so + * if we can't substitute in them anyway, that's a non-concern. + */ + return b.substitute((TypeVariable)t); + } + else if ( t instanceof Class ) + { + return t; + } + else + throw new UnsupportedOperationException( + "substitute on unknown Type " + t.getClass()); + } + + /** + * Applies substitutions in b to each type in types, + * updating them in place, returning true if any change resulted. + */ + private static boolean substituted(Bindings b, Type[] types) + { + boolean changed = false; + for ( int i = 0; i < types.length; ++ i ) + { + Type ot = types[i]; + Type nt = substitute(b, ot); + if ( nt == ot ) + continue; + types[i] = nt; + changed = true; + } + return changed; + } + + static String toString(Type t) + { + if ( t instanceof Class ) + return ((Class)t).getCanonicalName(); + return t.toString(); + } + + /** + * A key class for entering {@code TypeVariable}s in hash structures, + * without relying on the undocumented behavior of the Java implementation. + *

+ * Assumes that object identity is significant for + * {@code GenericDeclaration} instances ({@code Class} instances are chiefly + * what will be of interest here), just as {@code typesEqual} does. + */ + static final class VKey + { + private final TypeVariable m_tv; + + VKey(TypeVariable tv) + { + m_tv = tv; + } + + @Override + public int hashCode() + { + return + m_tv.getName().hashCode() + ^ identityHashCode(m_tv.getGenericDeclaration()); + } + + @Override + public boolean equals(Object other) + { + if ( this == other ) + return true; + if ( ! (other instanceof VKey) ) + return false; + return typesEqual(m_tv, ((VKey)other).m_tv); + } + + TypeVariable get() + { + return m_tv; + } + } + + public static TypeVariable[] freeVariables(Type t) + { + Set result = new HashSet<>(); + freeVariables(result, t); + return result.stream().map(VKey::get).toArray(TypeVariable[]::new); + } + + private static void freeVariables(Set s, Type t) + { + if ( t instanceof Class ) + return; + if ( t instanceof GenericArrayType ) + { + GenericArrayType a = (GenericArrayType)t; + freeVariables(s, a.getGenericComponentType()); + return; + } + if ( t instanceof ParameterizedType ) + { + ParameterizedType p = (ParameterizedType)t; + freeVariables(s, p.getOwnerType()); + stream(p.getActualTypeArguments()) + .forEach(tt -> freeVariables(s, tt)); + return; + } + if ( t instanceof TypeVariable ) + { + TypeVariable v = (TypeVariable)t; + if ( s.add(new VKey(v)) ) + stream(v.getBounds()).forEach(tt -> freeVariables(s, tt)); + return; + } + if ( t instanceof WildcardType ) + { + WildcardType w = (WildcardType)t; + concat(stream(w.getUpperBounds()), stream(w.getLowerBounds())) + .forEach(tt -> freeVariables(s, tt)); + return; + } + } + + @Override + public String getTypeName() + { + return toString(); + } + + static class GenericArray extends AbstractType implements GenericArrayType + { + private final Type component; + + GenericArray(Type component) + { + this.component = component; + } + + @Override + public Type getGenericComponentType() + { + return component; + } + + @Override + public String toString() + { + return toString(component) + "[]"; + } + } + + static class Parameterized extends AbstractType implements ParameterizedType + { + private final Type[] arguments; + private final Type owner; + private final Type raw; + + Parameterized(Type[] arguments, Type owner, Type raw) + { + this.arguments = arguments; + this.owner = owner; + this.raw = raw; + } + + @Override + public Type[] getActualTypeArguments() + { + return arguments; + } + + @Override + public Type getOwnerType() + { + return owner; + } + + @Override + public Type getRawType() + { + return raw; + } + + @Override + public String toString() + { + if ( 0 == arguments.length ) + return toString(raw); + return toString(raw) + stream(arguments) + .map(AbstractType::toString).collect(joining(",", "<", ">")); + } + } + + static class Wildcard extends AbstractType implements WildcardType + { + private final Type[] lbounds; + private final Type[] ubounds; + + Wildcard(Type[] lbounds, Type[] ubounds) + { + this.lbounds = lbounds; + this.ubounds = ubounds; + } + + @Override + public Type[] getLowerBounds() + { + return lbounds; + } + + @Override + public Type[] getUpperBounds() + { + return ubounds; + } + + @Override + public String toString() + { + if ( 0 < lbounds.length ) + return "? super " + stream(lbounds) + .map(AbstractType::toString).collect(joining(" & ")); + else if ( 0 < ubounds.length && Object.class != ubounds[0] ) + return "? 
extends " + stream(ubounds) + .map(AbstractType::toString).collect(joining(" & ")); + else + return "?"; + } + } + + /** + * A class recording the bindings made in a ParameterizedType to the type + * parameters in a GenericDeclaration<Class>. Implements {@code Type} + * so it can be added to the {@code pending} queue in + * {@code specialization}. + *

+ * In {@code specialization}, the tree of superclasses/superinterfaces will + * be searched breadth-first, with all of a node's immediate supers enqueued + * before any from the next level. By recording a node's type variable to + * type argument bindings in an object of this class, and enqueueing it + * before any of the node's supers, any type variables encountered as actual + * type arguments to any of those supers should be resolvable in the object + * of this class most recently dequeued. + */ + public static class Bindings implements Type + { + private final TypeVariable[] formalTypeParams; + private final Type[] actualTypeArgs; + + public Bindings(TypeVariable[] formalParams, Type[] actualArgs) + { + actualTypeArgs = actualArgs; + formalTypeParams = formalParams; + if ( actualTypeArgs.length != formalTypeParams.length ) + throw new IllegalArgumentException( + "formalParams and actualArgs differ in length"); + // XXX check actualTypeArgs against bounds of the formalParams + } + + Bindings(Bindings prior, ParameterizedType pt) + { + actualTypeArgs = pt.getActualTypeArguments(); + formalTypeParams = + ((GenericDeclaration)pt.getRawType()).getTypeParameters(); + assert actualTypeArgs.length == formalTypeParams.length; + + if ( 0 == prior.actualTypeArgs.length ) + return; + + for ( int i = 0; i < actualTypeArgs.length; ++ i ) + actualTypeArgs[i] = + AbstractType.substitute(prior, actualTypeArgs[i]); + } + + Type substitute(TypeVariable v) + { + for ( int i = 0; i < formalTypeParams.length; ++ i ) + if ( typesEqual(formalTypeParams[i], v) ) + return actualTypeArgs[i]; + return v; + } + } + + /** + * A class dedicated to manipulating the types of multidimensional Java + * arrays, and their instances, that conform to PostgreSQL array constraints + * (non-'jagged', each dimension's arrays all equal size, no intermediate + * nulls). + *

+ * Construct a {@code MultiArray} by supplying a component {@link Type} and + * a number of dimensions. The resulting {@code MultiArray} represents the + * Java array type, and has a number of bracket pairs equal to the supplied + * dimensions argument plus those of the component type if it is itself a + * Java array. (There could be an {@code Adapter} for some PostgreSQL scalar + * type that presents it as a Java array, and then there could be a + * PostgreSQL array of that type.) So the type reported by + * {@link #arrayType arrayType} may have more bracket pairs than the + * {@code MultiArray}'s dimensions. Parentheses are used by + * {@link #toString toString} to help see what's going on. + *
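The paragraph below describes the size/allocate/iterate workflow; as a sketch under those assumptions (not part of this patch, with hypothetical names, using var to sidestep the wildcard-typed result that allocate is documented to return):

    import org.postgresql.pljava.adt.spi.AbstractType.MultiArray;

    class MultiArrayExample
    {
        // Allocate a 3x4 double[][] and fill it in PostgreSQL's row-major order.
        static double[][] threeByFour()
        {
            MultiArray ma = new MultiArray(double.class, 2);
            var allocated = ma.size(3, 4).allocate();
            double v = 0;
            for ( Object bottom : allocated )       // bottom-level double[] rows
            {
                double[] row = (double[])bottom;
                for ( int i = 0; i < row.length; ++i )
                    row[i] = v++;
            }
            return (double[][])allocated.array();
        }
    }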

+ * When converting a {@code MultiArray} to a {@link Sized Sized}, only as + * many sizes are supplied as the multiarray's dimensions, and when + * converting that to an {@link Sized.Allocated Allocated}, only that much + * allocation is done. Populating the arrays at that last allocated level + * with the converted elements of the PostgreSQL array is the work left + * for the caller. + */ + public static class MultiArray + { + public final Type component; + public final int dimensions; + + /** + * Constructs a description of a multiarray with a given component type + * and dimensions. + * @param component the type of the component (which may itself be an + * array) + * @param dimensions dimensions of the multiarray (if the component type + * is an array, the final resulting type will have the sum of its + * dimensions and these) + */ + public MultiArray(Type component, int dimensions) + { + if ( 1 > dimensions ) + throw new IllegalArgumentException( + "dimensions must be positive: " + dimensions); + this.component = component; + this.dimensions = dimensions; + } + + /** + * Returns a representation of the resulting Java array type, with + * parentheses around the component type (which may itself be an array + * type) and around the array brackets corresponding to this + * multiarray's dimensions. + */ + @Override + public String toString() + { + return "MultiArray: (" + component + ")([])*" + dimensions; + } + + /** + * Returns the resulting Java array type (which, if the component type + * is also an array, does not distinguish between its dimensions and + * those of this multiarray). + */ + public Type arrayType() + { + Type t = component; + + if ( t instanceof Class ) + t = Array.newInstance((Class)t, new int[dimensions]) + .getClass(); + else + for ( int i = 0 ; i < dimensions ; ++ i ) + t = new GenericArray(t); + + return t; + } + + /** + * Returns a {@code MultiArray} representing an array type t + * in a canonical form, with its ultimate non-array type as the + * component type, and all of its array dimensions belonging to the + * multiarray. + */ + public static MultiArray canonicalize(Type t) + { + Type t1 = requireNonNull(t); + int dims = 0; + + for ( ;; ) + { + t1 = toElementIfArray(t1); + if ( null == t1 ) + break; + t = t1; + ++ dims; + } + + if ( 0 == dims ) + throw new IllegalArgumentException("not an array type: " + t); + + return new MultiArray(t, dims); + } + + /** + * Returns a new {@code MultiArray} with the same Java array type but + * where {@link #component} is a non-array type and {@link #dimensions} + * holds the total number of dimensions. + */ + public MultiArray canonicalize() + { + if ( null == toElementIfArray(component) ) + return this; + + MultiArray a = canonicalize(component); + return new MultiArray(a.component, dimensions + a.dimensions); + } + + /** + * Returns this {@code MultiArray} as a 'prefix' of suffix + * (which must have the same ultimate non-array type but a smaller + * number of dimensions). + *

+ * The result will have the array type of suffix as its + * component type, and the dimensions required to have the same overall + * Java {@link #arrayType arrayType} as the receiver. + */ + public MultiArray asPrefixOf(MultiArray suffix) + { + MultiArray pfx = canonicalize(); + MultiArray sfx = suffix.canonicalize(); + + if ( 1 + sfx.dimensions > pfx.dimensions ) + throw new IllegalArgumentException( + "suffix too long: ("+ this +").asPrefixOf("+ suffix +")"); + + if ( ! typesEqual(pfx.component, sfx.component) ) + throw new IllegalArgumentException( + "asPrefixOf with different component types: " + + pfx.component + ", " + sfx.component); + + Type c = sfx.arrayType(); + + return new MultiArray(c, pfx.dimensions - sfx.dimensions); + } + + /** + * Returns a new {@code MultiArray} with this one's type (possibly a + * raw, or parameterized type) refined according to the known type of + * model. + */ + public MultiArray refine(Type model) + { + int modelDims = 0; + + if ( null != toElementIfArray(model) ) + { + MultiArray cmodel = canonicalize(model); + modelDims = cmodel.dimensions; + model = cmodel.component; + } + + MultiArray canon = canonicalize(); + + Type[] rtype = new Type[1]; + if ( null == specialization(model, erase(canon.component), rtype) ) + throw new IllegalArgumentException( + "refine: " + model + " does not specialize " + + canon.component); + + MultiArray result = new MultiArray(rtype[0], canon.dimensions); + + if ( 0 < modelDims ) + { + MultiArray suffix = new MultiArray(rtype[0], modelDims); + result = result.asPrefixOf(suffix); + } + + return result; + } + + /** + * Returns a {@link Sized Sized} representing this {@code MultiArray} + * with a size for each of its dimensions. + */ + public Sized size(int... dims) + { + return new Sized(dims); + } + + /** + * Represents a {@code MultiArray} for which sizes for its dimensions + * have been specified, so that an instance can be allocated. + */ + public class Sized + { + private final int[] lengths; + + private Sized(int[] dims) + { + if ( dims.length != dimensions ) + throw new IllegalArgumentException( + "("+ this +").size(passed " + + dims.length +" dimensions)"); + lengths = dims.clone(); + } + + @Override + public String toString() + { + return MultiArray.this.toString(); + } + + /** + * Returns an {@link Allocated Allocated} that wraps a + * freshly-allocated array having the sizes recorded here. + *

+ * The result is returned with wildcard types. If the caller code + * has been written so as to have type variables with the proper + * types at compile time, it may do an unchecked cast on the result, + * which may make later operations more concise. + */ + public Allocated allocate() + { + Class c = erase(component); + Object a = Array.newInstance(c, lengths); + + return new Allocated(a); + } + + /** + * Wraps an existing instance of the multiarray type in question. + * + * @param the overall Java type of the whole array, which + * can be retrieved with array() + * @param the type of the arrays at the final level + * (one-dimensional arrays of the component type) that can be + * iterated, in order, to be populated or read out. <TI> is + * always an array type, but can be a reference array or any + * primitive array type, and therefore not as convenient as it might + * be, because the least upper bound of those types is + * {@code Object}. + */ + public class Allocated implements Iterable + { + final Object array; + + private Allocated(Object a) + { + array = requireNonNull(a); + } + + /** + * Returns the resulting array. + */ + public TA array() + { + @SuppressWarnings("unchecked") + TA result = (TA)array; + return result; + } + + @Override + public String toString() + { + return MultiArray.this.toString(); + } + + /** + * Returns an {@code Iterator} over the array(s) at the bottom + * level of this multiarray, the ones that are one-dimensional + * arrays of the component type. + *

+ * They are returned in order, so that a simple loop to copy the + * component values into or out of each array in turn will + * amount to a row-major traversal (same as PostgreSQL's storage + * order) of the whole array. + */ + @Override + public Iterator iterator() + { + final Object[][] arrays = new Object [ dimensions ] []; + final int[] indices = new int [ dimensions ]; + final int rightmost = dimensions - 1; + + arrays[0] = new Object[] { array }; + + for ( int i = 1; i < arrays.length; ++ i ) + { + Object[] a = arrays[i-1]; + if ( 0 == a.length ) + { + ++ indices[0]; + break; + } + arrays[i] = (Object[])requireNonNull(a[0]); + } + + return new Iterator() + { + @Override + public boolean hasNext() + { + return 0 == indices[0]; + } + + @Override + public TI next() + { + if ( 0 < indices[0] ) + throw new NoSuchElementException(); + + @SuppressWarnings("unchecked") + TI o = (TI)arrays[rightmost][indices[rightmost]++]; + + if (indices[rightmost] >= arrays[rightmost].length) + { + int i = rightmost - 1; + while ( 0 <= i ) + { + if ( ++ indices[i] < arrays[i].length ) + break; + -- i; + } + if ( 0 <= i ) + { + while ( i < rightmost ) + { + Object a = arrays[i][indices[i]]; + ++ i; + arrays[i] = (Object[])requireNonNull(a); + indices[i] = 0; + } + } + } + + return o; + } + }; + } + } + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Datum.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Datum.java new file mode 100644 index 000000000..84fbd669c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Datum.java @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import java.io.Closeable; +import java.io.InputStream; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; // for javadoc +import org.postgresql.pljava.model.Attribute; + +/** + * Raw access to the contents of a PostgreSQL datum. + *

+ * For type safety, only {@link Adapter Adapter} implementations should be + * able to obtain a {@code Datum}, and should avoid leaking it to other code. + */ +public interface Datum extends Closeable +{ + /** + * Use the given {@link Verifier} to confirm that the {@code Datum} content + * is well-formed, throwing an exception if not. + */ + void verify(Verifier.OfBuffer v) throws SQLException; + + /** + * Use the given {@link Verifier} to confirm that the {@code Datum} content + * is well-formed, throwing an exception if not. + */ + void verify(Verifier.OfStream v) throws SQLException; + + /** + * Interface through which PL/Java code reads the content of an existing + * PostgreSQL datum. + */ + interface Input extends Datum + { + default void pin() throws SQLException + { + } + + default boolean pinUnlessReleased() + { + return false; + } + + default void unpin() + { + } + + /** + * Returns a read-only {@link ByteBuffer} covering the content of the + * datum. + *

+ * When the datum is a {@code varlena}, the "content" does not include + * the four-byte header. When implementing an adapter for a varlena + * datatype, note carefully whether offsets used in the PostgreSQL C + * code are relative to the start of the content or the start of the + * varlena overall. If the latter, they will need adjustment when + * indexing into the {@code ByteBuffer}. + *

+ * If the byte order of the buffer will matter, it should be explicitly + * set. + *
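+ * Purely as a sketch (native byte order here is just an example choice; see
+ * also the caution about pinning below), an adapter reading a four-byte value
+ * from the content might do:
+ *<pre>{@code
+ * static int firstInt(Datum.Input in) throws SQLException
+ * {
+ *     in.pin();
+ *     try
+ *     {
+ *         return in.buffer().order(ByteOrder.nativeOrder()).getInt(0);
+ *     }
+ *     finally
+ *     {
+ *         in.unpin();
+ *     }
+ * }
+ *}</pre>
+ *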

+ * The buffer may window native memory allocated by PostgreSQL, so + * {@link #pin pin()} and {@link #unpin unpin()} should surround + * accesses through it. Like {@code Datum} itself, the + * {@code ByteBuffer} should be used only within an {@code Adapter}, and + * not exposed to other code. + */ + ByteBuffer buffer() throws SQLException; + + /** + * Returns an {@link InputStream} that presents the same bytes contained + * in the buffer returned by {@link #buffer buffer()}. + *

+ * When necessary, the {@code InputStream} will handle pinning the + * buffer when reading, so the {@code InputStream} can safely be exposed + * to other code, if it is a reasonable way to present the contents of + * the datatype in question. + *

+ * The stream supports {@code mark} and {@code reset}. + */ + T inputStream() throws SQLException; + } + + /** + * Empty superinterface of {@code Accessor.Deformed} and + * {@code Accessor.Heap}, which are erased at run time but help distinguish, + * in source code, which memory layout convention an {@code Accessor} + * is tailored for. + */ + interface Layout + { + } + + /** + * Accessor for a {@code Datum} located, at some offset, in + * memory represented by a {@code } object. + *

+ * {@code } is a type variable to anticipate future memory abstractions + * like the incubating {@code MemorySegment} from JEP 412. The present + * implementation will work with any {@code } that you want as long + * as it is {@code java.nio.ByteBuffer}. + *

+ * Given an {@code Accessor} instance properly selected for the memory + * layout, datum width, type length, and by-value/by-reference passing + * convention declared for a given {@link Attribute Attribute}, methods on + * the {@code Accessor} are available to retrieve the individual datum + * in {@code Datum} form (essentially another {@code } of exactly + * the length of the datum, wrapped with methods to avoid access outside + * of its lifetime), or as any Java primitive type appropriate to + * the datum's width. A {@code get} method of the datum's exact width or + * wider may be used (except for {@code float} and {@code double}, which + * only work for width exactly 4 or 8 bytes, respectively). + *

+ * PostgreSQL only allows power-of-two widths up to {@code SIZEOF_DATUM} for + * a type that specifies the by-value convention, and so an {@code Accessor} + * for the by-value case only supports those widths. An {@code Accessor} for + * the by-reference case supports any size, with direct access as a Java + * primitive supported for any size up to the width of a Java long. + *

+ * {@code getBoolean} can be used for any width the {@code Accessor} + * supports up to the width of Java long, and the result will be true + * if the value has any 1 bits. + *
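+ * As a sketch only (where {@code acc} is an {@code Accessor} suited to the
+ * attribute, {@code b} its backing memory, and {@code off} a correctly
+ * computed offset), a two-byte value could be fetched at its exact width or
+ * widened without sign extension:
+ *<pre>{@code
+ * short exact   = acc.getShort(b, off);            // the declared 2-byte width
+ * int   widened = acc.getIntZeroExtended(b, off);  // widened, treated as unsigned
+ *}</pre>
+ *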

+ * Java {@code long} and {@code int} are always treated as + * signed by the language (though unsigned operations are available as + * methods), but have paired methods here to explicitly indicate which + * treatment is intended. The choice can affect the returned value when + * fetching a value as a primitive type that is wider than its type's + * declared length. Paired methods for {@code byte} are not provided because + * a byte is not wider than any type's length. When a type narrower than + * {@code SIZEOF_DATUM} is stored (in the {@code Deformed} layout), unused + * high bits are stored as zero. This should not strictly matter, as + * PostgreSQL strictly ignores the unused high bits, but it is consistent + * with the way PostgreSQL declares {@code Datum} as an unsigned integral + * type. + * + * @param type of the memory abstraction used. Accessors will be + * available supporting {@code ByteBuffer}, and may be available supporting + * a newer abstraction like {@code MemorySegment}. + * @param a subinterface of {@code Layout}, either {@code Deformed} or + * {@code Heap}, indicating which {@code TupleTableSlot} layout the + * {@code Accessor} is intended for, chiefly as a tool for compile-time + * checking that they haven't been mixed up. + */ + interface Accessor + { + Datum.Input getDatum(B buffer, int offset, Attribute a); + + long getLongSignExtended(B buffer, int offset); + + long getLongZeroExtended(B buffer, int offset); + + double getDouble(B buffer, int offset); + + int getIntSignExtended(B buffer, int offset); + + int getIntZeroExtended(B buffer, int offset); + + float getFloat(B buffer, int offset); + + short getShort(B buffer, int offset); + + char getChar(B buffer, int offset); + + byte getByte(B buffer, int offset); + + boolean getBoolean(B buffer, int offset); + + /** + * An accessor for use with a 'deformed' (array-of-{@code Datum}) + * memory layout. + *

+ * When using a 'deformed' accessor, the caller is responsible for + * passing an {@code offset} value that is an integral multiple of + * {@code SIZEOF_DATUM} from where the array-of-{@code Datum} starts. + */ + interface Deformed extends Layout + { + } + + /** + * An accessor for use with a heap-tuple styled, flattened, + * memory layout. + *

+ * When using a heap accessor, the caller is responsible for passing an + * {@code offset} value properly computed from the sizes of preceding + * members and the alignment of the member to be accessed. + */ + interface Heap extends Layout + { + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/TwosComplement.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/TwosComplement.java new file mode 100644 index 000000000..d2323ed96 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/TwosComplement.java @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +/** + * Methods that have variants on twos-complement Java types that might be signed + * or unsigned. + *

+ * The {@code Signed} or {@code Unsigned} subinterface below, as appropriate, + * can be used as a mixin on a class where the right treatment of a Java + * {@code long}, {@code int}, {@code short}, or {@code byte} might be + * class-specific. + *

+ * The semantic difference between a {@code short} treated as unsigned and a + * {@code char} (also an unsigned 16-bit type) is whether the value is expected + * to mean what UTF-16 says it means. + */ +public interface TwosComplement +{ + boolean unsigned(); + + /* + * Methods for long + */ + + int compare(long x, long y); + + long divide(long dividend, long divisor); + + long remainder(long dividend, long divisor); + + long parseLong(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(long i, int radix); + + default long parseLong(CharSequence s, int radix) + { + return parseLong(s, 0, s.length(), radix); + } + + default long parseLong(CharSequence s) + { + return parseLong(s, 0, s.length(), 10); + } + + default String deparse(long i) + { + return deparse(i, 10); + } + + /* + * Methods for int + */ + + int compare(int x, int y); + + int divide(int dividend, int divisor); + + int remainder(int dividend, int divisor); + + long toLong(int i); + + int parseInt(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(int i, int radix); + + default int parseInt(CharSequence s, int radix) + { + return parseInt(s, 0, s.length(), radix); + } + + default int parseInt(CharSequence s) + { + return parseInt(s, 0, s.length(), 10); + } + + default String deparse(int i) + { + return deparse(i, 10); + } + + /* + * Methods for short + */ + + int compare(short x, short y); + + short divide(short dividend, short divisor); + + short remainder(short dividend, short divisor); + + long toLong(short i); + + int toInt(short i); + + short parseShort(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(short i, int radix); + + default short parseShort(CharSequence s, int radix) + { + return parseShort(s, 0, s.length(), radix); + } + + default short parseShort(CharSequence s) + { + return parseShort(s, 0, s.length(), 10); + } + + default String deparse(short i) + { + return deparse(i, 10); + } + + /* + * Methods for byte + */ + + int compare(byte x, byte y); + + byte divide(byte dividend, byte divisor); + + byte remainder(byte dividend, byte divisor); + + long toLong(byte i); + + int toInt(byte i); + + short toShort(byte i); + + byte parseByte(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(byte i, int radix); + + default byte parseByte(CharSequence s, int radix) + { + return parseByte(s, 0, s.length(), radix); + } + + default byte parseByte(CharSequence s) + { + return parseByte(s, 0, s.length(), 10); + } + + default String deparse(byte i) + { + return deparse(i, 10); + } + + /** + * Mixin with default signed implementations of the interface methods. 
+ */ + interface Signed extends TwosComplement + { + @Override + default boolean unsigned() + { + return false; + } + + /* + * Methods for long + */ + + @Override + default int compare(long x, long y) + { + return Long.compare(x, y); + } + + @Override + default long divide(long dividend, long divisor) + { + return dividend / divisor; + } + + @Override + default long remainder(long dividend, long divisor) + { + return dividend % divisor; + } + + @Override + default long parseLong( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Long.parseLong(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(long i, int radix) + { + return Long.toString(i, radix); + } + + /* + * Methods for int + */ + + @Override + default int compare(int x, int y) + { + return Integer.compare(x, y); + } + + @Override + default int divide(int dividend, int divisor) + { + return dividend / divisor; + } + + @Override + default int remainder(int dividend, int divisor) + { + return dividend % divisor; + } + + @Override + default long toLong(int i) + { + return i; + } + + @Override + default int parseInt( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Integer.parseInt(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(int i, int radix) + { + return Integer.toString(i, radix); + } + + /* + * Methods for short + */ + + @Override + default int compare(short x, short y) + { + return Short.compare(x, y); + } + + @Override + default short divide(short dividend, short divisor) + { + return (short)(dividend / divisor); + } + + @Override + default short remainder(short dividend, short divisor) + { + return (short)(dividend % divisor); + } + + @Override + default long toLong(short i) + { + return i; + } + + @Override + default int toInt(short i) + { + return i; + } + + @Override + default short parseShort( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = Integer.parseInt(s, beginIndex, endIndex, radix); + if ( Short.MIN_VALUE <= i && i <= Short.MAX_VALUE ) + return (short)i; + throw new NumberFormatException(String.format( + "Value out of range. Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(short i, int radix) + { + return Integer.toString(i, radix); + } + + /* + * Methods for byte + */ + + @Override + default int compare(byte x, byte y) + { + return Byte.compare(x, y); + } + + @Override + default byte divide(byte dividend, byte divisor) + { + return (byte)(dividend / divisor); + } + + @Override + default byte remainder(byte dividend, byte divisor) + { + return (byte)(dividend % divisor); + } + + @Override + default long toLong(byte i) + { + return i; + } + + @Override + default int toInt(byte i) + { + return i; + } + + @Override + default short toShort(byte i) + { + return i; + } + + @Override + default byte parseByte( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = Integer.parseInt(s, beginIndex, endIndex, radix); + if ( Byte.MIN_VALUE <= i && i <= Byte.MAX_VALUE ) + return (byte)i; + throw new NumberFormatException(String.format( + "Value out of range. Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(byte i, int radix) + { + return Integer.toString(i, radix); + } + } + + /** + * Mixin with default unsigned implementations of the interface methods. 
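+ * For illustration only, a class (or anonymous class) can adopt these
+ * defaults wholesale and then treat {@code int} values as unsigned:
+ *<pre>{@code
+ * TwosComplement ops = new TwosComplement.Unsigned() { };
+ * long big = ops.toLong(0xFFFF_FFFF);     // 4294967295, not -1
+ * String s = ops.deparse(0xFFFF_FFFF);    // "4294967295"
+ *}</pre>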
+ */ + interface Unsigned extends TwosComplement + { + @Override + default boolean unsigned() + { + return true; + } + + /* + * Methods for long + */ + + @Override + default int compare(long x, long y) + { + return Long.compareUnsigned(x, y); + } + + @Override + default long divide(long dividend, long divisor) + { + return Long.divideUnsigned(dividend, divisor); + } + + @Override + default long remainder(long dividend, long divisor) + { + return Long.remainderUnsigned(dividend, divisor); + } + + @Override + default long parseLong( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Long.parseUnsignedLong(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(long i, int radix) + { + return Long.toUnsignedString(i, radix); + } + + /* + * Methods for int + */ + + @Override + default int compare(int x, int y) + { + return Integer.compareUnsigned(x, y); + } + + @Override + default int divide(int dividend, int divisor) + { + return Integer.divideUnsigned(dividend, divisor); + } + + @Override + default int remainder(int dividend, int divisor) + { + return Integer.remainderUnsigned(dividend, divisor); + } + + @Override + default long toLong(int i) + { + return Integer.toUnsignedLong(i); + } + + @Override + default int parseInt( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Integer.parseUnsignedInt(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(int i, int radix) + { + return Integer.toUnsignedString(i, radix); + } + + /* + * Methods for short + */ + + @Override + default int compare(short x, short y) + { + return Short.compareUnsigned(x, y); + } + + @Override + default short divide(short dividend, short divisor) + { + return (short) + Integer.divideUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default short remainder(short dividend, short divisor) + { + return (short) + Integer.remainderUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default long toLong(short i) + { + return Short.toUnsignedLong(i); + } + + @Override + default int toInt(short i) + { + return Short.toUnsignedInt(i); + } + + @Override + default short parseShort( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = + Integer.parseUnsignedInt(s, beginIndex, endIndex, radix); + if ( 0 <= i && i <= 0xffff ) + return (short)i; + throw new NumberFormatException(String.format( + "Value out of range. 
Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(short i, int radix) + { + return Integer.toUnsignedString(toInt(i), radix); + } + + /* + * Methods for byte + */ + + @Override + default int compare(byte x, byte y) + { + return Byte.compareUnsigned(x, y); + } + + @Override + default byte divide(byte dividend, byte divisor) + { + return (byte) + Integer.divideUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default byte remainder(byte dividend, byte divisor) + { + return (byte) + Integer.remainderUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default long toLong(byte i) + { + return Byte.toUnsignedLong(i); + } + + @Override + default int toInt(byte i) + { + return Byte.toUnsignedInt(i); + } + + @Override + default short toShort(byte i) + { + return (short)Byte.toUnsignedInt(i); + } + + @Override + default byte parseByte( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = + Integer.parseUnsignedInt(s, beginIndex, endIndex, radix); + if ( 0 <= i && i <= 0xff ) + return (byte)i; + throw new NumberFormatException(String.format( + "Value out of range. Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(byte i, int radix) + { + return Integer.toUnsignedString(toInt(i), radix); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Verifier.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Verifier.java new file mode 100644 index 000000000..7ee581a4a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Verifier.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import java.io.InputStream; + +import java.nio.ByteBuffer; + +/** + * A {@code Verifier} verifies the proper form of content written to a + * {@code Datum}. + *

+ * This is necessary only when the correctness of the written stream may be + * doubtful, as when an API spec requires exposing a method for client code + * to write arbitrary bytes. If a type implementation exposes only + * type-appropriate operations to client code, and always controls the byte + * stream written to the varlena, the {@code NOOP} verifier can be used. + *

+ * There are no methods accepting an unextended {@code Verifier}, only those + * accepting one of its contained functional interfaces + * {@link OfBuffer OfBuffer} and {@link OfStream OfStream}. + *

+ * A type-specific verifier must supply a {@code verify} method that reads all + * of the content and completes normally if it is a complete and well-formed + * representation of the type. Otherwise, it must throw an exception. + *

+ * An {@code OfBuffer} verifier must leave the buffer's position equal to the + * value of the buffer's limit when the verifier was entered. An + * {@code OfStream} verifier must leave the stream at end of input. An + * {@code OfStream} verifier may assume that the supplied {@code InputStream} + * supports {@code mark} and {@code reset} efficiently. + *
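+ * For illustration only, an {@code OfBuffer} verifier that merely insists on
+ * an even content length could be written as:
+ *<pre>{@code
+ * Verifier.OfBuffer evenLength = b ->
+ * {
+ *     if ( 0 != (b.remaining() & 1) )
+ *         throw new java.sql.SQLDataException("content must have even length");
+ *     b.position(b.limit());     // leave position equal to the limit, as required
+ * };
+ *}</pre>
+ *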

+ * An {@code OfStream} verifier may execute in another thread concurrently with + * the writing of the content by the adapter. + * Its {@code verify} method must not interact with PostgreSQL. + */ +public interface Verifier +{ + /** + * A verifier interface to be used when the {@code ByteBuffer} API provides + * the most natural interface for manipulating the content. + *

+ * Such a verifier will be run only when the content has been completely + * produced. + */ + @FunctionalInterface + interface OfBuffer extends Verifier + { + /** + * Completes normally if the verification succeeds, otherwise throwing + * an exception. + *

+ * The buffer's {@code position} when this method returns must equal the + * value of the buffer's {@code limit} when the method was called. + */ + void verify(ByteBuffer b) throws Exception; + } + + /** + * A verifier interface to be used when the {@code InputStream} API provides + * the most natural interface for manipulating the content. + *

+ * Such a verifier may be run concurrently in another thread while the + * data type adapter is writing the content. It must therefore be able to + * verify the content without interacting with PostgreSQL. + */ + @FunctionalInterface + interface OfStream extends Verifier + { + /** + * Completes normally if the verification succeeds, otherwise throwing + * an exception. + *
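+ * A minimal sketch (a real verifier would examine the bytes it reads):
+ *<pre>{@code
+ * Verifier.OfStream drains = s ->
+ * {
+ *     byte[] scratch = new byte[8192];
+ *     while ( -1 != s.read(scratch) )
+ *         continue;               // read to end-of-input, as required below
+ * };
+ *}</pre>
+ *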

+ * The method must leave the stream at end-of-input. It may assume that + * the stream supports {@code mark} and {@code reset} efficiently. + * It must avoid interacting with PostgreSQL, in case it is run in + * another thread concurrently with the production of the content. + */ + void verify(InputStream s) throws Exception; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/package-info.java new file mode 100644 index 000000000..de2fe153f --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/package-info.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Types that will be of interest in the implementation of {@code Adapter}s. + *

+ * First-class PL/Java support for a new PostgreSQL data type entails + * implementation of an {@link Adapter Adapter}. Unlike non-{@code Adapter} + * code, an {@code Adapter} implementation may have to concern itself with + * the facilities in this package, {@code Datum} in particular. An + * {@code Adapter} should avoid leaking a {@code Datum} to non-{@code Adapter} + * code. + *

<h2>Adapter manager</h2>

+ *

+ * There needs to be an {@code Adapter}-manager service to accept application + * requests to connect x PostgreSQL type with y Java type + * and find or compose available {@code Adapter}s (built-in or by service + * loader) to do so. There is some work in that direction (the methods in + * {@link AbstractType AbstractType} should be helpful), but no such manager + * yet. + * @author Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import org.postgresql.pljava.Adapter; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java index d5b6aa4b6..d67ebd202 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java @@ -122,6 +122,8 @@ import org.postgresql.pljava.annotation.BaseUDT; import org.postgresql.pljava.annotation.MappedUDT; +import org.postgresql.pljava.model.CatalogObject; + import org.postgresql.pljava.sqlgen.Lexicals; import org.postgresql.pljava.sqlgen.Lexicals.Identifier; @@ -5477,6 +5479,8 @@ class TypeMapper this.addMap(OffsetTime.class, "pg_catalog", "timetz"); this.addMap(LocalDateTime.class, "pg_catalog", "timestamp"); this.addMap(OffsetDateTime.class, "pg_catalog", "timestamptz"); + + this.addMap(CatalogObject.class, "pg_catalog", "oid"); } private boolean mappingsFrozen() diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/AccessMethod.java b/pljava-api/src/main/java/org/postgresql/pljava/model/AccessMethod.java new file mode 100644 index 000000000..7620b6632 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/AccessMethod.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a relation access method. + */ +public interface AccessMethod +extends + Addressed, Named +{ + RegClass.Known CLASSID = + formClassId(AccessMethodRelationId, AccessMethod.class); + + enum Type { TABLE, INDEX } + + interface AMHandler extends Why { } + + RegProcedure handler(); + + Type type(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Attribute.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Attribute.java new file mode 100644 index 000000000..3f9d6040a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Attribute.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * An attribute (column), either of a known relation, or of a transient record + * type. + *
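+ * As a quick, illustrative sketch using only methods declared on this
+ * interface, a column could be summarized as:
+ *<pre>{@code
+ * static String describe(Attribute a)
+ * {
+ *     return a.name() + (a.notNull() ? " not null" : "")
+ *         + (a.dropped() ? " (dropped)" : "");
+ * }
+ *}</pre>
+ *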

+ * Instances of the transient kind may be retrieved from a + * {@link TupleDescriptor TupleDescriptor} and will compare unequal to other + * {@code Attribute} instances even with the same {@code classId}, + * {@code subId}, and {@code oid} (which will be {@code InvalidOid}); for such + * instances, {@link #containingTupleDescriptor() containingTupleDescriptor} + * will return the specific transient {@code TupleDescriptor} to which + * the attribute belongs. Such 'virtual' instances will appear to have + * the invalid {@code RegClass} as {@code relation()}, and all access granted + * to {@code public}. + */ +public interface Attribute +extends + Addressed, Component, Named, + AccessControlled +{ + /** + * CLASS rather than CLASSID because Attribute isn't an object class + * in its own right. + *

+ * This simply identifies the table in the catalog that holds attribute + * definitions. An Attribute is not regarded as an object of that 'class'; + * it is a subId of whatever other RegClass object it defines an attribute + * of. + */ + RegClass CLASS = formObjectId(RegClass.CLASSID, AttributeRelationId); + + enum Identity { INAPPLICABLE, GENERATED_ALWAYS, GENERATED_BY_DEFAULT } + + enum Generated { INAPPLICABLE, STORED } + + RegClass relation(); + RegType type(); + short length(); + int dimensions(); + boolean byValue(); + Alignment alignment(); + Storage storage(); + boolean notNull(); + boolean hasDefault(); + boolean hasMissing(); + Identity identity(); + Generated generated(); + boolean dropped(); + boolean local(); + int inheritanceCount(); + RegCollation collation(); + Map options(); + Map fdwoptions(); + // missingValue + + /** + * Returns the tuple descriptor to which this attribute belongs. + *

+ * For a 'cataloged' attribute corresponding to a known relation + * or row type, returns a {@code TupleDescriptor} for that. For a 'virtual' + * attribute obtained from some non-cataloged tuple descriptor, returns + * whatever {@code TupleDescriptor} it came from. + */ + TupleDescriptor containingTupleDescriptor(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/CatalogObject.java b/pljava-api/src/main/java/org/postgresql/pljava/model/CatalogObject.java new file mode 100644 index 000000000..bdcb65f00 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/CatalogObject.java @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.List; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +import java.util.function.IntPredicate; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Base interface representing some object in the PostgreSQL catalogs, + * identified by its {@link #oid() oid}. + *

+ * The {@link #oid() oid} by itself does not constitute an object address until + * combined with a {@code classId} identifying the catalog to which it belongs. + * This topmost interface, therefore, represents a catalog object when only + * the {@code oid} is known, and the {@code classId} is: unknown, or simply + * understood from context. An instance of this interface can be explicitly + * combined with a {@code classId}, using the {@link #of of(classId)} method, + * which will yield an instance of an interface that extends {@link Addressed} + * and is specific to catalog objects of that class. + *

+ * A {@code classId}, in turn, is simply an instance of + * {@link RegClass RegClass} (the catalog of relations, whose name "class" + * reflects PostgreSQL's object-relational origins). It identifies the specific + * relation in the PostgreSQL catalogs where objects with that {@code classId} + * can be looked up. + *

+ * Every user relation, of course, is also represented by a {@code RegClass} + * instance, but not one that can be used to form a catalog object address. + * For that matter, not every class in the PostgreSQL catalogs is modeled by + * a class in PL/Java. Therefore, not just any {@code RegClass} instance can be + * passed to {@link #of of(classId)} as a {@code classId}. Those that can be + * have the more-specific type {@code RegClass.Known}, which also identifies + * the Java model class T that will be returned. + */ +public interface CatalogObject +{ + /** + * The distinct integer value that {@link #oid oid()} will return when + * {@link #isValid isValid()} is false. + *

+ * PostgreSQL catalogs typically use this value (rather than a nullable + * column and a null value) in cases where an object may or may not be + * specified and has not been. + */ + int InvalidOid = 0; + + /** + * This catalog object's object ID; the integer value that identifies the + * object to PostgreSQL when the containing catalog is known. + */ + int oid(); + + /** + * Whether this catalog object has a valid {@code oid} + * (any value other than {@code InvalidOid}). + *

+ * This is not the same as whether any corresponding catalog object actually + * exists. This question can be answered directly from the value of + * {@code oid()}. The existence question (which can be asked sensibly only + * of an {@link Addressed Addressed} instance with its + * {@link Addressed#exists exists()} method) can be answered only through + * a lookup attempt for the {@code oid} in the corresponding catalog. + *
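+ * As an illustrative sketch, the two tests can be combined when both a valid
+ * address and actual presence in the catalog are wanted:
+ *<pre>{@code
+ * static boolean present(CatalogObject.Addressed<?> o)
+ * {
+ *     return o.isValid() && o.exists();
+ * }
+ *}</pre>
+ *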

+ * There is not a unique singleton invalid catalog object instance. Rather, + * there can be distinct {@link Addressed Addressed} instances that have + * the invalid {@code oid} and distinct {@code classId}s, as well as one + * singleton {@code CatalogObject} that has the invalid {@code oid} and + * no valid {@code classId}. + *

+ * When applied to a {@link RegRole.Grantee RegRole.Grantee}, this method + * simply returns the negation of {@link RegRole.Grantee#isPublic isPublic}, + * which is the method that should be preferred for clarity in that case. + */ + boolean isValid(); + + /** + * Return a catalog object as an {@code Addressed} instance in a known + * class. + *

+ * For example, if a {@code CatalogObject o} is read from an {@code oid} + * column known to represent a namespace, {@code o.of(RegNamespace.CLASSID)} + * will return a {@code RegNamespace} instance. + *
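+ * That example, written out as a sketch:
+ *<pre>{@code
+ * static RegNamespace asNamespace(CatalogObject o)
+ * {
+ *     return o.of(RegNamespace.CLASSID); // throws if o is valid with another classId
+ * }
+ *}</pre>
+ *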

+ * An instance whose class id is already the desired one will return itself. + * On an instance that lacks a valid class id, {@code of} can apply any + * desired class id (a different instance will be returned). The invalid + * instance of any class can be converted to the (distinct) invalid instance + * of any other class. On an instance that is valid and already has a valid + * class id, {@code of} will throw an exception if the desired class id + * differs. + * @param classId A known class id, often from the CLASSID field of a known + * CatalogObject subclass. + * @param Specific subtype of Addressed that represents catalog objects + * with the given class id. + * @return An instance with this instance's oid and the desired class id + * (this instance, if the class id matches). + */ + > T of(RegClass.Known classId); + + /** + * A catalog object that has both {@code oid} and {@code classId} specified, + * and can be looked up in the PostgreSQL catalogs (where it may, or may + * not, be found). + * @param Specific subtype of Addressed that represents catalog objects + * with the given class id. + */ + interface Addressed> extends CatalogObject + { + /** + * Returns the {@code classId} (which is an instance of + * {@link RegClass.Known RegClass.Known} of this addressed catalog + * object. + */ + RegClass.Known classId(); + + /** + * Whether a catalog object with this address in fact exists in + * the PostgreSQL catalogs. + *

+ * Unlike {@link #isValid isValid()}, which depends only on the value + * of {@code oid()}, this reflects the result of a catalog lookup. + */ + boolean exists(); + + /** + * Whether this catalog object is shared across all databases in the + * cluster. + *

+ * Contrast {@link RegClass#isShared() isShared()}, a method found only + * on {@code RegClass}, which indicates whether that {@code RegClass} + * instance represents a shared relation. Catalog objects formed with + * that {@code RegClass} instance as their {@code classId} will have + * {@code shared() == true}, though the {@code RegClass} instance itself + * will have {@code shared() == false} (because it models a row in + * {@code pg_class} itself, a catalog that isn't shared). + * @return classId().isShared() + */ + default boolean shared() + { + return classId().isShared(); + } + } + + /** + * Interface for an object that is regarded as a component of some, other, + * addressed catalog object, and is identified by that other object's + * {@code classId} and {@code oid} along with an integer {@code subId}. + *

+ * The chief (only?) example is an {@link Attribute Attribute}, which is + * identified by the {@code classId} and {@code oid} of its containing + * relation, plus a {@code subId}. + */ + interface Component + { + int subId(); + } + + /** + * Interface for any catalog object that has a name, which can be + * an {@link Identifier.Simple Identifier.Simple} or an + * {@link Identifier.Operator Identifier.Operator}. + */ + interface Named> + { + T name(); + } + + /** + * Interface for any catalog object that has a name and also a namespace + * or schema (an associated instance of {@link RegNamespace RegNamespace}). + */ + interface Namespaced> + extends Named + { + RegNamespace namespace(); + + default Identifier.Qualified qualifiedName() + { + return name().withQualifier(namespaceName()); + } + + default Identifier.Simple namespaceName() + { + return namespace().name(); + } + } + + /** + * Interface for any catalog object that has an owner (an associated + * instance of {@link RegRole RegRole}. + */ + interface Owned + { + RegRole owner(); + } + + /** + * Interface for any catalog object with an access control list + * (a list of some type of {@code Grant}). + * @param The subtype of {@link Grant Grant} that applies to catalog + * objects of this type. + */ + interface AccessControlled + { + /** + * Simple list of direct grants. + *

+ * For any T except {@code Grant.OnRole}, simply returns the list of + * grants directly found in this catalog object's ACL. When T is + * {@code Grant.OnRole}, this catalog object is a {@code RegRole}, and + * the result contains a {@code Grant.OnRole} for every role R that is + * directly a member of the role this catalog object represents; each + * such grant has {@code maySetRole()} by definition, and + * {@code mayExercisePrivileges()} if and only if R has {@code inherit}. + */ + List grants(); + + /** + * Computed list of (possibly transitive) grants to grantee. + *

+ * For any T except {@code Grant.OnRole}, a list of grants to + * grantee assembled from: direct grants in this object's ACL + * to {@code PUBLIC}, or to grantee, or to any role R for which + * {@code R.grants(grantee).mayExercisePrivileges()} is true. + *
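+ * For illustration (a sketch, checking one specific privilege on a column):
+ *<pre>{@code
+ * static boolean maySelect(Attribute column, RegRole grantee)
+ * {
+ *     for ( Object g : column.grants(grantee) )
+ *         if ( g instanceof CatalogObject.SELECT
+ *             && ((CatalogObject.SELECT)g).selectGranted() )
+ *             return true;
+ *     return false;
+ * }
+ *}</pre>
+ *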

+ * When T is {@code Grant.OnRole}, this catalog object is a + * {@code RegRole}, and the result contains a {@code Grant.OnRole} for + * which {@code maySetRole()} is true if a membership path from + * grantee to this role exists, and + * {@code mayExercisePrivileges()} is true if such a path exists using + * only roles with {@code inherit()} true. (The {@code inherit()} status + * of this object itself is not considered.) + */ + List grants(RegRole grantee); // transitive closure when on RegRole + // aclitem[] acl(); + // { Oid grantee; Oid grantor; AclMode bits; } see nodes/parsenodes.h + } + + /** + * Interface representing any single {@code Grant} (or ACL item), a grant + * of some set of possible privileges, to some role, granted by some role. + */ + interface Grant + { + /** + * Role to which the accompanying privileges are granted. + *

+ * There is no actual role named {@code public}, but there is + * a distinguished instance {@link RegRole.Grantee#PUBLIC PUBLIC} of + * {@link RegRole.Grantee RegRole.Grantee}. + */ + RegRole.Grantee to(); + + /** + * Role responsible for granting these privileges. + */ + RegRole by(); + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on an attribute (or column). + */ + interface OnAttribute extends SELECT, INSERT, UPDATE, REFERENCES { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a class (or relation, table, view). + */ + interface OnClass + extends OnAttribute, DELETE, TRUNCATE, TRIGGER, MAINTAIN { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a database. + */ + interface OnDatabase extends CONNECT, CREATE, CREATE_TEMP { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a namespace (or schema). + */ + interface OnNamespace extends CREATE, USAGE { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a configuration setting. + */ + interface OnSetting extends SET, ALTER_SYSTEM { } + + /** + * Subtype of {@code Grant} representing the grants (of membership in, + * and/or privileges of, other roles) that may be made to a role. + */ + interface OnRole extends Grant + { + boolean mayExercisePrivileges(); + boolean maySetRole(); + boolean mayAdmin(); + } + } + + /** + * @hidden + */ + interface INSERT extends Grant + { + boolean insertGranted(); + boolean insertGrantable(); + } + + /** + * @hidden + */ + interface SELECT extends Grant + { + boolean selectGranted(); + boolean selectGrantable(); + } + + /** + * @hidden + */ + interface UPDATE extends Grant + { + boolean updateGranted(); + boolean updateGrantable(); + } + + /** + * @hidden + */ + interface DELETE extends Grant + { + boolean deleteGranted(); + boolean deleteGrantable(); + } + + /** + * @hidden + */ + interface TRUNCATE extends Grant + { + boolean truncateGranted(); + boolean truncateGrantable(); + } + + /** + * @hidden + */ + interface REFERENCES extends Grant + { + boolean referencesGranted(); + boolean referencesGrantable(); + } + + /** + * @hidden + */ + interface TRIGGER extends Grant + { + boolean triggerGranted(); + boolean triggerGrantable(); + } + + /** + * @hidden + */ + interface EXECUTE extends Grant + { + boolean executeGranted(); + boolean executeGrantable(); + } + + /** + * @hidden + */ + interface USAGE extends Grant + { + boolean usageGranted(); + boolean usageGrantable(); + } + + /** + * @hidden + */ + interface CREATE extends Grant + { + boolean createGranted(); + boolean createGrantable(); + } + + /** + * @hidden + */ + interface CREATE_TEMP extends Grant + { + boolean create_tempGranted(); + boolean create_tempGrantable(); + } + + /** + * @hidden + */ + interface CONNECT extends Grant + { + boolean connectGranted(); + boolean connectGrantable(); + } + + /** + * @hidden + */ + interface SET extends Grant + { + boolean setGranted(); + boolean setGrantable(); + } + + /** + * @hidden + */ + interface ALTER_SYSTEM extends Grant + { + boolean alterSystemGranted(); + boolean alterSystemGrantable(); + } + + /** + * @hidden + */ + interface MAINTAIN extends Grant + { + boolean maintainGranted(); + boolean maintainGrantable(); + } + + /** + * @hidden + */ + abstract class Factory + { + static final Factory INSTANCE; + + static + { + INSTANCE = ServiceLoader + .load(Factory.class.getModule().getLayer(), 
Factory.class) + .findFirst().orElseThrow(() -> new ServiceConfigurationError( + "could not load PL/Java CatalogObject.Factory")); + } + + static > + RegClass.Known formClassId(int classId, Class clazz) + { + return INSTANCE.formClassIdImpl(classId, clazz); + } + + static > + T formObjectId(RegClass.Known classId, int objId) + { + return INSTANCE.formObjectIdImpl(classId, objId, v -> true); + } + + static > + T formObjectId( + RegClass.Known classId, int objId, IntPredicate versionTest) + { + return INSTANCE.formObjectIdImpl(classId, objId, versionTest); + } + + static Database currentDatabase(RegClass.Known classId) + { + return INSTANCE.currentDatabaseImpl(classId); + } + + static RegRole.Grantee publicGrantee() + { + return INSTANCE.publicGranteeImpl(); + } + + protected abstract > + RegClass.Known formClassIdImpl( + int classId, Class clazz); + + protected abstract > + T formObjectIdImpl( + RegClass.Known classId, int objId, IntPredicate versionTest); + + protected abstract Database + currentDatabaseImpl(RegClass.Known classId); + + protected abstract RegRole.Grantee publicGranteeImpl(); + + protected abstract CharsetEncoding serverEncoding(); + protected abstract CharsetEncoding clientEncoding(); + protected abstract CharsetEncoding encodingFromOrdinal(int ordinal); + protected abstract CharsetEncoding encodingFromName(String name); + protected abstract long fetchAll(); + + /* + * These magic numbers are hardcoded here inside the pljava-api project + * so they can be used in static initializers in API interfaces. The + * verification that they are the right magic numbers takes place in + * compilation of the pljava and pljava-so projects, where they are + * included from here, exported in JNI .h files, and compared using + * StaticAssertStmt to the corresponding values from PostgreSQL headers. + * + * Within groups here, numerical order is as good as any. When adding a + * constant here, add a corresponding CONFIRMCONST in ModelConstants.c. + */ + protected static final int TableSpaceRelationId = 1213; + protected static final int TypeRelationId = 1247; + protected static final int AttributeRelationId = 1249; + protected static final int ProcedureRelationId = 1255; + protected static final int RelationRelationId = 1259; + protected static final int AuthIdRelationId = 1260; + protected static final int DatabaseRelationId = 1262; + protected static final int ForeignServerRelationId = 1417; + protected static final int ForeignDataWrapperRelationId = 2328; + protected static final int AccessMethodRelationId = 2601; + protected static final int ConstraintRelationId = 2606; + protected static final int LanguageRelationId = 2612; + protected static final int NamespaceRelationId = 2615; + protected static final int OperatorRelationId = 2617; + protected static final int TriggerRelationId = 2620; + protected static final int ExtensionRelationId = 3079; + protected static final int CollationRelationId = 3456; + protected static final int TransformRelationId = 3576; + protected static final int TSDictionaryRelationId = 3600; + protected static final int TSConfigRelationId = 3602; + + /* + * PG types good to have around because of corresponding JDBC types. 
+ */ + protected static final int BOOLOID = 16; + protected static final int BYTEAOID = 17; + protected static final int CHAROID = 18; + protected static final int INT8OID = 20; + protected static final int INT2OID = 21; + protected static final int INT4OID = 23; + protected static final int XMLOID = 142; + protected static final int FLOAT4OID = 700; + protected static final int FLOAT8OID = 701; + protected static final int BPCHAROID = 1042; + protected static final int VARCHAROID = 1043; + protected static final int DATEOID = 1082; + protected static final int TIMEOID = 1083; + protected static final int TIMESTAMPOID = 1114; + protected static final int TIMESTAMPTZOID = 1184; + protected static final int TIMETZOID = 1266; + protected static final int BITOID = 1560; + protected static final int VARBITOID = 1562; + protected static final int NUMERICOID = 1700; + + /* + * PG types not mentioned in JDBC but bread-and-butter to PG devs. + */ + protected static final int TEXTOID = 25; + protected static final int UNKNOWNOID = 705; + protected static final int RECORDOID = 2249; + protected static final int CSTRINGOID = 2275; + protected static final int VOIDOID = 2278; + protected static final int TRIGGEROID = 2279; + + /* + * Of the several polymorphic types, API features this one because it + * can also be the resolved actual type of some system catalog columns. + */ + protected static final int ANYARRAYOID = 2277; + + /* + * PG types used in modeling PG types themselves. + */ + protected static final int NAMEOID = 19; + protected static final int REGPROCOID = 24; + protected static final int OIDOID = 26; + protected static final int PG_NODE_TREEOID = 194; + protected static final int ACLITEMOID = 1033; + protected static final int REGPROCEDUREOID = 2202; + protected static final int REGOPEROID = 2203; + protected static final int REGOPERATOROID = 2204; + protected static final int REGCLASSOID = 2205; + protected static final int REGTYPEOID = 2206; + protected static final int REGCONFIGOID = 3734; + protected static final int REGDICTIONARYOID = 3769; + protected static final int REGNAMESPACEOID = 4089; + protected static final int REGROLEOID = 4096; + protected static final int REGCOLLATIONOID = 4191; + + /* + * The well-known, pinned procedural languages. + */ + protected static final int INTERNALlanguageId = 12; + protected static final int ClanguageId = 13; + protected static final int SQLlanguageId = 14; + + /* + * The well-known, pinned namespaces. + */ + protected static final int PG_CATALOG_NAMESPACE = 11; + protected static final int PG_TOAST_NAMESPACE = 99; + + /* + * The well-known, pinned collations. + */ + protected static final int DEFAULT_COLLATION_OID = 100; + protected static final int C_COLLATION_OID = 950; + + /* + * These magic numbers are assigned here to allow the various well-known + * PostgreSQL ResourceOwners to be retrieved without a proliferation of + * methods on the factory interface. These are arbitrary array indices, + * visible also to JNI code through the generated headers just as + * described above. The native initialization method may create, + * for example, an array of ByteBuffers that window the corresponding + * PostgreSQL globals, ordered according to these indices. The Java code + * implementing resourceOwner() can be ignorant of these specific values + * and simply use them to index the array. HOWEVER, it does know that + * the first one, index 0, refers to the current resource owner. 
+ */ + protected static final int RSO_Current = 0; // must be index 0 + protected static final int RSO_CurTransaction = 1; + protected static final int RSO_TopTransaction = 2; + protected static final int RSO_AuxProcess = 3; + + protected abstract ResourceOwner resourceOwner(int which); + + /* + * Same as above but for the well-known PostgreSQL MemoryContexts. + * Again, the implementing code knows index 0 is for the current one. + */ + protected static final int MCX_CurrentMemory = 0; // must be index 0 + protected static final int MCX_TopMemory = 1; + protected static final int MCX_Error = 2; + protected static final int MCX_Postmaster = 3; + protected static final int MCX_CacheMemory = 4; + protected static final int MCX_Message = 5; + protected static final int MCX_TopTransaction = 6; + protected static final int MCX_CurTransaction = 7; + protected static final int MCX_Portal = 8; + /* + * A long-lived, never-reset context created by PL/Java as a child of + * TopMemoryContext. + */ + protected static final int MCX_JavaMemory = 9; + + protected abstract MemoryContext memoryContext(int which); + + protected abstract MemoryContext upperMemoryContext(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/CharsetEncoding.java b/pljava-api/src/main/java/org/postgresql/pljava/model/CharsetEncoding.java new file mode 100644 index 000000000..6e5543893 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/CharsetEncoding.java @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; + +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; + +import java.nio.charset.CharacterCodingException; + +import java.sql.SQLException; + +import static org.postgresql.pljava.model.CatalogObject.Factory; + +import org.postgresql.pljava.adt.spi.Datum; + +/** + * Represents one of PostgreSQL's available character set encodings. + *

+ * Not all of the encodings that PostgreSQL supports for communication with + * the client are also supported for use in the backend and in storage. + * The {@link #usableOnServer usableOnServer} method identifies which ones + * are suitable as server encodings. + *

+ * The encoding that is in use for the current database cannot change during + * a session, and is found in the final {@link #SERVER_ENCODING SERVER_ENCODING} + * field. + *

+ * The encoding currently in use by the connected client may change during + * a session, and is returned by the {@link #clientEncoding clientEncoding} + * method. + *

+ * The {@link #charset charset} method returns the corresponding Java + * {@link Charset Charset} if that can be identified, and several convenience + * methods are provided to decode or encode values accordingly. + */ +public interface CharsetEncoding +{ + CharsetEncoding SERVER_ENCODING = Factory.INSTANCE.serverEncoding(); + + /** + * A distinguished {@code CharsetEncoding} representing uses such as + * {@code -1} in the {@code collencoding} column of {@code pg_collation}, + * indicating the collation is usable with any encoding. + *

+ * This is its only instance. + */ + Any ANY = new Any(); + + /** + * Returns the encoding currently selected by the connected client. + */ + static CharsetEncoding clientEncoding() + { + return Factory.INSTANCE.clientEncoding(); + } + + /** + * Returns the {@code CharsetEncoding} for the given PostgreSQL encoding + * number (as used in the {@code encoding} columns of some system catalogs). + * @throws IllegalArgumentException if the argument is not the ordinal of + * some known encoding + */ + static CharsetEncoding fromOrdinal(int ordinal) + { + return Factory.INSTANCE.encodingFromOrdinal(ordinal); + } + + /** + * Returns the {@code CharsetEncoding} for the given PostgreSQL encoding + * name. + * @throws IllegalArgumentException if the argument is not the name of + * some known encoding + */ + static CharsetEncoding fromName(String name) + { + return Factory.INSTANCE.encodingFromName(name); + } + + /** + * Returns the PostgreSQL encoding number (as used in the {@code encoding} + * columns of some system catalogs) for this encoding. + */ + int ordinal(); + + /** + * Returns the PostgreSQL name for this encoding. + *

+ * The PostgreSQL encoding names have a long history and may not match + * cleanly with more standardized names in modern libraries. + */ + String name(); + + /** + * Returns the name identifying this encoding in ICU (International + * Components for Unicode), or null if its implementation in PostgreSQL + * does not define one. + *

+ * When present, the ICU name can be a better choice for matching encodings + * in other libraries. + */ + String icuName(); + + /** + * Indicates whether this encoding is usable as a server encoding. + */ + boolean usableOnServer(); + + /** + * Returns the corresponding Java {@link Charset Charset}, or null if none + * can be identified. + */ + Charset charset(); + + /** + * Returns a {@link CharsetDecoder CharsetDecoder}, configured to report + * all decoding errors (rather than silently substituting data), if + * {@link #charset charset()} would return a non-null value. + */ + default CharsetDecoder newDecoder() + { + return charset().newDecoder(); + } + + /** + * Returns a {@link CharsetEncoder CharsetEncoder}, configured to report + * all encoding errors (rather than silently substituting data), if + * {@link #charset charset()} would return a non-null value. + */ + default CharsetEncoder newEncoder() + { + return charset().newEncoder(); + } + + /** + * Decode bytes to characters, with exceptions reported. + *

+ * Unlike the corresponding convenience method on {@link Charset Charset}, + * this method will throw exceptions rather than silently substituting + * characters. This is a database system; it doesn't go changing your data + * without telling you. + *

+ * Other behaviors can be obtained by calling {@link #newDecoder newDecoder} + * and configuring it as desired. + */ + default CharBuffer decode(ByteBuffer bb) throws CharacterCodingException + { + return newDecoder().decode(bb); + } + + /** + * Encode characters to bytes, with exceptions reported. + *
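// A minimal sketch of the distinction described above: the default decode()
// reports errors, while a caller who really wants substitution configures a
// decoder from newDecoder() explicitly. Names here are hypothetical.
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CodingErrorAction;
import org.postgresql.pljava.model.CharsetEncoding;

class DecodeSketch
{
    static CharBuffer strict(ByteBuffer b) throws CharacterCodingException
    {
        return CharsetEncoding.SERVER_ENCODING.decode(b); // throws on bad input
    }

    static CharBuffer lossy(ByteBuffer b) throws CharacterCodingException
    {
        return CharsetEncoding.SERVER_ENCODING.newDecoder()
            .onMalformedInput(CodingErrorAction.REPLACE)
            .onUnmappableCharacter(CodingErrorAction.REPLACE)
            .decode(b);
    }
}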

+ * Unlike the corresponding convenience method on {@link Charset Charset}, + * this method will throw exceptions rather than silently substituting + * characters. This is a database system; it doesn't go changing your data + * without telling you. + *

+ * Other behaviors can be obtained by calling {@link #newEncoder newEncoder} + * and configuring it as desired. + */ + default ByteBuffer encode(CharBuffer cb) throws CharacterCodingException + { + return newEncoder().encode(cb); + } + + /** + * Encode characters to bytes, with exceptions reported. + *

+ * Unlike the corresponding convenience method on {@link Charset Charset}, + * this method will throw exceptions rather than silently substituting + * characters. This is a database system; it doesn't go changing your data + * without telling you. + *

+ * Other behaviors can be obtained by calling {@link #newEncoder newEncoder} + * and configuring it as desired. + */ + default ByteBuffer encode(String s) throws CharacterCodingException + { + return encode(CharBuffer.wrap(s)); + } + + /** + * Decode bytes to characters, with exceptions reported. + *

+ * The input {@link Datum Datum} is pinned around the decoding operation. + */ + default CharBuffer decode(Datum.Input in, boolean close) + throws SQLException, IOException + { + in.pin(); + try + { + return decode(in.buffer()); + } + finally + { + in.unpin(); + if ( close ) + in.close(); + } + } + + /** + * Return an {@link InputStreamReader InputStreamReader} that reports + * exceptions. + *
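// A minimal sketch of the method above: obtaining a Java String from a
// Datum.Input in the server encoding. The datum is pinned for the duration of
// the decode; close=true releases it afterward. Any type parameters on
// Datum.Input are omitted here, and the names are hypothetical.
import java.io.IOException;
import java.sql.SQLException;
import org.postgresql.pljava.adt.spi.Datum;
import org.postgresql.pljava.model.CharsetEncoding;

class DatumTextSketch
{
    static String asString(Datum.Input in) throws SQLException, IOException
    {
        return CharsetEncoding.SERVER_ENCODING.decode(in, true).toString();
    }
}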

+ * Other behaviors can be obtained by calling {@link #newDecoder newDecoder} + * and configuring it as desired before constructing an + * {@code InputStreamReader}. + */ + default InputStreamReader reader(InputStream in) + { + return new InputStreamReader(in, newDecoder()); + } + + /** + * Return an {@link OutputStreamWriter OutputStreamWriter} that reports + * exceptions. + *

+ * Other behaviors can be obtained by calling {@link #newEncoder newEncoder} + * and configuring it as desired before constructing an + * {@code OutputStreamWriter}. + */ + default OutputStreamWriter writer(OutputStream out) + { + return new OutputStreamWriter(out, newEncoder()); + } + + /** + * A distinguished {@code CharsetEncoding} representing uses such as + * {@code -1} in the {@code collencoding} column of {@code pg_collation}, + * indicating the collation is usable with any encoding. + *

+ * This returns -1 from {@code ordinal()} and {@code null} or {@code false} + * from the other non-default methods according to their types. The only + * instance of this class is {@code CharsetEncoding.ANY}. + */ + class Any implements CharsetEncoding + { + private Any() + { + } + + @Override + public int ordinal() + { + return -1; + } + + @Override + public String name() + { + return null; + } + + @Override + public String icuName() + { + return null; + } + + @Override + public boolean usableOnServer() + { + return false; + } + + @Override + public Charset charset() + { + return null; + } + + @Override + public String toString() + { + return "CharsetEncoding.ANY"; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Constraint.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Constraint.java new file mode 100644 index 000000000..20705e1d9 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Constraint.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.TargetList.Projection; + +import java.sql.SQLXML; +import java.util.List; + +/** + * Model of the PostgreSQL {@code pg_constraint} system catalog. + */ +public interface Constraint +extends Addressed, Namespaced +{ + RegClass.Known CLASSID = + formClassId(ConstraintRelationId, Constraint.class); + + enum Type + { + CHECK, FOREIGN_KEY, NOT_NULL, PRIMARY_KEY, UNIQUE, CONSTRAINT_TRIGGER, + EXCLUSION + } + + enum ReferentialAction { NONE, RESTRICT, CASCADE, SET_NULL, SET_DEFAULT } + + enum MatchType { FULL, PARTIAL, SIMPLE } + + Type type(); + boolean deferrable(); + boolean deferred(); + boolean validated(); + RegClass onTable(); + RegType onDomain(); + RegClass index(); + Constraint parent(); + RegClass referencedTable(); + /** + * The action specified for update of a referenced column; null if not + * a foreign key constraint. + */ + ReferentialAction updateAction(); + /** + * The action specified for delete of a referenced row; null if not + * a foreign key constraint. + */ + ReferentialAction deleteAction(); + /** + * How foreign-key columns are to be matched; null if not + * a foreign key constraint. + */ + MatchType matchType(); + boolean isLocal(); + short inheritCount(); + boolean noInherit(); + Projection key(); + Projection fkey(); + List pfEqOp(); + List ppEqOp(); + List ffEqOp(); + /** + * Which columns are to be set null in a referential action; all referencing + * columns if this value is null. + *
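// A minimal sketch, assuming the Constraint getters declared above: the
// referential-action and match-type getters are documented to return null
// unless the constraint is a foreign key, so callers check type() first.
// Names here are hypothetical.
import org.postgresql.pljava.model.Constraint;

class ConstraintSketch
{
    static String deleteRule(Constraint c)
    {
        if ( Constraint.Type.FOREIGN_KEY != c.type() )
            return "not a foreign-key constraint";
        return "ON DELETE " + c.deleteAction();
    }
}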

+ * Returns null always on PostgreSQL versions earlier than 15. + */ + Projection fdelSetColumns(); + List exclOp(); + SQLXML bin(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Database.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Database.java new file mode 100644 index 000000000..8e11e4d42 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Database.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a database defined within the PostgreSQL cluster. + */ +public interface Database +extends + Addressed, Named, Owned, + AccessControlled +{ + RegClass.Known CLASSID = + formClassId(DatabaseRelationId, Database.class); + + Database CURRENT = currentDatabase(CLASSID); + + CharsetEncoding encoding(); + + /** + * A string identifying the collation rules for use in this database (when + * not overridden for a specific column or expression). + *

+ * At least through PostgreSQL 14, this is always the identifier of an + * operating system ("libc") collation, even in builds with ICU available. + */ + String collate(); + + /** + * A string identifying the collation rules for use in this database (when + * not overridden for a specific column or expression). + *
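// A minimal sketch reading the settings above for the current database.
// name() comes from the Named facet; charset() may be null when no matching
// java.nio.charset.Charset is identified. Names here are hypothetical.
import org.postgresql.pljava.model.Database;

class DatabaseSketch
{
    static String localeSummary()
    {
        Database db = Database.CURRENT;
        return db.name() + ": encoding=" + db.encoding().name()
            + ", charset=" + db.encoding().charset()
            + ", collate=" + db.collate() + ", ctype=" + db.ctype();
    }
}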

+ * At least through PostgreSQL 14, this is always the identifier of an + * operating system ("libc") collation, even in builds with ICU available. + */ + String ctype(); + + boolean template(); + boolean allowConnection(); + int connectionLimit(); + // oid lastsysoid + // xid frozenxid + // xid minmxid + Tablespace tablespace(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Extension.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Extension.java new file mode 100644 index 000000000..0bf782037 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Extension.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.List; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a PostgreSQL extension that has been installed for the current + * database. + */ +public interface Extension +extends Addressed, Named, Owned +{ + RegClass.Known CLASSID = + formClassId(ExtensionRelationId, Extension.class); + + /** + * Namespace in which most (or all, for a relocatable extension) of the + * namespace-qualified objects belonging to the extension are installed. + *

+ * Not a namespace qualifying the extension's name; extensions are not + * namespace-qualified. + */ + RegNamespace namespace(); + boolean relocatable(); + String version(); + List config(); + List condition(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignDataWrapper.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignDataWrapper.java new file mode 100644 index 000000000..0d5c984e2 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignDataWrapper.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a foreign data wrapper that can provide the implementation for + * one or more {@link ForeignServer} declarations. + */ +public interface ForeignDataWrapper +extends + Addressed, Named, + Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(ForeignDataWrapperRelationId, ForeignDataWrapper.class); + + interface FDWHandler extends Why { } + interface FDWValidator extends Why { } + + RegProcedure handler(); + + RegProcedure validator(); + + Map options(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignServer.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignServer.java new file mode 100644 index 000000000..4bf8d45ef --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignServer.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a foreign server, with which foreign tables can be declared. + */ +public interface ForeignServer +extends + Addressed, Named, + Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(ForeignServerRelationId, ForeignServer.class); + + ForeignDataWrapper fdw(); + + String type(); + + String version(); + + Map options(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/MemoryContext.java b/pljava-api/src/main/java/org/postgresql/pljava/model/MemoryContext.java new file mode 100644 index 000000000..917012e4c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/MemoryContext.java @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.Lifespan; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +/** + * A PostgreSQL {@code MemoryContext}, which is usable as a PL/Java + * {@link Lifespan Lifespan} to scope the lifetimes of PL/Java objects + * (as when they depend on native memory allocated in the underlying context). + *

+ * The {@code MemoryContext} API in PostgreSQL is described in the
+ * PostgreSQL source tree's memory-context README
+ * ({@code src/backend/utils/mmgr/README}).
+ *

+ * Static getters for the globally known contexts are spelled and capitalized + * as they are in PostgreSQL. + */ +public interface MemoryContext extends Lifespan +{ + /** + * The top level of the context tree, of which every other context is + * a descendant. + *

+ * Used as described here. + */ + MemoryContext TopMemoryContext = + INSTANCE.memoryContext(MCX_TopMemory); + + /** + * The "current" memory context, which supplies all allocations made by + * PostgreSQL {@code palloc} and related functions that do not explicitly + * specify a context. + *

+ * Used as described here. + */ + static MemoryContext CurrentMemoryContext() + { + return INSTANCE.memoryContext(MCX_CurrentMemory); + } + + /** + * Getter method equivalent to the final + * {@link #TopMemoryContext TopMemoryContext} field, for consistency with + * the other static getters. + */ + static MemoryContext TopMemoryContext() + { + return TopMemoryContext; + } + + /** + * Holds everything that lives until end of the top-level transaction. + *

+ * Can be appropriate when a specification, for example JDBC, provides that + * an object should remain valid for the life of the transaction. + *
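// A minimal sketch of the use described above: an object that must stay valid
// for the rest of the top-level transaction can be scoped to this context as
// its Lifespan. The class and method names here are hypothetical.
import org.postgresql.pljava.Lifespan;
import org.postgresql.pljava.model.MemoryContext;

class LifespanSketch
{
    static Lifespan transactionLifetime()
    {
        // a getter rather than a constant: the underlying context differs
        // from transaction to transaction
        return MemoryContext.TopTransactionContext();
    }
}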

+ * Uses are described here. + */ + static MemoryContext TopTransactionContext() + { + return INSTANCE.memoryContext(MCX_TopTransaction); + } + + /** + * The same as {@link #TopTransactionContext() TopTransactionContext} when + * in a top-level transaction, but different in subtransactions (such as + * those associated with PL/Java savepoints). + *

+ * Used as described here. + */ + static MemoryContext CurTransactionContext() + { + return INSTANCE.memoryContext(MCX_CurTransaction); + } + + /** + * Context of the currently active execution portal. + *

+ * Used as described here. + */ + static MemoryContext PortalContext() + { + return INSTANCE.memoryContext(MCX_Portal); + } + + /** + * A permanent context switched into for error recovery processing. + *

+ * Used as described here. + */ + static MemoryContext ErrorContext() + { + return INSTANCE.memoryContext(MCX_Error); + } + + /** + * A long-lived, never-reset context created by PL/Java as a child of + * {@code TopMemoryContext}. + *

+ * Perhaps useful for PL/Java-related allocations that will be long-lived, + * or managed only from the Java side, as a way of accounting for them + * separately, as opposed to just putting them in {@code TopMemoryContext}. + * It hasn't been used consistently even in the historical PL/Java + * code base, and should perhaps be a candidate for deprecation (or for + * a thorough code review to establish firmer guidelines for its use). + */ + static MemoryContext JavaMemoryContext() + { + return INSTANCE.memoryContext(MCX_JavaMemory); + } + + /** + * The "upper executor" memory context (that is, the context on entry, prior + * to any use of SPI) associated with the current (innermost) PL/Java + * function invocation. + *

+ * This is "precisely the right context for a value returned" from a + * function that uses SPI, as described + * here. + */ + static MemoryContext UpperMemoryContext() + { + return INSTANCE.upperMemoryContext(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Portal.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Portal.java new file mode 100644 index 000000000..42babf51f --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Portal.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; + +import java.util.List; + +/** + * Models a PostgreSQL {@code Portal}, an object representing the ongoing + * execution of a query and capable of returning a {@link TupleDescriptor} for + * the result, and fetching tuples of the result, either all at once, or in + * smaller batches. + */ +public interface Portal extends AutoCloseable +{ + /** + * The direction modes that can be used with {@link #fetch fetch} + * and {@link #move move}. + */ + enum Direction { FORWARD, BACKWARD, ABSOLUTE, RELATIVE } + + /** + * A distinguished value for the count argument to + * {@link #fetch fetch} or {@link #move move}. + */ + long ALL = CatalogObject.Factory.INSTANCE.fetchAll(); + + @Override + void close(); // AutoCloseable without checked exceptions + + /** + * Returns the {@link TupleDescriptor} describing any tuples that may be + * fetched from this {@code Portal}. + */ + TupleDescriptor tupleDescriptor() throws SQLException; + + /** + * Fetches count more tuples (or {@link #ALL ALL} of them) in the + * specified direction. + * @return a notional List of the fetched tuples. Iterating through the list + * may return the same TupleTableSlot repeatedly, with each tuple in turn + * stored in the slot. + * @see "PostgreSQL documentation for SPI_scroll_cursor_fetch" + */ + List fetch(Direction dir, long count) + throws SQLException; + + /** + * Moves the {@code Portal}'s current position count rows (or + * {@link #ALL ALL} possible) in the specified direction. + * @return the number of rows by which the position actually moved + * @see "PostgreSQL documentation for SPI_scroll_cursor_move" + */ + long move(Direction dir, long count) + throws SQLException; +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ProceduralLanguage.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ProceduralLanguage.java new file mode 100644 index 000000000..9c3a29978 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ProceduralLanguage.java @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Lookup; +import org.postgresql.pljava.model.RegProcedure.Memo.How; +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; // javadoc +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.PLJavaBasedLanguage; // javadoc +import org.postgresql.pljava.PLPrincipal; + +import org.postgresql.pljava.annotation.Function.Trust; + +import java.util.BitSet; +import java.util.List; + +/** + * Model of a PostgreSQL procedural language, including (for non-built-in + * languages, like PL/Java) the handler functions used in its implementation. + */ +public interface ProceduralLanguage +extends + Addressed, Named, Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(LanguageRelationId, ProceduralLanguage.class); + + /** + * The well-known language "internal", for routines implemented within + * PostgreSQL itself. + */ + ProceduralLanguage INTERNAL = formObjectId(CLASSID, INTERNALlanguageId); + + /** + * The well-known language "c", for extension routines implemented using + * PostgreSQL's C language conventions. + */ + ProceduralLanguage C = formObjectId(CLASSID, ClanguageId); + + /** + * The well-known language "sql", for routines in that PostgreSQL + * built-in language. + */ + ProceduralLanguage SQL = formObjectId(CLASSID, SQLlanguageId); + + interface Handler extends Why { } + interface InlineHandler extends Why { } + interface Validator extends Why { } + + /** + * A {@link How How} memo attached to a {@link RegProcedure} that represents + * a PL/Java-based routine, retaining additional information useful to + * a PL/Java-based language implementation. + *

+ * A valid memo of this type will be maintained by PL/Java's dispatcher + * on {@code RegProcedure} instances that represent PL/Java-based routines. + * When passing such a {@code RegProcedure} to a language-handler method, + * the dispatcher also passes the memo. + */ + interface PLJavaBased extends How + { + /** + * A {@link TupleDescriptor} describing the expected parameters, based + * only on the routine declaration. + *

+ * The {@code TupleDescriptor} returned here depends only on the static + * catalog information for the {@link RegProcedure} carrying this memo. + * A language handler can use it to generate template code that can be + * cached with the target {@code RegProcedure}, independently of any one + * call site. + *

+ * {@link Identifier.None} may be encountered among the member names; + * parameters do not have to be named. + *

+ * Some reported types may have + * {@link RegType#needsResolution needsResolution} true, and require + * resolution to specific types using the expression context at + * a given call site. + *

+ * For a routine declared variadic, if the declared type of the variadic + * parameter is the wildcard {@code "any"} type, + * {@link Call#arguments arguments()}{@code .size()} at a call site can + * differ from {@code inputsTemplate().size()}, the variadic arguments + * delivered in "spread" form as distinct (and individually typed) + * arguments. Variadic arguments of any other declared type are always + * delivered in "collected" form as a PostgreSQL array of that type. + * A variadic {@code "any"} routine can also receive its arguments + * collected, when it has been called that way; therefore, there is an + * ambiguity when such a routine is called with a single array argument + * in the variadic position. A language handler must call + * {@link Lookup#inputsAreSpread Lookup.inputsAreSpread()} to determine + * the caller's intent in that case. + * @see #unresolvedInputs() + */ + TupleDescriptor inputsTemplate(); + + /** + * A {@code BitSet} indicating (by zero-based index into + * {@link #inputsTemplate inputsTemplate}) which of the input + * parameter types need resolution against actual supplied argument + * types at a call site. + *

+ * If the set is empty, such per-call-site resolution can be skipped. + * @return a cloned {@code BitSet} + */ + BitSet unresolvedInputs(); + + /** + * A {@link TupleDescriptor} describing the expected result, based + * only on the routine declaration. + *
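// A minimal sketch, assuming a handler that caches per-routine template code:
// an empty unresolvedInputs() set means the cached inputsTemplate() can be
// used as-is at every call site. Names here are hypothetical.
import java.util.BitSet;
import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased;

class TemplateCacheSketch
{
    static boolean inputsNeedPerCallResolution(PLJavaBased memo)
    {
        BitSet unresolved = memo.unresolvedInputs(); // a clone; safe to keep
        return ! unresolved.isEmpty();
    }
}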

+ * The {@code TupleDescriptor} returned here depends only on the static + * catalog information for the {@link RegProcedure} carrying this memo. + * A language handler can use it to generate template code that can be + * cached with the target {@code RegProcedure}, independently of any one + * call site. + *

+ * For a function whose return type (in SQL) is not composite (or + * a function with only one output parameter, which PostgreSQL treats + * the same way), this method returns a synthetic ephemeral descriptor + * with one attribute of the declared return type. This convention + * allows {@link TupleTableSlot} to be the uniform API for the data type + * conversions to and from PostgreSQL, regardless of how a routine + * is declared. + *

+ * This method returns null in two cases: if the target returns + * {@link RegType#VOID VOID} and no descriptor is needed, or if the + * target is a function whose call sites must supply a column definition + * list, so there is no template descriptor that can be cached with + * the routine proper. A descriptor can only be obtained later from + * {@link RegProcedure.Lookup#outputsDescriptor outputsDescriptor()} + * when a call site is at hand. + *

+ * Some reported types may have + * {@link RegType#needsResolution needsResolution} true, and require + * resolution to specific types using the expression context at + * a given call site. + *

+ * {@link Identifier.None} will be the name of the single attribute in + * the synthetic descriptor wrapping a scalar. Because PL/Java's + * function dispatcher will undo the wrapping to return a scalar + * to PostgreSQL, the name matters not. + * @see #unresolvedOutputs() + * @return a {@code TupleDescriptor}, null if the target returns + * {@code VOID}, or is a function and can only be called with + * a column definition list supplied at the call site. + */ + TupleDescriptor outputsTemplate(); + + /** + * A {@code BitSet} indicating (by zero-based index into + * {@link #outputsTemplate outputsTemplate}) which + * result types need resolution against actual supplied argument types + * at each call site. + *

+ * If the set is empty, such per-call-site resolution can be skipped. + * @return a cloned {@code BitSet}. In the two circumstances where + *{@link #outputsTemplate outputsTemplate} returns null, this method + * returns either null or an empty {@code BitSet}. It is null for the + * unspecified-record-returning case, where a column definition list + * must be consulted at each call site; it is an empty set for the + * {@code VOID}-returning case where no further resolution is needed + * (just as an empty {@code BitSet} here would normally indicate). + */ + BitSet unresolvedOutputs(); + + /** + * A list of {@link Transform} instances (null if none) indicating + * transforms to be applied on data types supplied to or supplied by + * this routine. + *
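// A minimal sketch of the convention spelled out above: when outputsTemplate()
// is null, a null unresolvedOutputs() means a column definition list must be
// consulted per call site, while an empty set means the routine returns VOID.
// The enum and method names here are hypothetical.
import java.util.BitSet;
import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased;
import org.postgresql.pljava.model.TupleDescriptor;

class ResultShapeSketch
{
    enum Shape { TEMPLATED, NEEDS_COLUMN_DEFINITION_LIST, RETURNS_VOID }

    static Shape classify(PLJavaBased memo)
    {
        TupleDescriptor template = memo.outputsTemplate();
        if ( null != template )
            return Shape.TEMPLATED;
        BitSet unresolved = memo.unresolvedOutputs();
        return null == unresolved
            ? Shape.NEEDS_COLUMN_DEFINITION_LIST : Shape.RETURNS_VOID;
    }
}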

+ * When this method returns a non-null result, each {@code Transform} + * in the list has already been checked by the language implementation's + * {@link PLJavaBasedLanguage.UsingTransforms#essentialTransformChecks + * essentialTransformChecks} method. Any exceptions those checks might + * throw should have been thrown when the dispatcher invoked this method + * before dispatching to the language handler, so a language handler + * using this method need not normally expect to handle them. + */ + List transforms(); + } + + default Trust trust() + { + return principal().trust(); + } + + PLPrincipal principal(); + RegProcedure handler(); + RegProcedure inlineHandler(); + RegProcedure validator(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegClass.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegClass.java new file mode 100644 index 000000000..f5681b000 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegClass.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLXML; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of PostgreSQL relations/"classes"/tables. + *

+ * Instances of {@code RegClass} also serve as the "class ID" values for + * objects within the catalog (including for {@code RegClass} objects, which + * are no different from others in being defined by rows that appear in a + * catalog table; there is a row in {@code pg_class} for {@code pg_class}). + */ +public interface RegClass +extends + Addressed, Namespaced, Owned, + AccessControlled +{ + Known CLASSID = formClassId(RelationRelationId, RegClass.class); + + /** + * A more-specifically-typed subinterface of {@code RegClass}, used in the + * {@code CLASSID} static fields of interfaces in this package. + * @param identifies the specific CatalogObject.Addressed subinterface + * to result when this is applied as the {@code classId} to a bare + * {@code CatalogObject}. + */ + interface Known> extends RegClass + { + /** + * Returns the invalid CatalogObject with this class ID. + */ + default T invalid() + { + return formObjectId(this, InvalidOid); + } + } + + enum Persistence { PERMANENT, UNLOGGED, TEMPORARY } + + enum Kind + { + TABLE, INDEX, SEQUENCE, TOAST, VIEW, MATERIALIZED_VIEW, COMPOSITE_TYPE, + FOREIGN_TABLE, PARTITIONED_TABLE, PARTITIONED_INDEX + } + + enum ReplicaIdentity { DEFAULT, NOTHING, ALL, INDEX } + + /** + * The PostgreSQL type that is associated with this relation as its + * "row type". + *

+ * This is the type that will be found in a + * {@link TupleDescriptor TupleDescriptor} for this relation. + */ + RegType type(); + + /** + * Only for a relation that was created with {@code CREATE TABLE ... OF} + * type, this will be that type; the invalid {@code RegType} + * otherwise. + *

+ * Even though the tuple structure will match, this is not the same type + * returned by {@link #type() type()}; that will still be a type distinctly + * associated with this relation. + */ + RegType ofType(); + + AccessMethod accessMethod(); + + /* Of limited interest ... used in forming pathname of relation on disk, + * but in very fiddly ways and dependent on the access method. + * + int filenode(); + */ + + Tablespace tablespace(); + + /* Of limited interest ... estimates used by planner + * + int pages(); + float tuples(); + int allVisible(); + */ + + RegClass toastRelation(); + boolean hasIndex(); + + /** + * Whether this relation is shared across all databases in the cluster. + *
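// A minimal sketch of the distinction above: ofType() is the invalid RegType
// except for a table created with CREATE TABLE ... OF, while type() is always
// the relation's own row type. Names here are hypothetical.
import org.postgresql.pljava.model.RegClass;
import org.postgresql.pljava.model.RegType;

class RowTypeSketch
{
    static boolean isTypedTable(RegClass rel)
    {
        return rel.ofType().isValid();
    }

    static RegType rowType(RegClass rel)
    {
        return rel.type(); // the type found in this relation's TupleDescriptor
    }
}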

+ * Contrast {@link shared()}, which indicates, for any catalog object, + * whether that object is shared across the cluster. For any + * {@code RegClass} instance, {@code shared()} will be false (the + * {@code pg_class} catalog is not shared), but if the instance represents + * a shared class, {@code isShared()} will be true (and {@code shared()} + * will be true for any catalog object formed with that instance as its + * {@code classId}). + * @return whether the relation represented by this RegClass instance is + * shared across all databases in the cluster. + */ + boolean isShared(); + Persistence persistence(); + Kind kind(); + short nAttributes(); + short checks(); + boolean hasRules(); + boolean hasTriggers(); + boolean hasSubclass(); + boolean rowSecurity(); + boolean forceRowSecurity(); + boolean isPopulated(); + ReplicaIdentity replicaIdentity(); + boolean isPartition(); + // rewrite + // frozenxid + // minmxid + Map options(); + SQLXML partitionBound(); + + /** + * The {@link ForeignServer} if this is a foreign table, otherwise null. + */ + ForeignServer foreignServer(); + + /** + * Table options understood by the {@link #foreignServer foreign server} + * if this is a foreign table, otherwise null. + */ + Map foreignOptions(); + + TupleDescriptor.Interned tupleDescriptor(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegCollation.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegCollation.java new file mode 100644 index 000000000..b8658d84e --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegCollation.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a registered PostgreSQL collation, consisting of a provider and + * version, {@code collate} and {@code ctype} strings meaningful to that + * provider, and a {@code CharsetEncoding} (or {@code ANY} if the collation + * is usable with any encoding). + */ +public interface RegCollation +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(CollationRelationId, RegCollation.class); + + RegCollation DEFAULT = formObjectId(CLASSID, DEFAULT_COLLATION_OID); + RegCollation C = formObjectId(CLASSID, C_COLLATION_OID); + + /* + * Static lc_messages/lc_monetary/lc_numeric/lc_time getters? They are not + * components of RegCollation, but simply GUCs. They don't have PGDLLIMPORT, + * so on Windows they'd have to be retrieved through the GUC machinery + * by name. At least they're strings anyway. 
+ */ + + enum Provider { DEFAULT, LIBC, ICU } + + CharsetEncoding encoding(); + String collate(); + String ctype(); + + /** + * @since PG 10 + */ + Provider provider(); + + /** + * @since PG 10 + */ + String version(); + + /** + * @since PG 12 + */ + boolean deterministic(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegConfig.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegConfig.java new file mode 100644 index 000000000..ee923ff89 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegConfig.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * A PostgreSQL text search configuration. + *

+ * This interface is included in the model per the (arguably arbitrary) goal of + * covering all the catalog classes for which a {@code Reg...} type is provided + * in PostgreSQL. However, completing its implementation (to include a + * {@code parser()} method) would require also defining an interface to + * represent a text search parser. + */ +public interface RegConfig +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(TSConfigRelationId, RegConfig.class); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegDictionary.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegDictionary.java new file mode 100644 index 000000000..3e70d8a98 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegDictionary.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * A PostgreSQL text search dictionary. + *

+ * This interface is included in the model per the (arguably arbitrary) goal of + * covering all the catalog classes for which a {@code Reg...} type is provided + * in PostgreSQL. However, completing its implementation (to include a + * {@code template()} method) would require also defining an interface to + * represent a text search template. + */ +public interface RegDictionary +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(TSDictionaryRelationId, RegDictionary.class); + + /* + * dictinitoption is a text column, but it clearly (see CREATE TEXT SEARCH + * DICTIONARY and examples in the catalog) has an option = value , ... + * structure. An appropriate return type for a method could be a map, + * and the implementation would have to match the quoting/escaping/parsing + * rules used by PG. + */ +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegNamespace.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegNamespace.java new file mode 100644 index 000000000..8dc28cc40 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegNamespace.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a namespace (named schema) entry in the PostgreSQL catalogs. + */ +public interface RegNamespace +extends + Addressed, Named, Owned, + AccessControlled +{ + RegClass.Known CLASSID = + formClassId(NamespaceRelationId, RegNamespace.class); + + RegNamespace PG_CATALOG = formObjectId(CLASSID, PG_CATALOG_NAMESPACE); + RegNamespace PG_TOAST = formObjectId(CLASSID, PG_TOAST_NAMESPACE); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegOperator.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegOperator.java new file mode 100644 index 000000000..ef54acea8 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegOperator.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Operator; + +/** + * Model of a PostgreSQL operator as defined in the system catalogs, including + * its kind (infix or prefix), operand and result types, and a number of + * properties helpful in query planning. 
+ */ +public interface RegOperator +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(OperatorRelationId, RegOperator.class); + + enum Kind + { + /** + * An operator used between a left and a right operand. + */ + INFIX, + + /** + * An operator used to the left of a single right operand. + */ + PREFIX, + + /** + * An operator used to the right of a single left operand. + * @deprecated Postfix operators are deprecated since PG 13 and + * unsupported since PG 14. + */ + @Deprecated(since="PG 13") + POSTFIX + } + + interface Evaluator extends Why { } + interface RestrictionSelectivity extends Why { } + interface JoinSelectivity extends Why { } + + Kind kind(); + boolean canMerge(); + boolean canHash(); + RegType leftOperand(); + RegType rightOperand(); + RegType result(); + RegOperator commutator(); + RegOperator negator(); + RegProcedure evaluator(); + RegProcedure restrictionEstimator(); + RegProcedure joinEstimator(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegProcedure.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegProcedure.java new file mode 100644 index 000000000..744b94db9 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegProcedure.java @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.BitSet; +import java.util.List; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.PLJavaBasedLanguage.SRFTemplate; +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Function.Effects; +import org.postgresql.pljava.annotation.Function.OnNullInput; +import org.postgresql.pljava.annotation.Function.Parallel; +import org.postgresql.pljava.annotation.Function.Security; + +import org.postgresql.pljava.annotation.Trigger.Called; +import org.postgresql.pljava.annotation.Trigger.Event; +import org.postgresql.pljava.annotation.Trigger.Scope; + +/** + * Model of a PostgreSQL "routine" (which in late versions can include + * procedures and functions of various kinds) as defined in the system catalogs, + * including its parameter and result types and many other properties. + * @param distinguishes {@code RegProcedure} instances used for different + * known purposes, by specifying the type of a 'memo' that could be attached to + * the instance, perhaps with extra information helpful for the intended use. + * At present, such memo interfaces are nearly all empty, but still this + * parameter can serve a compile-time role to discourage mixing different + * procedures up. + */ +public interface RegProcedure> +extends + Addressed>, Namespaced, Owned, + AccessControlled +{ + RegClass.Known> CLASSID = + formClassId(ProcedureRelationId, (Class>)null); + + ProceduralLanguage language(); + + float cost(); + + float rows(); + + RegType variadicType(); + + /** + * A planner-support function that may transform call sites of + * this function. + *

+ * In PG 9.5 to 11, there was a similar, but less flexible, "transform" + * function that this method can return when running on those versions. + * @since PG 12 + */ + RegProcedure support(); + + /** + * The kind of procedure or function. + *

+ * Before PG 11, there were separate booleans to indicate an aggregate or + * window function, which this method can consult when running on earlier + * versions. + * @since PG 11 + */ + Kind kind(); + + Security security(); + + boolean leakproof(); + + OnNullInput onNullInput(); + + boolean returnsSet(); + + Effects effects(); + + Parallel parallel(); + + RegType returnType(); + + List argTypes(); + + List allArgTypes(); + + /** + * Modes corresponding 1-for-1 to the arguments in {@code allArgTypes}. + */ + List argModes(); + + /** + * Names corresponding 1-for-1 to the arguments in {@code allArgTypes}. + */ + List argNames(); + + /** + * A {@code pg_node_tree} representation of a list of n + * expression trees, corresponding to the last n input arguments + * (that is, the last n returned by {@code argTypes}). + */ + SQLXML argDefaults(); + + List transformTypes(); + + String src(); + + String bin(); + + /** + * A {@code pg_node_tree} representation of a pre-parsed SQL function body, + * used when it is given in SQL-standard notation rather than as a string + * literal, otherwise null. + * @since PG 14 + */ + SQLXML sqlBody(); + + /** + * This is surely a list of {@code guc=value} pairs and ought to have + * a more specific return type. + *

+ * XXX
+ */
+ List config();
+
+ enum ArgMode { IN, OUT, INOUT, VARIADIC, TABLE };
+
+ enum Kind { FUNCTION, PROCEDURE, AGGREGATE, WINDOW };
+
+ /**
+ * Obtain the memo attached to this {@code RegProcedure}, if any.
+ *

+ * A {@code RegProcedure} may have an implementation of {@link Memo Memo} + * attached, providing additional information on what sort of procedure + * it is and how to use it. Many catalog getters that return + * {@code RegProcedure} specialize the return type to indicate + * an expected subinterface of {@code Memo}. + *

+ * It may not be the case that a given {@code RegProcedure} has a valid + * {@code Memo} attached at all times. Documentation for a specific + * {@code Memo} subinterface should explain the circumstances when this + * method can be called to rely on a memo of that type. + */ + M memo(); + + /** + * Superinterface of two memo types a {@code RegProcedure} can carry, + * {@link Why Why} and {@link How How}. + *

+ * A {@code Why} memo pertains to the intended use of a + * {@code RegProcedure}, for example as a + * {@link RegType.TypeInput TypeInput} function or as a + * {@link PlannerSupport PlannerSupport} function. The {@code Why} memo, + * if present, can be retrieved by the {@link #memo() memo()} method, and + * the type parameter of {@code RegProcedure} reflects it, as a compile-time + * safeguard against mixing up {@code RegProcedure}s with different + * purposes. + *

+ * Orthogonally to {@code Why}, a {@code How} memo pertains to how the + * {@code RegProcedure} is implemented, such as + * {@link ProceduralLanguage.PLJavaBased PLJavaBased} if the + * {@code RegProcedure} is implemented in a language built atop PL/Java. + * The language of implementation is ideally independent of the intended + * use, so {@code RegProcedure} is not parameterized with a {@code How} + * type, and has no API method to retrieve an associated {@code How} memo. + * For PL/Java-based languages, PL/Java's dispatcher will pass the + * associated {@code How} memo to the language handler. + */ + interface Memo> + { + /** + * Superinterface of memos that pertain to the intended use of a + * {@link RegProcedure RegProcedure} (why it is used). + */ + interface Why> extends Memo { } + + /** + * Superinterface of memos that pertain to the internal implementation + * of a {@link RegProcedure RegProcedure} (how it is + * implemented). + */ + interface How> extends Memo { } + } + + interface PlannerSupport extends Memo.Why { } + + /** + * Counterpart to the PostgreSQL {@code FmgrInfo}. + */ + interface Lookup + { + /** + * The PostgreSQL function or procedure being called. + */ + RegProcedure target(); + + /* + * Most of the C members of FmgrInfo are just as easy here to look up + * on target. The API here will focus on exposing such higher-level + * queries as might be made in C with the functions in fmgr.h and + * funcapi.h. + */ + + /** + * A {@link TupleDescriptor} describing the incoming arguments, with any + * polymorphic types from the routine's declaration resolved to the + * actual types at this call site. + *

+ * If there are no polymorphic types among the routine's declared + * parameters, an unchanged {@code TupleDescriptor} cached with the + * routine may be returned. + *

+ * See {@link #inputsAreSpread inputsAreSpread} for one case where the + * {@code size()} of this {@code TupleDescriptor} can exceed the + * {@code size()} of a {@code TupleDescriptor} constructed from the + * routine's declaration. + *

+ * {@link RegType#ANYARRAY ANYARRAY}, normally seen only in templates + * as a polymorphic pseudotype, can appear in this result in rare cases, + * where an expression involves certain columns of statistics-related + * system catalogs. An argument with this resolved type represents an + * array, but one whose element type may differ from call to call. See + * {@link RegType#ANYARRAY ANYARRAY} for how such an array can be + * handled. + */ + TupleDescriptor inputsDescriptor() throws SQLException; + + /** + * A {@link TupleDescriptor} describing the expected result, with any + * polymorphic types from the routine's declaration resolved to the + * actual types at this call site. + *

+ * Returns null if the routine has a declared return type of + * {@link RegType#VOID VOID} and does not need to return anything. + *

+ * If there are no polymorphic types among the routine's declared + * outputs, an unchanged {@code TupleDescriptor} cached with the + * routine may be returned. + *

+ * When the routine is a function declared with a non-composite return + * type (or with a single {@code OUT} parameter, a case PostgreSQL + * treats the same way), this method returns a synthetic ephemeral + * {@code TupleDescriptor} with one unnamed attribute of that type. + *

+ * {@link RegType#ANYARRAY ANYARRAY}, normally seen only in templates + * as a polymorphic pseudotype, can appear in this result in rare cases, + * where an expression involves certain columns of statistics-related + * system catalogs. An argument with this resolved type represents an + * array, but one whose element type may differ from call to call. See + * {@link RegType#ANYARRAY ANYARRAY} for how such an array can be + * handled. + */ + TupleDescriptor outputsDescriptor() throws SQLException; + + /** + * Returns true if a routine with a variadic parameter declared with the + * wildcard {@code "any"} type is being called with its arguments in + * "spread" form at this call site. + *

+ * In "spread" form, {@link Call#arguments arguments()}{@code .size()} + * can exceed the routine's declared number of parameters, with + * the values and types of the variadic arguments to be found + * at successive positions of {@link Call#arguments}. In "collected" + * form, the position of the variadic parameter is passed a single + * PostgreSQL array of the variadic arguments' type. A call with zero + * arguments for the variadic parameter can only be made in + * "collected" form, with an empty array at the variadic parameter's + * declared position; therefore, no case arises where the passed + * arguments are fewer than the declared parameters. + *

+ * When the routine declaration has a variadic parameter of any type + * other than the wildcard {@code "any"}, collected form is always used. + * In the wildcard case, collected or spread form may be seen, at the + * caller's option. Therefore, there is an ambiguity when such a routine + * receives a single argument of array type at the variadic position, + * and this method must be used in that case to determine the caller's + * intent. + * @return always false, except for a routine declared + * {@code VARIADIC "any"} when its arguments are being passed + * in "spread" form. + */ + boolean inputsAreSpread(); + + /** + * For the arguments at (zero-based) positions in {@code arguments()} + * indicated by ofInterest, report (in the returned bit set) + * which of those are 'stable', that is, will keep their values across + * calls associated with the current {@code Lookup}. + */ + BitSet stableInputs(BitSet ofInterest); + } + + /** + * Counterpart to the PostgreSQL {@code FunctionCallInfoBaseData}. + *
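// A minimal sketch combining Lookup with the PLJavaBased memo described
// earlier: in "spread" form the call-site descriptor is authoritative, while
// in "collected" form it matches the declared template. Names here are
// hypothetical.
import java.sql.SQLException;
import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased;
import org.postgresql.pljava.model.RegProcedure.Lookup;

class VariadicSketch
{
    static int expectedInputCount(Lookup flinfo, PLJavaBased memo)
        throws SQLException
    {
        if ( flinfo.inputsAreSpread() )
            return flinfo.inputsDescriptor().size();
        return memo.inputsTemplate().size();
    }
}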

+ * Presents arguments in the form of a {@code TupleTableSlot}. + */ + interface Call + { + Lookup lookup(); + TupleTableSlot arguments() throws SQLException; + TupleTableSlot result() throws SQLException; + void isNull(boolean nullness); + RegCollation collation(); + Context context(); + ResultInfo resultInfo(); + /* + * Using TupleTableSlot, this interface does not so much need to + * expose the get_call_result_type / get_fn_expr_argtype / + * get_fn_expr_variadic routines as to just go ahead and use them + * and present a coherent picture. + */ + + /** + * Common base of interfaces that can be returned by + * {@link Call#context() Call.context()}. + */ + interface Context + { + /** + * Supplied when the routine is being called as a trigger. + */ + interface TriggerData extends Context + { + /** + * When the trigger is being called (before, after, or instead) + * with respect to the triggering event. + */ + Called called(); + + /** + * The event that has fired this trigger. + */ + Event event(); + + /** + * The scope (per-row or per-statement) of this trigger. + */ + Scope scope(); + + /** + * The relation on which this trigger is declared. + */ + RegClass relation(); + + /** + * The row for which the trigger was fired. + *

+ * In a trigger fired for {@code INSERT} or {@code DELETE}, this + * is the row to return if not altering or skipping the + * operation. + */ + TupleTableSlot triggerTuple(); + + /** + * The proposed new version of the row, only in a trigger fired + * for {@code UPDATE}. + *

+ * In a trigger fired for {@code UPDATE}, this is the row + * to return if not altering or skipping the operation. + */ + TupleTableSlot newTuple(); + + /** + * Information from the trigger's declaration in the system + * catalogs. + */ + Trigger trigger(); + + /** + * For {@code UPDATE} triggers, which columns have been updated + * by the triggering command; null for other triggers. + */ + Projection updatedColumns(); + } + + interface EventTriggerData extends Context + { + } + + interface AggState extends Context + { + } + + interface WindowAggState extends Context + { + } + + interface WindowObject extends Context + { + } + + /** + * Supplied when the routine being called is a procedure + * rather than a function. + */ + interface CallContext extends Context + { + /** + * Indicates whether transaction control operations within + * the procedure are disallowed (true) or allowed (false). + */ + boolean atomic(); + } + + interface ErrorSaveContext extends Context + { + } + } + + /** + * Common base of interfaces that can be returned by + * {@link Call#resultInfo() Call.resultInfo()}. + */ + interface ResultInfo + { + /** + * Supplied when the routine is being called with the expectation + * of a set (not just a single value or row) return. + */ + interface ReturnSetInfo extends ResultInfo + { + /** + * List indicating the set-returning interfaces the caller + * is prepared to accept. + *
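// A minimal sketch of the TriggerData contract above: an UPDATE row trigger
// hands back newTuple(), while INSERT and DELETE triggers hand back
// triggerTuple(), when not altering or skipping the operation. Names here are
// hypothetical.
import org.postgresql.pljava.annotation.Trigger.Event;
import org.postgresql.pljava.model.RegProcedure.Call.Context.TriggerData;
import org.postgresql.pljava.model.TupleTableSlot;

class TriggerResultSketch
{
    static TupleTableSlot rowToReturn(TriggerData td)
    {
        return Event.UPDATE == td.event() ? td.newTuple() : td.triggerTuple();
    }
}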

+ * Ordering can reflect the caller's preference, + * with a more-preferred interface earlier in the list. + */ + List> allowedModes(); + } + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegRole.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegRole.java new file mode 100644 index 000000000..acb8e55c3 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegRole.java @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.List; + +import org.postgresql.pljava.RolePrincipal; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Pseudo; + +/** + * Model of a PostgreSQL role. + *

+ * In addition to the methods returning the information in the {@code pg_authid} + * system catalog, there are methods to return four different flavors of + * {@link RolePrincipal RolePrincipal}, all representing this role. + *

+ * The {@code ...Principal()} methods should not be confused with environment + * accessors returning actual information about the execution context. Each of + * the methods simply returns an instance of the corresponding class that would + * be appropriate to find in the execution context if this role were, + * respectively, the authenticated, session, outer, or current role. + *

+ * {@link RolePrincipal.Current} implements the + * {@code UserPrincipal/GroupPrincipal} interfaces of + * {@code java.nio.file.attribute}, so + * {@link #currentPrincipal() currentPrincipal()} can also be used to obtain + * {@code Principal}s that will work in the Java NIO.2 filesystem API. + *
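+ * For illustration only (a sketch, not part of this changeset; {@code role}
+ * and {@code path} are assumed to be a {@code RegRole} and a
+ * {@code java.nio.file.Path} already in hand):
+ *<pre>
+ * java.nio.file.Files.setOwner(path, role.currentPrincipal());
+ *</pre>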

+ * The {@code ...Principal} methods only succeed when {@code name()} does, + * therefore not when {@code isValid} is false. The {@code RegRole.Grantee} + * representing {@code PUBLIC} is, for all other purposes, not a valid role, + * including for its {@code ...Principal} methods. + */ +public interface RegRole +extends Addressed, Named, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(AuthIdRelationId, RegRole.class); + + /** + * A {@code RegRole.Grantee} representing {@code PUBLIC}; not a valid + * {@code RegRole} for other purposes. + */ + RegRole.Grantee PUBLIC = publicGrantee(); + + /** + * Subinterface of {@code RegRole} returned by methods of + * {@link CatalogObject.AccessControlled CatalogObject.AccessControlled} + * identifying the role to which a privilege has been granted. + *

+ * A {@code RegRole} appearing as a grantee can be {@link #PUBLIC PUBLIC}, + * unlike a {@code RegRole} in any other context, so the + * {@link #isPublic isPublic()} method appears only on this subinterface, + * as well as the {@link #nameAsGrantee nameAsGrantee} method, which will + * return the correct name even in that case (the ordinary {@code name} + * method will not). + */ + interface Grantee extends RegRole + { + /** + * In the case of a {@code RegRole} obtained as the {@code grantee} of a + * {@link Grant}, indicate whether it is a grant to "public". + */ + default boolean isPublic() + { + return ! isValid(); + } + + /** + * Like {@code name()}, but also returns the expected name for a + * {@code Grantee} representing {@code PUBLIC}. + */ + Simple nameAsGrantee(); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as a + * session's authenticated identity (which was established at connection + * time and will not change for the life of a session). + */ + default RolePrincipal.Authenticated authenticatedPrincipal() + { + return new RolePrincipal.Authenticated(name()); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as a + * session's "session" identity (which can be changed during a session + * by {@code SET SESSION AUTHORIZATION}). + */ + default RolePrincipal.Session sessionPrincipal() + { + return new RolePrincipal.Session(name()); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as the one + * last established by {@code SET ROLE}, and outside of any + * {@code SECURITY DEFINER} function. + */ + default RolePrincipal.Outer outerPrincipal() + { + return new RolePrincipal.Outer(name()); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as the + * effective one for normal privilege checks, usually the same as the + * session or outer, but changed during {@code SECURITY DEFINER} functions. + *

+ * This method can also be used to obtain a {@code Principal} that will work + * in the Java NIO.2 filesystem API. + */ + default RolePrincipal.Current currentPrincipal() + { + return new RolePrincipal.Current(name()); + } + + /** + * Roles of which this role is directly a member. + *

+ * For the other direction, see {@link #grants() grants()}. + */ + List memberOf(); + + boolean superuser(); + boolean inherit(); + boolean createRole(); + boolean createDB(); + boolean canLogIn(); + boolean replication(); + boolean bypassRLS(); + int connectionLimit(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegType.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegType.java new file mode 100644 index 000000000..7cf472e00 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegType.java @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLType; +import java.sql.SQLXML; + +import org.postgresql.pljava.Adapter; // javadoc + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.PredefinedCategory; // javadoc +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; // javadoc +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a PostgreSQL data type, as defined in the system catalogs. + *

+ * This class also has static final fields for a selection of commonly used + * {@code RegType}s, such as those that correspond to types mentioned in JDBC, + * and others that are just ubiquitous when working in PostgreSQL in general, + * or are used in this model package. + *

+ * An instance of {@code RegType} also implements the JDBC + * {@link SQLType SQLType} interface, with the intention that it could be used + * with a suitably-aware JDBC implementation to identify any type available + * in PostgreSQL. + *
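+ * For illustration only (a sketch, not part of this changeset; {@code ps}
+ * and {@code value} are assumed to be a {@code PreparedStatement} from such
+ * a suitably-aware driver and a value to bind):
+ *<pre>
+ * ps.setObject(1, value, RegType.NUMERIC);
+ *</pre>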

+ * A type can have a 'modifier' (think {@code NUMERIC(4)} versus plain + * {@code NUMERIC}). In PostgreSQL's C code, a type oid and modifier have to + * be passed around in tandem. Here, you apply + * {@link #modifier(int) modifier(int)} to the unmodified {@code RegType} and + * obtain a distinct {@code RegType} instance incorporating the modifier. + */ +public interface RegType +extends + Addressed, Namespaced, Owned, AccessControlled, + SQLType +{ + RegClass.Known CLASSID = + formClassId(TypeRelationId, RegType.class); + + /* + * PG types good to have around because of corresponding JDBC types. + */ + RegType BOOL = formObjectId(CLASSID, BOOLOID); + RegType BYTEA = formObjectId(CLASSID, BYTEAOID); + /** + * The PostgreSQL type {@code "char"} (the quotes are needed to distinguish + * it from the different SQL type named {@code CHAR}), which is an eight-bit + * signed value with no associated character encoding (though it is often + * used in the catalogs with ASCII-letter values as an ersatz enum). + *

+ * It can be mapped to the JDBC type {@code TINYINT}, or Java {@code byte}. + */ + RegType CHAR = formObjectId(CLASSID, CHAROID); + RegType INT8 = formObjectId(CLASSID, INT8OID); + RegType INT2 = formObjectId(CLASSID, INT2OID); + RegType INT4 = formObjectId(CLASSID, INT4OID); + RegType XML = formObjectId(CLASSID, XMLOID); + RegType FLOAT4 = formObjectId(CLASSID, FLOAT4OID); + RegType FLOAT8 = formObjectId(CLASSID, FLOAT8OID); + /** + * "Blank-padded CHAR", the PostgreSQL type that corresponds to the SQL + * standard {@code CHAR} (spelled without quotes) type. + */ + RegType BPCHAR = formObjectId(CLASSID, BPCHAROID); + RegType VARCHAR = formObjectId(CLASSID, VARCHAROID); + RegType DATE = formObjectId(CLASSID, DATEOID); + RegType TIME = formObjectId(CLASSID, TIMEOID); + RegType TIMESTAMP = formObjectId(CLASSID, TIMESTAMPOID); + RegType TIMESTAMPTZ = formObjectId(CLASSID, TIMESTAMPTZOID); + RegType TIMETZ = formObjectId(CLASSID, TIMETZOID); + RegType BIT = formObjectId(CLASSID, BITOID); + RegType VARBIT = formObjectId(CLASSID, VARBITOID); + RegType NUMERIC = formObjectId(CLASSID, NUMERICOID); + + /* + * PG types not mentioned in JDBC but bread-and-butter to PG devs. + */ + RegType TEXT = formObjectId(CLASSID, TEXTOID); + RegType UNKNOWN = formObjectId(CLASSID, UNKNOWNOID); + RegType RECORD = formObjectId(CLASSID, RECORDOID); + RegType CSTRING = formObjectId(CLASSID, CSTRINGOID); + RegType VOID = formObjectId(CLASSID, VOIDOID); + RegType TRIGGER = formObjectId(CLASSID, TRIGGEROID); + + /* + * API treatment for one of the several polymorphic types, because this one + * can also be the actual resolved type of some system catalog columns. + */ + /** + * Normally a pseudotype used in declaring polymorphic functions, this + * can also be the actual resolved type of some statistics-related system + * catalog columns or expressions derived from them. + *

+ * When this type is encountered as the resolved type for an array, + * different instances of the array may have different element types. + * {@link Adapter.Array#elementType()} can be used to get an {@code Adapter} + * that reports the element type of any array, so that a suitable + * {@code Adapter} for that element type can be chosen and used to construct + * an array adapter for access to the array's elements. + */ + RegType ANYARRAY = formObjectId(CLASSID, ANYARRAYOID); + + /* + * PG types used in modeling PG types themselves. + */ + RegType NAME = formObjectId(CLASSID, NAMEOID); + RegType REGPROC = formObjectId(CLASSID, REGPROCOID); + RegType OID = formObjectId(CLASSID, OIDOID); + RegType PG_NODE_TREE = formObjectId(CLASSID, PG_NODE_TREEOID); + RegType ACLITEM = formObjectId(CLASSID, ACLITEMOID); + RegType REGPROCEDURE = formObjectId(CLASSID, REGPROCEDUREOID); + RegType REGOPER = formObjectId(CLASSID, REGOPEROID); + RegType REGOPERATOR = formObjectId(CLASSID, REGOPERATOROID); + RegType REGCLASS = formObjectId(CLASSID, REGCLASSOID); + RegType REGTYPE = formObjectId(CLASSID, REGTYPEOID); + RegType REGCONFIG = formObjectId(CLASSID, REGCONFIGOID); + RegType REGDICTIONARY = formObjectId(CLASSID, REGDICTIONARYOID); + RegType REGNAMESPACE = formObjectId(CLASSID, REGNAMESPACEOID); + RegType REGROLE = formObjectId(CLASSID, REGROLEOID); + RegType REGCOLLATION = formObjectId(CLASSID, REGCOLLATIONOID); + + enum Type { BASE, COMPOSITE, DOMAIN, ENUM, PSEUDO, RANGE, MULTIRANGE } + + interface TypeInput extends Why { } + interface TypeOutput extends Why { } + interface TypeReceive extends Why { } + interface TypeSend extends Why { } + interface TypeModifierInput extends Why { } + interface TypeModifierOutput extends Why { } + interface TypeAnalyze extends Why { } + interface TypeSubscript extends Why { } + + /** + * Interface additionally implemented by an instance that represents a type + * (such as the PostgreSQL polymorphic pseudotypes or the even wilder "any" + * type) needing resolution to an actual type used at a given call site. + */ + interface Unresolved extends RegType + { + /** + * Returns true, indicating resolution to an actual type is needed. + */ + @Override + default boolean needsResolution() + { + return true; + } + } + + /** + * Whether this instance represents a type (such as the PostgreSQL + * polymorphic pseudotypes or the even wilder "any" type) needing resolution + * to an actual type used at a given call site. + *

+ * This information does not come from the {@code pg_type} catalog, but + * simply reflects PostgreSQL-version-specific knowledge of which types + * require such treatment. + *

+ * This default implementation returns false. + * @see Unresolved#needsResolution + */ + default boolean needsResolution() + { + return false; + } + + short length(); + boolean byValue(); + Type type(); + /** + * A one-character code representing the type's 'category'. + *

+ * Custom categories are possible, so not every value here need correspond + * to a {@link PredefinedCategory PredefinedCategory}, but common ones will, + * and can be 'decoded' with {@link PredefinedCategory#valueOf(char)}. + */ + char category(); + boolean preferred(); + boolean defined(); + byte delimiter(); + RegClass relation(); + RegType element(); + RegType array(); + RegProcedure input(); + RegProcedure output(); + RegProcedure receive(); + RegProcedure send(); + RegProcedure modifierInput(); + RegProcedure modifierOutput(); + RegProcedure analyze(); + RegProcedure subscript(); + Alignment alignment(); + Storage storage(); + boolean notNull(); + RegType baseType(); + int dimensions(); + RegCollation collation(); + SQLXML defaultBin(); + String defaultText(); + RegType modifier(int typmod); + + /** + * Returns the {@code RegType} for this type with no modifier, if this + * instance has one. + *

+ * If not, simply returns {@code this}. + */ + RegType withoutModifier(); + + /** + * Returns the modifier if this instance has one, else -1. + */ + int modifier(); + + /** + * The corresponding {@link TupleDescriptor TupleDescriptor}, non-null only + * for composite types. + */ + TupleDescriptor.Interned tupleDescriptor(); + + /** + * The name of this type as a {@code String}, as the JDBC + * {@link SQLType SQLType} interface requires. + *

+ * The string produced here is as would be produced by + * {@link Identifier#deparse deparse(StandardCharsets.UTF_8)} applied to + * the result of {@link #qualifiedName qualifiedName()}. + * The returned string may include double-quote marks, which affect its case + * sensitivity and the characters permitted within it. If an application is + * not required to use this method for JDBC compatibility, it can avoid + * needing to fuss with those details by using {@code qualifiedName} + * instead. + */ + @Override + default String getName() + { + return qualifiedName().toString(); + } + + /** + * A string identifying the "vendor" for which the type name and number here + * are meaningful, as the JDBC {@link SQLType SQLType} interface requires. + *

+ * The JDBC API provides that the result "typically is the package name for + * this vendor", and this method returns {@code org.postgresql} as + * a constant string. + *

+ * Note, however, that every type that is defined in the current PostgreSQL + * database can be represented by an instance of this interface, whether + * built in to PostgreSQL, installed with an extension, or user-defined. + * Therefore, not every instance with this "vendor" string can be assumed + * to be a type known to all PostgreSQL databases. Moreover, even if + * the same extension-provided or user-defined type is present in different + * PostgreSQL databases, it need not be installed with the same + * {@link #qualifiedName qualifiedName} in each, and will almost certainly + * have different object IDs, so {@link #getName getName} and + * {@link #getVendorTypeNumber getVendorTypeNumber} may not in general + * identify the same type across unrelated PostgreSQL databases. + */ + @Override + default String getVendor() + { + return "org.postgresql"; + } + + /** + * A vendor-specific type number identifying this type, as the JDBC + * {@link SQLType SQLType} interface requires. + *

+ * This implementation returns the {@link #oid oid} of the type in + * the current database. However, except for the subset of types that are + * built in to PostgreSQL with oid values that are fixed, the result of this + * method should only be relied on to identify a type within the current + * database. + */ + @Override + default Integer getVendorTypeNumber() + { + return oid(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ResourceOwner.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ResourceOwner.java new file mode 100644 index 000000000..919a05e00 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ResourceOwner.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.Lifespan; + +import org.postgresql.pljava.model.CatalogObject.Factory; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +/** + * The representation of a PostgreSQL {@code ResourceOwner}, usable as + * a PL/Java {@link Lifespan Lifespan}. + *

+ * The {@code ResourceOwner} API in PostgreSQL is described here. + *
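+ * For illustration only (a sketch, not part of this changeset), one of the
+ * well-known resource owners can be obtained wherever a PL/Java
+ * {@code Lifespan} is wanted:
+ *<pre>
+ * Lifespan span = ResourceOwner.TopTransactionResourceOwner();
+ *</pre>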

+ * PostgreSQL invokes callbacks in phases when a {@code ResourceOwner} + * is released, and all of its built-in consumers get notified before + * loadable modules (like PL/Java) for each phase in turn. The release + * behavior of this PL/Java instance is tied to the + * {@code RESOURCE_RELEASE_LOCKS} phase of the underlying PostgreSQL object, + * and therefore occurs after all of the built-in PostgreSQL lock-related + * releases, but before any of the built-in stuff released in the + * {@code RESOURCE_RELEASE_AFTER_LOCKS} phase. + */ +public interface ResourceOwner extends Lifespan +{ + static ResourceOwner CurrentResourceOwner() + { + return INSTANCE.resourceOwner(RSO_Current); + } + + static ResourceOwner CurTransactionResourceOwner() + { + return INSTANCE.resourceOwner(RSO_CurTransaction); + } + + static ResourceOwner TopTransactionResourceOwner() + { + return INSTANCE.resourceOwner(RSO_TopTransaction); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/SlotTester.java b/pljava-api/src/main/java/org/postgresql/pljava/model/SlotTester.java new file mode 100644 index 000000000..3e30f18a2 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/SlotTester.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import java.util.List; + +import org.postgresql.pljava.Adapter; + +/** + * A temporary test jig during TupleTableSlot development, not intended to last. + */ +public interface SlotTester +{ + /** + * Unwrap a {@link ResultSet} instance from the legacy JDBC layer as a + * {@link Portal} instance so results can be retrieved using new API. + * @param rs a ResultSet, which can only be an SPIResultSet obtained from + * the legacy JDBC implementation, not yet closed or used to fetch anything, + * and will be closed. + */ + Portal unwrapAsPortal(ResultSet rs) throws SQLException; + + /** + * Execute query, returning its complete result as a {@code List} + * of {@link TupleTableSlot}. + */ + List test(String query); + + /** + * Return one of the predefined {@link Adapter} instances, given knowledge + * of the class name and static final field name within that class inside + * PL/Java's implementation module. + *

+ * Example: + *

+	 *<pre>
+	 * adapterPlease(
+	 *  "org.postgresql.pljava.pg.adt.Primitives", "FLOAT8_INSTANCE");
+	 *</pre>
+ */ + Adapter adapterPlease(String clazz, String field) + throws ReflectiveOperationException; + + /** + * A temporary marker interface used on classes or interfaces whose + * static final fields should be visible to {@code adapterPlease}. + */ + interface Visible + { + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Tablespace.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Tablespace.java new file mode 100644 index 000000000..e2249e51f --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Tablespace.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a tablespace used in laying out on-disk storage. + */ +public interface Tablespace +extends + Addressed, Named, Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(TableSpaceRelationId, Tablespace.class); + + Map options(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Transform.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Transform.java new file mode 100644 index 000000000..68f45e036 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Transform.java @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.PLJavaBasedLanguage.UsingTransforms; // for javadoc +import org.postgresql.pljava.annotation.Function.Effects; // for javadoc + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +/** + * Model of the PostgreSQL {@code pg_transform} system catalog. + *

+ * A transform is a very open-ended PostgreSQL arrangement for controlling how + * values of a target PostgreSQL type may be converted to values of some + * appropriate data type available in a given procedural language, and back + * again. PostgreSQL does none of this work itself, but simply provides a way to + * declare + * a transform (associating a {@code fromSQL} and a {@code toSQL} function + * with a procedural language and a PostgreSQL type), and syntax in + * {@code CREATE FUNCTION} and {@code CREATE PROCEDURE} to indicate + * {@linkplain RegProcedure#transformTypes() which types} should have + * such transforms applied. + *
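+ * For illustration only (a sketch, not part of this changeset; {@code xform}
+ * is assumed to be a {@code Transform} already in hand), a language handler
+ * might consult:
+ *<pre>
+ * RegType forType = xform.type();
+ * ProceduralLanguage lang = xform.language();
+ * boolean useDefaultFromSQL = ! xform.fromSQL().isValid();
+ *</pre>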

+ * Beyond verifying, at {@code CREATE FUNCTION} or {@code CREATE PROCEDURE} + * time, that any transforms mentioned in the declaration do exist, PostgreSQL + * does nothing to apply any transforms when the function or procedure + * is called. If a function's or procedure's declaration indicates any types + * for which transforms should be applied, the full responsibility for doing so + * (including all details of how it is done) falls to the function's or + * procedure's implementing procedural language. + *

+ * If a procedural language implementation does not contain logic to apply + * transforms when requested, it should reject any function or + * procedure with non-null {@link RegProcedure#transformTypes() transformTypes}, + * at validation time when possible. If it does not, PostgreSQL will allow + * functions and procedures in that language to declare transforms for types, + * and the declarations will have no effect. + *

+ * For a PL/Java-based language, such declarations will always be rejected + * if the language does not implement the {@link UsingTransforms} interface. + */ +public interface Transform extends Addressed +{ + RegClass.Known CLASSID = + formClassId(TransformRelationId, Transform.class); + + interface FromSQL extends Why { } + interface ToSQL extends Why { } + + /** + * The PostgreSQL data type to which this transform is intended to apply. + */ + RegType type(); + + /** + * The procedural language with which this transform can be used. + */ + ProceduralLanguage language(); + + /** + * Function that, at least conceptually, converts a value of + * {@linkplain #type() the intended PostgreSQL type} to a value of some + * appropriate type in the {@linkplain #language() target language}. + *

+ * A result with {@link RegProcedure#isValid() isValid()}{@code =false} + * indicates that the target language should use its default from-SQL + * conversion for this transform's type. The language's + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, in that case, should verify that the language has a usable + * default from-SQL conversion for the type. + *

+ * Otherwise, PostgreSQL will already have ensured that this is a + * non-{@linkplain RegProcedure#returnsSet() set-returning}, + * non-{@linkplain Effects#VOLATILE volatile} + * {@linkplain RegProcedure.Kind#FUNCTION function} + * declared with a {@linkplain RegProcedure#returnType() return type} of + * {@code INTERNAL} and a single argument of type {@code INTERNAL}. + *

+ * There are no other assurances made by PostgreSQL, and there can be many + * functions with such a signature that are not transform functions at all. + * It will be up to the {@linkplain #language() target language} and, if it + * is a PL/Java-based language, its + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, to verify (if there is any way to do so) that this function is + * one that the language implementation can use to convert the intended + * PostgreSQL type to a usable type in the target language. + *

+ * Because both the argument and the return type are declared + * {@code INTERNAL}, there is no way to be sure from the declaration alone + * that this is a transform function expecting the transform's PostgreSQL + * type. + *

+ * Whatever use is to be made of this function, including exactly what is + * passed as its {@code INTERNAL} argument and what it is expected to + * produce as its {@code INTERNAL} return type, is completely up to the + * {@linkplain #language() target language}. Therefore, each target language + * defines how to write transform functions that it can apply. A target + * language may impose requirements (such as what the function's + * {@linkplain RegProcedure#language() language of implementation} must be) + * to simplify the problem of determining whether the function is suitable, + * perhaps by inspection of the function's + * {@linkplain RegProcedure#src() source text} when its language of + * implementation is known. It is even up to the target language + * implementation whether this function is ever 'called' in the usual sense + * at all, as opposed to, say, having its source text interpreted in some + * other way. + */ + RegProcedure fromSQL(); + + /** + * Function that, at least conceptually, converts a value of + * some appropriate type in the {@linkplain #language() target language} + * to a value of {@linkplain #type() the intended PostgreSQL type}. + *

+ * A result with {@link RegProcedure#isValid() isValid()}{@code =false} + * indicates that the target language should use its default to-SQL + * conversion for this transform's type. The language's + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, in that case, should verify that the language has a usable + * default to-SQL conversion for the type. + *

+ * Otherwise, PostgreSQL will already have ensured that this is a + * non-{@linkplain RegProcedure#returnsSet() set-returning}, + * non-{@linkplain Effects#VOLATILE volatile} + * {@linkplain RegProcedure.Kind#FUNCTION function} + * declared with a {@linkplain RegProcedure#returnType() return type} of + * {@linkplain #type() the intended PostgreSQL type} and a single argument + * of type {@code INTERNAL}. + *

+ * There are no other assurances made by PostgreSQL, and there could be + * functions with such a signature that are not transform functions at all. + * It will be up to the {@linkplain #language() target language} and, if it + * is a PL/Java-based language, its + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, to verify (if there is any way to do so) that this function is + * one that the language implementation can use to convert the expected + * target-language type to the intended PostgreSQL type. + *

+ * The return type of this function will match the transform's PostgreSQL + * type, but as the argument type is declared {@code INTERNAL}, there is + * no way to be sure from the declaration alone that the argument this + * function expects is what the target language implementation will pass + * to a transform function. + *

+ * Whatever use is to be made of this function, including exactly what is + * passed as its {@code INTERNAL} argument, is completely up to the + * {@linkplain #language() target language}. Therefore, each target language + * defines how to write transform functions that it can apply. A target + * language may impose requirements (such as what the function's + * {@linkplain RegProcedure#language() language of implementation} must be) + * to simplify the problem of determining whether the function is suitable, + * perhaps by inspection of the function's + * {@linkplain RegProcedure#src() source text} when its language of + * implementation is known. It is even up to the target language + * implementation whether this function is ever 'called' in the usual sense + * at all, as opposed to, say, having its source text interpreted in some + * other way. + */ + RegProcedure toSQL(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Trigger.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Trigger.java new file mode 100644 index 000000000..99e432c39 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Trigger.java @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLXML; + +import java.util.List; +import java.util.Set; + +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Trigger.Called; +import org.postgresql.pljava.annotation.Trigger.Event; +import org.postgresql.pljava.annotation.Trigger.Scope; + +import org.postgresql.pljava.model.CatalogObject.*; +import org.postgresql.pljava.model.RegProcedure.Call.Context.TriggerData; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a trigger entry in the PostgreSQL catalogs. + *

+ * This catalog object, at least at first, will have an unusual limitation: + * its accessor methods (other than those of {@link Addressed}) may only work + * when called by a trigger function or its language handler within the scope + * of the function's specialization and execution. Some may be unimplemented + * even then, as noted in the documentation of the methods themselves. + */ +public interface Trigger +extends + Addressed, Named +{ + RegClass.Known CLASSID = + formClassId(TriggerRelationId, Trigger.class); + + enum ReplicationRole { ON_ORIGIN, ALWAYS, ON_REPLICA, DISABLED }; + + interface ForTrigger extends Why { } + + /** + * Name of this trigger. + *

+ * The table on which the trigger is declared serves as a namespace, + * within which trigger names on the same table must be unique. + */ + @Override + Simple name(); + + /** + * The table on which this trigger is declared. + *

+ * May throw {@code UnsupportedOperationException}. Within a trigger + * function or its handler, {@link TriggerData} can supply the same + * information. + */ + RegClass relation(); + + /** + * Parent trigger this trigger is cloned from (applies to partitioned + * tables), null if not a clone. + *

+ * May throw {@code UnsupportedOperationException}. + * @see #isClone + */ + Trigger parent(); + + /** + * The function to be called. + *

+ * May throw {@code UnsupportedOperationException}. Within a trigger + * function or its handler, this is just the function being called. + */ + RegProcedure function(); + + /** + * When this trigger is to fire (before, after, or instead of the + * triggering event). + */ + Called called(); + + /** + * The event(s) for which the trigger can fire. + */ + Set events(); + + /** + * The scope (per-statement or per-row) of this trigger. + */ + Scope scope(); + + /** + * For which {@code session_replication_role} modes the trigger fires. + */ + ReplicationRole enabled(); + + /** + * True if the trigger is internally generated (usually to enforce the + * constraint identified by {@link #constraint}). + */ + boolean internal(); + + /** + * The referenced table if this trigger pertains to a referential integrity + * constraint, otherwise null. + */ + RegClass constraintRelation(); + + /** + * The index supporting a unique, primary key, referential integrity, or + * exclusion constraint, null if this trigger is not for such a constraint. + */ + RegClass constraintIndex(); + + /** + * The constraint associated with the trigger, null if none. + * @return null, no {@code Constraint} catalog object is implemented yet + */ + Constraint constraint(); + + /** + * True for a constraint trigger that is deferrable. + */ + boolean deferrable(); + + /** + * True for a constraint trigger initially deferred. + */ + boolean initiallyDeferred(); + + /** + * The columns of interest (as a {@link Projection} of {@link #relation}'s + * columns) if the trigger is column-specific, otherwise null. + */ + Projection columns(); + + /** + * Any additional {@code String} arguments to pass to the trigger function. + */ + List arguments(); + + /** + * A {@code pg_node_tree} representation of a boolean expression restricting + * when this trigger can fire, or null if none. + */ + SQLXML when(); + + /** + * Name by which an ephemeral table showing prior row values can be queried + * via SPI by the function for an {@code AFTER} trigger whose + * {@link #events events} include {@code UPDATE} or {@code DELETE}. + */ + Simple tableOld(); + + /** + * Name by which an ephemeral table showing new row values can be queried + * via SPI by the function for an {@code AFTER} trigger whose + * {@link #events events} include {@code UPDATE} or {@code INSERT}. + */ + Simple tableNew(); + + /** + * True if this trigger is a clone. + *

+ * This information will be available to a trigger function or its handler, + * even if the actual {@link #parent parent} trigger is not. + */ + boolean isClone(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/TupleDescriptor.java b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleDescriptor.java new file mode 100644 index 000000000..f3cff5a08 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleDescriptor.java @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; // javadoc + +import java.util.List; + +import org.postgresql.pljava.TargetList.Projection; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Conceptually, a {@code TupleDescriptor} is a list of {@code Attribute}, with + * a {@code RegType} that identifies its corresponding row type. + *

+ * The row type might be just {@code RECORD}, though, representing a + * transient, unregistered type. + *

+ * The {@code Attribute} instances may then correspond to nothing that exists in + * {@code pg_attribute}, in which case they will be 'virtual' instances whose + * {@code CatalogObject.Addressed} methods don't work, but which simply hold a + * reference to the {@code TupleDescriptor} they came from instead. + *

+ * A {@code TupleDescriptor} may also contain attribute defaults and/or + * constraints. These would be less often of interest in Java; if there is + * a need to make them available, rather than complicating + * {@code TupleDescriptor}, it will probably be more natural to make them + * available by methods on {@code Attribute}. + */ +public interface TupleDescriptor extends Projection +{ + /** + * @deprecated As a subinterface of {@link Projection Projection}, + * a {@code TupleDescriptor} already is a {@code List}, and there + * is no need for this method to simply return its own receiver. + */ + @Deprecated(forRemoval=true) + default List attributes() + { + return this; + } + + /** + * If this tuple descriptor is not ephemeral, returns the PostgreSQL type + * that identifies it. + *

+ * If the descriptor is for a known composite type in the PostgreSQL + * catalog, this method returns that type. + *

+ * If the descriptor has been created programmatically and interned, this + * method returns the type + * {@link RegType#RECORD RECORD}.{@link RegType#modifier(int) modifier(n)} + * where n was uniquely assigned by PostgreSQL when the + * descriptor was interned, and will reliably refer to this tuple descriptor + * for the duration of the session. + *

+ * For any ephemeral descriptor passed around in code without being + * interned, this method returns plain {@link RegType#RECORD RECORD}, which + * is useless for identifying the tuple structure. + */ + RegType rowType(); + + /** + * Gets an attribute by name. + *

+ * This API should be considered scaffolding or preliminary, until an API + * can be designed that might offer a convenient usage idiom without + * presupposing something like a name-to-attribute map in every descriptor. + *

+ * This default implementation simply does {@code project(name).get(0)}. + * Code that will do so repeatedly might be improved by doing so once and + * retaining the result. + * @throws SQLSyntaxErrorException 42703 if no attribute name matches + * @deprecated A one-by-one lookup-by-name API forces the implementation to + * cater to an inefficient usage pattern, when callers will often have a + * number of named attributes to look up, which can be done more efficiently + * in one go; see the methods of {@link Projection Projection}. + */ + @Deprecated(forRemoval=true) + default Attribute get(Simple name) throws SQLException + { + return project(name).get(0); + } + + /** + * Equivalent to {@code get(Simple.fromJava(name))}. + *

+ * This API should be considered scaffolding or preliminary, until an API + * can be designed that might offer a convenient usage idiom without + * presupposing something like a name-to-attribute map in every descriptor. + * @throws SQLSyntaxErrorException 42703 if no attribute name matches + * @deprecated A one-by-one lookup-by-name API forces the implementation to + * cater to an inefficient usage pattern, when callers will often have a + * number of named attributes to look up, which can be done more efficiently + * in one go; see the methods of {@link Projection Projection}. + */ + @Deprecated(forRemoval=true) + default Attribute get(String name) throws SQLException + { + return get(Simple.fromJava(name)); + } + + /** + * Return this descriptor unchanged if it is already interned in + * PostgreSQL's type cache, otherwise an equivalent new descriptor with + * a different {@link #rowType rowType} uniquely assigned to identify it + * for the duration of the session. + *
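+ * For illustration only (a sketch, not part of this changeset; {@code d} is
+ * assumed to be an ephemeral descriptor already in hand):
+ *<pre>
+ * TupleDescriptor.Interned blessed = d.intern();
+ * RegType rowType = blessed.rowType(); // now identifies d for this session
+ *</pre>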

+ * PostgreSQL calls this operation "BlessTupleDesc", which updates the + * descriptor in place; in PL/Java code, the descriptor returned by this + * method should be used in place of the original. + */ + Interned intern(); + + /** + * A descriptor that either describes a known composite type in the + * catalogs, or has been interned in PostgreSQL's type cache, and has + * a distinct {@link #rowType rowType} that can be used to identify it + * for the duration of the session. + *

+ * Some operations, such as constructing a composite value for a function + * to return, require this. + */ + interface Interned extends TupleDescriptor + { + @Override + default Interned intern() + { + return this; + } + } + + /** + * A descriptor that has been constructed on the fly and has not been + * interned. + *

+ * For all such descriptors, {@link #rowType rowType} returns + * {@link RegType#RECORD RECORD}, which is of no use for identification. + * For some purposes (such as constructing a composite value for a function + * to return), an ephemeral descriptor must be interned before it can + * be used. + */ + interface Ephemeral extends TupleDescriptor + { + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/TupleTableSlot.java b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleTableSlot.java new file mode 100644 index 000000000..ba492f308 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleTableSlot.java @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsBoolean; + +/** + * A PostgreSQL abstraction that can present a variety of underlying tuple + * representations in a common way. + *

+ * PL/Java may take the liberty of extending this class to present even some + * other tuple-like things that are not native tuple forms to PostgreSQL. + *

+ * A readable instance that relies on PostgreSQL's "deforming" can be + * constructed over any supported flavor of underlying tuple. Retrieving + * its values can involve JNI calls to the support functions in PostgreSQL. + * Its writable counterpart is also what must be used for constructing a tuple + * on the fly; after its values/nulls have been set (pure Java), it can be + * flattened (at the cost of a JNI call) to return a pass-by-reference + * {@code Datum} usable as a composite function argument or return value. + *

+ * A specialized instance, with support only for reading, can be constructed + * over a PostgreSQL tuple in its widely-used 'heap' form. PL/Java knows that + * form well enough to walk it and retrieve values mostly without JNI calls. + *
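+ * For illustration only (a sketch, not part of this changeset; {@code st} is
+ * assumed to be a {@code SlotTester}, {@code slot} a slot from one of its
+ * query results, and the class and field names are those of the
+ * {@code SlotTester.adapterPlease} example, assumed to name an
+ * {@code AsDouble} adapter):
+ *<pre>
+ * AsDouble d8 = (AsDouble)st.adapterPlease(
+ *  "org.postgresql.pljava.pg.adt.Primitives", "FLOAT8_INSTANCE");
+ * double v = slot.get(0, d8);      // zero-based index
+ * double w = slot.sqlGet(1, d8);   // same column, SQL-style 1-based index
+ *</pre>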

+ * A {@code TupleTableSlot} is not safe for concurrent use by multiple threads, + * in the absence of appropriate synchronization. + */ +public interface TupleTableSlot +{ + TupleDescriptor descriptor(); + RegClass relation(); + + /* + * Idea: move these methods out of public API, as they aren't very + * efficient. Make them invocable internally via TargetList. As an interim + * measure, remove their "throws SQLException" clauses; the implementation + * hasn't been throwing those anyway, but wrapping them in a runtime + * version. (Which needs to get unwrapped eventually, somewhere suitable.) + */ + T get(Attribute att, As adapter); + long get(Attribute att, AsLong adapter); + double get(Attribute att, AsDouble adapter); + int get(Attribute att, AsInt adapter); + float get(Attribute att, AsFloat adapter); + short get(Attribute att, AsShort adapter); + char get(Attribute att, AsChar adapter); + byte get(Attribute att, AsByte adapter); + boolean get(Attribute att, AsBoolean adapter); + + T get(int idx, As adapter); + long get(int idx, AsLong adapter); + double get(int idx, AsDouble adapter); + int get(int idx, AsInt adapter); + float get(int idx, AsFloat adapter); + short get(int idx, AsShort adapter); + char get(int idx, AsChar adapter); + byte get(int idx, AsByte adapter); + boolean get(int idx, AsBoolean adapter); + + default T sqlGet(int idx, As adapter) + { + return get(idx - 1, adapter); + } + + default long sqlGet(int idx, AsLong adapter) + { + return get(idx - 1, adapter); + } + + default double sqlGet(int idx, AsDouble adapter) + { + return get(idx - 1, adapter); + } + + default int sqlGet(int idx, AsInt adapter) + { + return get(idx - 1, adapter); + } + + default float sqlGet(int idx, AsFloat adapter) + { + return get(idx - 1, adapter); + } + + default short sqlGet(int idx, AsShort adapter) + { + return get(idx - 1, adapter); + } + + default char sqlGet(int idx, AsChar adapter) + { + return get(idx - 1, adapter); + } + + default byte sqlGet(int idx, AsByte adapter) + { + return get(idx - 1, adapter); + } + + default boolean sqlGet(int idx, AsBoolean adapter) + { + return get(idx - 1, adapter); + } + + /** + * A form of {@code TupleTableSlot} consisting of a number of indexable + * elements all of the same type, described by the single {@code Attribute} + * of a one-element {@code TupleDescriptor}. + *

+ * This is one form in which a PostgreSQL array can be accessed. + *

+ * The {@code get} methods that take an {@code Attribute} are not especially + * useful with this type of slot, and will simply return its first element. + */ + interface Indexed extends TupleTableSlot + { + /** + * Count of the slot's elements (one greater than the maximum index + * that may be passed to {@code get}). + */ + int elements(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/model/package-info.java new file mode 100644 index 000000000..abfe63218 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/package-info.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Interfaces that model a useful subset of the PostgreSQL system catalogs + * and related PostgreSQL abstractions for convenient Java access. + *

+ *<h2>CatalogObject and its subinterfaces</h2>

+ *

+ * The bulk of this package consists of interfaces extending + * {@link CatalogObject CatalogObject}, corresponding to various database + * objects represented in the PostgreSQL system catalogs. + *

+ * In many of the PostgreSQL catalog tables, each row is identified by an + * integer {@code oid}. When a row in a catalog table represents an object of + * some kind, the {@code oid} of that row (plus an identifier for which table + * it is defined in) will be enough to identify that object. + *

+ *<h3>CatalogObject</h3>

+ *

+ * In most of the catalog tables, reference to another object is by its bare + * {@code oid}; the containing table is understood. For example, the + * {@code prorettype} attribute of a row in {@code pg_proc} (the catalog of + * procedures and functions) is a bare {@code oid}, understood to identify a row + * in {@code pg_type}, namely, the data type that the function returns. + *

+ * Such an {@code oid} standing alone, when the containing catalog is only + * implied in context, is represented in PL/Java by an instance of the root + * class {@link CatalogObject CatalogObject} itself. Such an object does not + * carry much information; it can be asked for its {@code oid}, and it can be + * combined with the {@code oid} of some catalog table to produce a + * {@link CatalogObject.Addressed CatalogObject.Addressed}. + *

+ *<h3>CatalogObject.Addressed</h3>

+ *

+ * When the {@code oid} of a row in some catalog table is combined with an + * identifier for which catalog table, the result is the explicit + * address of an object. Because catalog tables themselves are defined by rows + * in one particular catalog table ({@code pg_class}), all that is needed to + * identify one is the {@code oid} of its defining row in {@code pg_class}. + * Therefore, a pair of numbers {@code (classId, objectId)} is a complete + * "object address" for most types of object in PostgreSQL. The {@code classId} + * identifies a catalog table (by its row in {@code pg_class}), and therefore + * what kind of object is intended, and the {@code objectId} identifies + * the specific row in that catalog table, and therefore the specific object. + *

+ * Such an {@code oid} pair is represented in PL/Java by an instance of + * {@link CatalogObject.Addressed CatalogObject.Addressed}—or, more + * likely, one of its specific subinterfaces in this package corresponding to + * the type of object. A function, for example, may be identified by a + * {@link RegProcedure RegProcedure} instance ({@code classId} identifies the + * {@code pg_proc} table, {@code objectId} is the row for the function), and its + * return type by a {@link RegType RegType} instance ({@code classId} identifies + * the {@code pg_type} table, and {@code objectId} the row defining the data + * type). + *

+ *<h3>CatalogObject.Component</h3>

+ *

+ * The only current exception in PostgreSQL to the + * two-{@code oid}s-identify-an-object rule is for attributes (columns of tables + * or components of composite types), which are identified by three numbers, + * the {@code classId} and {@code objectId} of the parent object, plus a third + * number {@code subId} for the component's position in the parent. + * {@link Attribute Attribute}, therefore, is that rare subinterface that also + * implements {@link CatalogObject.Component CatalogObject.Component}. + *

+ * For the most part, that detail should be of no consequence to a user of this + * package, who will probably only ever obtain {@code Attribute} instances + * from a {@link TupleDescriptor TupleDescriptor}. + *

+ *<h3>CatalogObject instances are singletons</h3>

+ *

+ * Object instances in this catalog model are lazily-populated singletons + * that exist upon being mentioned, and thereafter reliably identify the same + * {@code (classId,objectId)} in the PostgreSQL catalogs. (Whether that + * {@code (classId,objectId)} continues to identify the "same" thing in + * PostgreSQL can be affected by data-definition commands being issued in + * the same or some other session.) An instance is born lightweight, with only + * its identifying triple of numbers. Its methods that further expose properties + * of the addressed object (including whether any such object even exists) + * do not obtain that information from the PostgreSQL system caches until + * requested, and may then cache it in Java until signaled by PostgreSQL that + * some catalog change has invalidated it. + *

+ *<h2>CharsetEncoding</h2>

+ *

+ * While not strictly a catalog object (PostgreSQL's supported encodings are + * a hard-coded set, not represented in the catalogs), they are exposed by + * {@link CharsetEncoding CharsetEncoding} instances that otherwise behave much + * like the modeled catalog objects, and are returned by the {@code encoding()} + * methods on {@link Database Database} and {@link RegCollation RegCollation}. + * The one in use on the server (an often-needed value) is exposed by the + * {@link CharsetEncoding#SERVER_ENCODING SERVER_ENCODING} static. + *

+ *<h2>Lifespan subinterfaces</h2>

+ * Some PL/Java objects correspond to certain native structures in PostgreSQL + * and therefore must not be used beyond the native structures' lifespan. + * {@link Lifespan Lifespan} abstractly models any object in PostgreSQL that + * can be used to define, and detect the end of, a native-object lifespan. + * Two interfaces in this package that extend it and model specific PostgreSQL + * objects with that ability are {@link MemoryContext MemoryContext} and + * {@link ResourceOwner ResourceOwner}. + *

+ *<h2>TupleTableSlot, TupleDescriptor, and Adapter</h2>

+ *

+ * {@code TupleTableSlot} in PostgreSQL is a flexible abstraction that can + * present several variant forms of native tuples to be manipulated with + * a common API. Modeled on that, {@link TupleTableSlot TupleTableSlot} is + * further abstracted, and can present a uniform API in PL/Java even to + * tuple-like things—anything with a sequence of typed, possibly named + * values—that might not be in the form of PostgreSQL native tuples. + *

+ * The key to the order, types, and names of the components of a tuple is + * its {@link TupleDescriptor TupleDescriptor}, which in broad strokes is little + * more than a {@code List} of {@link Attribute Attribute}. + *

+ * Given a tuple, and an {@code Attribute} that identifies its PostgreSQL data + * type, the job of accessing that value as some appropriate Java type falls to + * an {@link Adapter Adapter}, of which PL/Java provides a selection to cover + * common types, and there is + * a {@link org.postgresql.pljava.adt.spi service-provider interface} allowing + * independent development of others. + *
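+ * For illustration only (a sketch, not part of this changeset; {@code slot}
+ * is an assumed {@code TupleTableSlot} and {@code asString} an assumed
+ * adapter of the {@code As} flavor producing {@code String}):
+ *<pre>
+ * TupleDescriptor td = slot.descriptor(); // effectively a list of Attribute
+ * Attribute first = td.get(0);
+ * String s = slot.get(first, asString);
+ *</pre>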

+ * PL/Java supplies simple adapters when a Java primitive or some existing + * standard Java class is clearly the appropriate mapping for a PostgreSQL type. + * Other than that (and excepting the model classes in this package), PL/Java + * avoids defining new Java classes to represent other PostgreSQL types. Such + * classes may already have been developed for an application, or may be found + * in existing Java driver libraries for PostgreSQL, such as PGJDBC or + * PGJDBC-NG. It would be unhelpful for PL/Java to offer another such, + * independent and incompatible, set. + *

+ * Instead, for PostgreSQL types that might not have an obvious, appropriate + * mapping to a standard Java type, or that might have more than one plausible + * mapping, PL/Java provides a set of functional interfaces in the + * package {@link org.postgresql.pljava.adt}. An {@code Adapter} (encapsulating + * internal details of a data type) can then expose the content in a documented, + * semantically clear form, to a simple application-supplied functional + * interface implementation or lambda that will produce a result of whatever + * Java type the application may already wish to use. + * + * @author Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Lifespan; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java index 1b6e014a0..228613b64 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java @@ -532,6 +532,16 @@ public String toString() return deparse(UTF_8); } + /** + * Whether this instance represents the name of something unnamed. + * @return false except where overridden + * @see None#isUnnamed + */ + public boolean isUnnamed() + { + return false; + } + /** * Ensure deserialization doesn't produce any unknown {@code Identifier} * subclass. @@ -544,7 +554,7 @@ private void readObject(ObjectInputStream in) in.defaultReadObject(); Class c = getClass(); if ( c != Simple.class && c != Foldable.class && c != Folding.class - && c != Pseudo.class && c != Operator.class + && c != Pseudo.class && c != None.class && c != Operator.class && c != Qualified.class ) throw new InvalidObjectException( "deserializing unknown Identifier subclass: " @@ -622,8 +632,9 @@ public static Simple from(String s, boolean quoted) * Concatenates one or more strings or identifiers to the end of * this identifier. *

- * The arguments may be instances of {@code Simple} or of - * {@code CharSequence}, in any combination. + * The arguments may be instances of {@code Simple} (but not of + * {@link None None}) or of {@code CharSequence}, in any + * combination. *

* The resulting identifier folds if this identifier and all * identifier arguments fold and the concatenation (with all @@ -637,7 +648,7 @@ public Simple concat(Object... more) for ( Object o : more ) { - if ( o instanceof Simple ) + if ( o instanceof Simple && ! (o instanceof None) ) { Simple si = (Simple)o; foldable = foldable && si.folds(); @@ -651,7 +662,7 @@ else if ( o instanceof CharSequence ) else throw new IllegalArgumentException( "arguments to Identifier.Simple.concat() must be " + - "Identifier.Simple or CharSequence"); + "Identifier.Simple (and not None) or CharSequence"); } if ( foldable ) @@ -669,6 +680,12 @@ else if ( o instanceof CharSequence ) * does not have the form of a regular identifier, or if it has that * form but does not match its pgFold-ed form (without quotes, PG * would have folded it in that case). + *
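+ * A sketch (not part of this changeset) of the empty-string handling
+ * described in the next paragraph:
+ *<pre>
+ * Simple n = Simple.fromCatalog("");   // returns None.INSTANCE
+ * boolean unnamed = n.isUnnamed();     // true
+ *</pre>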
+ * The PostgreSQL catalogs can contain empty strings in some + * contexts where a name might not be provided (for example, when + * {@code pg_proc.proargnames} is present because some parameters + * have names but not all of them do). So this method can accept an + * empty string, returning the {@link None None} instance. * @param s name of the simple identifier, as found in a system * catalog. * @return an Identifier.Simple or subclass appropriate to the form @@ -676,6 +693,9 @@ else if ( o instanceof CharSequence ) */ public static Simple fromCatalog(String s) { + if ( "".equals(s) ) + return None.INSTANCE; + if ( PG_REGULAR_IDENTIFIER.matcher(s).matches() ) { if ( s.equals(Folding.pgFold(s)) ) @@ -866,6 +886,14 @@ private Simple(String nonFolded) m_nonFolded = nonFolded; } + /** + * Used only by {@link None} below. + */ + private Simple() + { + m_nonFolded = ""; + } + private static String checkLength(String s) { int cpc = s.codePointCount(0, s.length()); @@ -880,6 +908,8 @@ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); + if ( this instanceof None ) + return; String diag = checkLength(m_nonFolded); if ( null != diag ) throw new InvalidObjectException(diag); @@ -1107,6 +1137,81 @@ private Object readResolve() throws ObjectStreamException } } + /** + * What is the name of an unnamed parameter or column? + */ + public static final class None extends Simple + { + private static final long serialVersionUID = 1L; + + public static final None INSTANCE = new None(); + + /** + * A {@code None} identifier never equals anything. + */ + @Override + public boolean equals(Object other) + { + return false; + } + + /** + * True. + */ + @Override + public boolean isUnnamed() + { + return true; + } + + private None() + { + } + + private Object readResolve() throws ObjectStreamException + { + switch ( m_nonFolded ) + { + case "": return INSTANCE; + default: + throw new InvalidObjectException( + "not the string value of None: " + m_nonFolded); + } + } + + /** + * Returns this object if there are zero arguments; otherwise throws + * {@link IllegalArgumentException}. + */ + @Override + public Simple concat(Object... more) + { + if ( 0 == more.length ) + return this; + throw new IllegalArgumentException( + "may not concatenate anything to None"); + } + + /** + * Throws {@link UnsupportedOperationException}. + */ + @Override + public String deparse(Charset cs) + { + throw new UnsupportedOperationException( + "no valid deparse result for Identifier.Simple.None"); + } + + /** + * Returns the empty string. + */ + @Override + public String toString() + { + return ""; + } + } + /** * Class representing an Identifier that names a PostgreSQL operator. 
*/ @@ -1511,6 +1616,10 @@ else if ( 0 != opStart ) private Qualified(Simple qualifier, T local) { + if ( qualifier instanceof None || local instanceof None ) + throw new IllegalArgumentException( + "no component of a qualified identifier may be None"); + m_qualifier = qualifier; m_local = requireNonNull(local); } @@ -1523,6 +1632,10 @@ private void readObject(ObjectInputStream in) throw new InvalidObjectException( "Identifier.Qualified deserialized with " + "null local part"); + if ( m_qualifier instanceof None || m_local instanceof None ) + throw new InvalidObjectException( + "Identifier.Qualified deserialized with None as " + + "a component"); } @Override diff --git a/pljava-api/src/test/java/CatalogTest.java b/pljava-api/src/test/java/CatalogTest.java new file mode 100644 index 000000000..15aea321f --- /dev/null +++ b/pljava-api/src/test/java/CatalogTest.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import org.postgresql.pljava.model.RegNamespace; + +public class CatalogTest +{ + public boolean whatbits(RegNamespace n) + { + return n.grants().stream().anyMatch( + g -> g.usageGranted() && g.createGranted() ); + } +} diff --git a/pljava-api/src/test/java/LexicalsTest.java b/pljava-api/src/test/java/LexicalsTest.java index 89b94d07f..c712ba9ab 100644 --- a/pljava-api/src/test/java/LexicalsTest.java +++ b/pljava-api/src/test/java/LexicalsTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2016-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -309,7 +309,9 @@ public void testIdentifierSerialization() throws Exception Operator.from("!@#%*"), null, - null + null, + + Simple.fromCatalog("") }; orig[5] = (( Simple )orig[2]).withQualifier((Simple)orig[1]); diff --git a/pljava-examples/pom.xml b/pljava-examples/pom.xml index bfa2c8e4f..1f4ab03bc 100644 --- a/pljava-examples/pom.xml +++ b/pljava-examples/pom.xml @@ -53,6 +53,7 @@ org/postgresql/pljava/example/*.java org/postgresql/pljava/example/annotation/*.java + org/postgresql/pljava/example/polyglot/*.java --processor-module-path @@ -192,7 +193,8 @@ function executeReport(report, locale) var packages = [ "org.postgresql.pljava.example", - "org.postgresql.pljava.example.annotation" + "org.postgresql.pljava.example.annotation", + "org.postgresql.pljava.example.polyglot" ]; if ( isProfileActive('saxon-examples') ) diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CatalogObjects.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CatalogObjects.java new file mode 100644 index 000000000..9e2087c8f --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CatalogObjects.java @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.lang.reflect.Method; +import static java.lang.reflect.Modifier.isPublic; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Statement; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import java.util.logging.Logger; +import java.util.logging.Level; +import static java.util.logging.Level.INFO; +import static java.util.logging.Level.WARNING; + +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toList; +import java.util.stream.Stream; + +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.TargetList.Cursor; +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +import org.postgresql.pljava.model.CatalogObject; +import org.postgresql.pljava.model.CatalogObject.Addressed; +import org.postgresql.pljava.model.CatalogObject.Named; +import org.postgresql.pljava.model.Portal; +import static org.postgresql.pljava.model.Portal.ALL; +import static org.postgresql.pljava.model.Portal.Direction.FORWARD; +import org.postgresql.pljava.model.ProceduralLanguage; +import org.postgresql.pljava.model.RegClass; +import org.postgresql.pljava.model.RegClass.Known; +import org.postgresql.pljava.model.RegProcedure; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.SlotTester; +import org.postgresql.pljava.model.Transform; +import org.postgresql.pljava.model.Trigger; +import org.postgresql.pljava.model.TupleTableSlot; + +/** + * A test that PL/Java's various {@link CatalogObject} implementations are + * usable. + *
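 An editor's sketch of the failure mode the next paragraph describes: a model class whose class initialization failed surfaces that only at first use, as a LinkageError, which is also how catalogClasses() below reports it. The helper class and method names are hypothetical.

 import java.sql.SQLException;
 import org.postgresql.pljava.model.CatalogObject.Addressed;

 class FirstUseSketch
 {
     static boolean firstUse(Addressed<?> obj) throws SQLException
     {
         try
         {
             return obj.exists(); // first touch of the implementing class
         }
         catch ( LinkageError e )
         {
             throw new SQLException("catalog representation unusable", e);
         }
     }
 }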
+ * They rely on named attributes, in PostgreSQL's system catalogs, that are + * looked up at class initialization, so on a PostgreSQL version that may not + * supply all the expected attributes, the issue may not be detected until + * an affected {@code CatalogObject} subclass is first used. This test uses as + * many of them as it can. + */ +@SQLAction(requires="catalogClasses function", install= + "SELECT javatest.catalogClasses()" +) +@SQLAction(requires="catalogInval function", install= + "SELECT javatest.catalogInval()" +) +public class CatalogObjects { + static final Logger logr = Logger.getAnonymousLogger(); + + static void log(Level v, String m, Object... p) + { + logr.log(v, m, p); + } + + static final As CatObjAdapter; + static final As PrLangAdapter; + static final As RegClsAdapter; + static final As ,?> RegPrcAdapter; + static final As RegTypAdapter; + static final As TrnsfmAdapter; + + static + { + try + { + Connection conn = getConnection("jdbc:default:connection"); + + // Get access to the hacked-together interim testing API + SlotTester t = conn.unwrap(SlotTester.class); + + String cls = "org.postgresql.pljava.pg.adt.OidAdapter"; + + @SuppressWarnings("unchecked") Object _1 = + CatObjAdapter = + (As)t.adapterPlease(cls, "INSTANCE"); + @SuppressWarnings("unchecked") Object _2 = + PrLangAdapter = + (As)t.adapterPlease(cls,"PLANG_INSTANCE"); + RegClsAdapter = + (As)t.adapterPlease(cls, "REGCLASS_INSTANCE"); + RegPrcAdapter = + (As,?>)t.adapterPlease( + cls, "REGPROCEDURE_INSTANCE"); + RegTypAdapter = + (As)t.adapterPlease(cls, "REGTYPE_INSTANCE"); + TrnsfmAdapter = + (As)t.adapterPlease(cls, "TRANSFORM_INSTANCE"); + } + catch ( SQLException | ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + @Function(schema="javatest", provides="catalogInval function") + public static void catalogInval() throws SQLException + { + try ( + Connection conn = getConnection("jdbc:default:connection"); + Statement s = conn.createStatement(); + ) + { + SlotTester st = conn.unwrap(SlotTester.class); + CatalogObject.Addressed catObj; + String description1; + String description2; + boolean passing = true; + + s.executeUpdate("CREATE TABLE tbl_a ()"); + catObj = findObj(s, st, RegClsAdapter, + "SELECT CAST ('tbl_a' AS pg_catalog.regclass)"); + description1 = catObj.toString(); + s.executeUpdate("ALTER TABLE tbl_a RENAME TO tbl_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("tbl_a", "tbl_b")) ) + { + log(WARNING, "RegClass before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP TABLE tbl_b"); + description1 = catObj.toString(); + if ( ! description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "RegClass before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeQuery( + "SELECT sqlj.alias_java_language('lng_a', sandboxed => true)") + .next(); + catObj = findObj(s, st, PrLangAdapter, + "SELECT oid FROM pg_catalog.pg_language " + + "WHERE lanname OPERATOR(pg_catalog.=) 'lng_a'"); + description1 = catObj.toString(); + s.executeUpdate("ALTER LANGUAGE lng_a RENAME TO lng_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("lng_a", "lng_b")) ) + { + log(WARNING, + "ProceduralLanguage before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP LANGUAGE lng_b"); + description1 = catObj.toString(); + if ( ! 
description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "ProceduralLanguage before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeUpdate( + "CREATE FUNCTION fn_a() RETURNS INTEGER LANGUAGE SQL " + + "AS 'SELECT 1'"); + catObj = findObj(s, st, RegPrcAdapter, + "SELECT CAST ('fn_a()' AS pg_catalog.regprocedure)"); + description1 = catObj.toString(); + s.executeUpdate("ALTER FUNCTION fn_a RENAME TO fn_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("fn_a", "fn_b")) ) + { + log(WARNING, "RegProcedure before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP FUNCTION fn_b"); + description1 = catObj.toString(); + if ( ! description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "RegProcedure before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeUpdate("CREATE TYPE typ_a AS ()"); + catObj = findObj(s, st, RegTypAdapter, + "SELECT CAST ('typ_a' AS pg_catalog.regtype)"); + description1 = catObj.toString(); + s.executeUpdate("ALTER TYPE typ_a RENAME TO typ_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("typ_a", "typ_b")) ) + { + log(WARNING, "RegType before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP TYPE typ_b"); + description1 = catObj.toString(); + if ( ! description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "RegType before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeUpdate( // a completely bogus transform, don't use it! + "CREATE TRANSFORM FOR pg_catalog.circle LANGUAGE sql" + + " (FROM SQL WITH FUNCTION time_support)"); + catObj = findObj(s, st, TrnsfmAdapter, + "SELECT CAST (trf.oid AS pg_catalog.oid)" + + " FROM pg_catalog.pg_transform AS trf" + + " JOIN pg_catalog.pg_language AS lan ON trflang = lan.oid" + + " WHERE lanname = 'sql'" + + " AND trftype = CAST ('circle' AS pg_catalog.regtype)"); + boolean exists1 = catObj.exists(); + s.executeUpdate( + "DROP TRANSFORM FOR pg_catalog.circle LANGUAGE sql"); + boolean exists2 = catObj.exists(); + if ( exists2 ) + { + log(WARNING, "Transform.exists() before/after drop: {0} / {1}", + exists1, exists2); + passing = false; + } + + if ( passing ) + log(INFO, "selective invalidation ok"); + } + } + + private static > T findObj( + Statement s, SlotTester st, As adapter, String query) + throws SQLException + { + try ( + Portal p = st.unwrapAsPortal(s.executeQuery(query)) + ) + { + return + p.tupleDescriptor().applyOver(p.fetch(FORWARD, 1), c0 -> c0 + .stream() + .map(c -> c.apply(adapter, o -> o)) + .findFirst().get()); + } + } + + @Function(schema="javatest", provides="catalogClasses function") + public static void catalogClasses() throws SQLException + { + String catalogRelationsQuery = + "SELECT" + + " oid" + + " FROM" + + " pg_catalog.pg_class" + + " WHERE" + + " relnamespace = CAST ('pg_catalog' AS pg_catalog.regnamespace)" + + " AND" + + " relkind = 'r'"; + + try ( + Connection conn = getConnection("jdbc:default:connection"); + Statement s = conn.createStatement(); + ) + { + SlotTester st = conn.unwrap(SlotTester.class); + + List knownRegClasses; + + try ( + Portal p = + st.unwrapAsPortal(s.executeQuery(catalogRelationsQuery)) + ) + { + Projection proj = p.tupleDescriptor(); + List tups = p.fetch(FORWARD, ALL); + + Class knownCls = Known.class; + + 
knownRegClasses = + proj.applyOver(tups, c0 -> c0.stream() + .map(c -> c.apply(RegClsAdapter, regcls -> regcls)) + .filter(knownCls::isInstance) + .map(knownCls::cast) + .collect(toList()) + ); + } + + int passed = 0; + int untested = 0; + + for ( Known regc : knownRegClasses ) + { + String objectQuery = + "SELECT oid FROM " + regc.qualifiedName() + " LIMIT 1"; + + Class classUnderTest = null; + + try ( + Portal p = + st.unwrapAsPortal(s.executeQuery(objectQuery)) + ) + { + Projection proj = p.tupleDescriptor(); + List tups = p.fetch(FORWARD, ALL); + Optional cobj = + proj.applyOver(tups, c0 -> c0.stream() + .map(c -> c.apply(CatObjAdapter, o -> o)) + .findAny()); + + if ( ! cobj.isPresent() ) + { + log(INFO, + "database has no {0} objects " + + "for representation test", regc.name()); + ++ untested; + continue; + } + + Addressed aobj = cobj.get().of(regc); + + classUnderTest = aobj.getClass(); + + if ( aobj instanceof Named ) + { + if ( aobj instanceof Trigger ) // name() won't work here + aobj.exists(); + else + ((Named)aobj).name(); + ++ passed; + continue; + } + + log(INFO, + "{0} untested, not instance of Named " + + "(does implement {1})", + classUnderTest.getCanonicalName().substring( + 1 + classUnderTest.getPackageName().length()), + Arrays.stream(classUnderTest.getInterfaces()) + .map(Class::getSimpleName) + .collect(joining(", ")) + ); + ++ untested; + } + catch ( LinkageError e ) + { + Throwable t = e.getCause(); + if ( null == t ) + t = e; + log(WARNING, + "{0} failed initialization: {1}", + classUnderTest.getName().substring( + 1 + classUnderTest.getPackageName().length()), + t.getMessage()); + } + } + + log((knownRegClasses.size() == passed + untested)? INFO : WARNING, + "of {0} catalog representations, {1} worked " + + "and {2} could not be tested", + knownRegClasses.size(), passed, untested); + } + } + + private static boolean engulfs(Class a, Class b) + { + return a.isAssignableFrom(b) || a == b.getDeclaringClass(); + } + + static final Comparator> + partialByEngulfs = (a,b) -> engulfs(a,b) ? 1 : engulfs(b,a) ? -1 : 0; + + /** + * Given a PostgreSQL classid and objid, obtains the corresponding Java + * CatalogObject, then finds the no-parameter, non-void-returning methods + * of all the CatalogObject interfaces it implements, and returns a table + * with the results of calling those methods. + */ + @Function( + schema="javatest", + out={ "interface text", "method text", "result text", "exception text" } + ) + public static ResultSetProvider catalogIntrospect( + @SQLType("regclass") CatalogObject cls, CatalogObject obj) + throws SQLException + { + cls = cls.of(RegClass.CLASSID); + if ( ! ( cls instanceof Known ) ) + throw new SQLException( + "Not a supported known catalog class: " + cls); + + Known kcls = (Known)cls; + Addressed aobj = obj.of(kcls); + + Class clazz = aobj.getClass(); + + Stream s = + Stream.iterate( + (new Class[] { clazz }), (a -> 0 < a.length), a -> + ( + Arrays.stream(a) + .flatMap(c -> + Stream.concat( + (c.isInterface() ? 
+ Stream.of() : Stream.of(c.getSuperclass())), + Arrays.stream(c.getInterfaces()) + ) + ) + .filter(Objects::nonNull) + .toArray(Class[]::new) + ) + ) + .flatMap(Arrays::stream) + .filter(c -> c.isInterface() && engulfs(CatalogObject.class, c)) + .sorted(partialByEngulfs.thenComparing(Class::getSimpleName)) + .distinct() + .filter(i -> CatalogObject.class.getModule().equals(i.getModule())) + .filter(i -> isPublic(i.getModifiers())) + .flatMap(i -> + { + return Arrays.stream(i.getMethods()) + .filter(m -> i == m.getDeclaringClass()); + }) + .filter(m -> void.class != m.getReturnType()) + .filter(m -> 0 == m.getParameterCount()) + .filter(m -> ! (m.isSynthetic())); + + Iterator itr = s.iterator(); + + return new ResultSetProvider.Large() + { + @Override public boolean assignRowValues(ResultSet r, long rownum) + throws SQLException + { + if ( ! itr.hasNext() ) + return false; + + Method m = itr.next(); + r.updateString(1, m.getDeclaringClass().getSimpleName()); + r.updateString(2, m.getName()); + + try + { + Object v = m.invoke(aobj); + String text; + if ( v instanceof SQLXML ) + text = ((SQLXML)v).getString(); + else + text = Objects.toString(v); + r.updateString(3, text); + } + catch ( Throwable t ) + { + String s = + Stream.iterate(t, Objects::nonNull, Throwable::getCause) + .dropWhile( + ReflectiveOperationException.class::isInstance) + .map(Object::toString) + .collect(joining("\n")); + r.updateString(4, s); + } + + return true; + } + + @Override public void close() { s.close(); } + }; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CharsetEncodings.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CharsetEncodings.java new file mode 100644 index 000000000..dfed6caa7 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CharsetEncodings.java @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.nio.charset.Charset; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import java.util.Iterator; + +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.Function; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.CharsetEncoding; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import static org.postgresql.pljava.model.CharsetEncoding.clientEncoding; + +/** + * Example using the {@link CharsetEncoding CharsetEncoding} interface. + */ +public class CharsetEncodings implements ResultSetProvider.Large +{ + /** + * Enumerate PostgreSQL's known character set encodings, indicating for + * each one whether it is the server encoding, whether it's the client + * encoding, its PostgreSQL name, its corresponding Java + * {@link Charset Charset} name, and the Java module that provides it. 
+ */ + @Function( + schema = "javatest", + out = { + "server boolean", "client boolean", "server_usable boolean", + "ordinal int", "pg_name text", "icu_name text", + "java_name text", "module text" + } + ) + public static ResultSetProvider charsets() + { + return new CharsetEncodings(); + } + + /** + * Enumerate Java's known character set encodings, trying to map them to + * PostgreSQL encodings, and indicating for + * each one whether it is the server encoding, whether it's the client + * encoding, its PostgreSQL name, its corresponding Java + * {@link Charset Charset} name, and the Java module that provides it. + */ + @Function( + schema = "javatest", + out = { + "server boolean", "client boolean", "server_usable boolean", + "ordinal int", "pg_name text", "icu_name text", + "java_name text", "module text" + } + ) + public static ResultSetProvider java_charsets(boolean try_aliases) + { + return new JavaEncodings(try_aliases); + } + + @Override + public void close() + { + } + + @Override + public boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException + { + /* + * Shamelessly exploit the fact that currentRow will be passed as + * consecutive values starting at zero and that's the same way PG + * encodings are numbered. + */ + + CharsetEncoding cse; + + try + { + cse = CharsetEncoding.fromOrdinal((int)currentRow); + } + catch ( IllegalArgumentException e ) + { + return false; + } + + if ( SERVER_ENCODING == cse ) + receiver.updateBoolean("server", true); + if ( clientEncoding() == cse ) + receiver.updateBoolean("client", true); + if ( cse.usableOnServer() ) + receiver.updateBoolean("server_usable", true); + receiver.updateInt("ordinal", cse.ordinal()); + receiver.updateString("pg_name", cse.name()); + receiver.updateString("icu_name", cse.icuName()); + + Charset cs = cse.charset(); + if ( null == cs ) + return true; + + receiver.updateString("java_name", cs.name()); + receiver.updateString("module", cs.getClass().getModule().getName()); + + return true; + } + + static class JavaEncodings implements ResultSetProvider.Large + { + final Iterator iter = + Charset.availableCharsets().values().iterator(); + final boolean tryAliases; + + JavaEncodings(boolean tryAliases) + { + this.tryAliases = tryAliases; + } + + @Override + public void close() + { + } + + @Override + public boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException + { + if ( ! iter.hasNext() ) + return false; + + Charset cs = iter.next(); + + receiver.updateString("java_name", cs.name()); + receiver.updateString("module", + cs.getClass().getModule().getName()); + + CharsetEncoding cse = null; + + try + { + cse = CharsetEncoding.fromName(cs.name()); + } + catch ( IllegalArgumentException e ) + { + } + + /* + * If the canonical Java name didn't match up with a PG encoding, + * try the first match found for any of the Java charset's aliases. + * This is not an especially dependable idea: the aliases are a Set, + * so they don't enumerate in a reproducible order, and some Java + * aliases are PG aliases for different charsets. 
+ */ + if ( null == cse && tryAliases ) + { + for ( String alias : cs.aliases() ) + { + try + { + cse = CharsetEncoding.fromName(alias); + break; + } + catch ( IllegalArgumentException e ) + { + } + } + } + + if ( null == cse ) + return true; + + if ( SERVER_ENCODING == cse ) + receiver.updateBoolean("server", true); + if ( clientEncoding() == cse ) + receiver.updateBoolean("client", true); + if ( cse.usableOnServer() ) + receiver.updateBoolean("server_usable", true); + receiver.updateInt("ordinal", cse.ordinal()); + receiver.updateString("pg_name", cse.name()); + receiver.updateString("icu_name", cse.icuName()); + + return true; + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MemoryContexts.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MemoryContexts.java new file mode 100644 index 000000000..2e6be6496 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MemoryContexts.java @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.stream.Stream; + +import org.postgresql.pljava.ResultSetProvider; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Functions to check that allocations are being made in the "upper" memory + * context as necessary when SPI has been used. + */ +public class MemoryContexts { + private MemoryContexts() + { + } + + private static Connection ensureSPIConnected() throws SQLException + { + Connection c = getConnection("jdbc:default:connection"); + try ( Statement s = c.createStatement() ) + { + s.execute("UPDATE javatest.foobar_1 SET stuff = 'a' WHERE FALSE"); + } + return c; + } + + /** + * Return an array result after connecting SPI, to ensure the result isn't + * allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest") + public static String[] nonSetArrayResult() throws SQLException + { + ensureSPIConnected(); + return new String[] { "Hello", "world" }; + } + + /** + * Return a coerced result after connecting SPI, to ensure the result isn't + * allocated in SPI's short-lived memory context. + *
+ * The mismatch of the Java type {@code int} and the PostgreSQL type + * {@code numeric} forces PL/Java to create a {@code Coerce} node applying + * a cast, the correct allocation of which is tested here. + */ + @Function(schema = "javatest", type = "numeric") + public static int nonSetCoercedResult() throws SQLException + { + ensureSPIConnected(); + return 42; + } + + /** + * Return a composite result after connecting SPI, to ensure the result + * isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest", out = { "a text", "b text" }) + public static boolean nonSetCompositeResult(ResultSet out) + throws SQLException + { + ensureSPIConnected(); + out.updateString(1, "Hello"); + out.updateString(2, "world"); + return true; + } + + /** + * Return a fixed-length base UDT result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest") + public static ComplexScalar nonSetFixedUDTResult() throws SQLException + { + ensureSPIConnected(); + return new ComplexScalar(1.2, 3.4, "javatest.complexscalar"); + } + + /** + * Return a composite UDT result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest") + public static ComplexTuple nonSetCompositeUDTResult() throws SQLException + { + Connection c = ensureSPIConnected(); + try ( + Statement s = c.createStatement(); + ResultSet r = s.executeQuery( + "SELECT CAST ( '(1.2,3.4)' AS javatest.complextuple )") + ) + { + r.next(); + return r.getObject(1, ComplexTuple.class); + } + } + + /** + * Return a set-of (non-composite) result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest") + public static Iterator setNonCompositeResult() + { + final Iterator it = Stream.of("a", "b", "c").iterator(); + return new Iterator<>() + { + @Override + public boolean hasNext() + { + try + { + ensureSPIConnected(); + return it.hasNext(); + } + catch ( SQLException e ) + { + throw new RuntimeException(e.getMessage(), e); + } + } + + @Override + public String next() + { + try + { + ensureSPIConnected(); + return it.next(); + } + catch ( SQLException e ) + { + throw new RuntimeException(e.getMessage(), e); + } + } + }; + } + + /** + * Return a set-of composite result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest", out = {"a text", "b text"}) + public static ResultSetProvider setCompositeResult() + { + return new ResultSetProvider.Large() + { + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + ensureSPIConnected(); + if ( currentRow > 2 ) + return false; + out.updateString(1, "a"); + out.updateString(2, "b"); + return true; + } + + @Override + public void close() + { + } + }; + } + + /** + * Prepare a statement after connecting SPI and use it later, to ensure + * important allocations are not in SPI's short-lived memory context. 
+ */ + @Function(schema = "javatest", out = {"a text", "b text"}) + public static ResultSetProvider preparedStatementContext() + throws SQLException + { + Connection c = ensureSPIConnected(); + final PreparedStatement ps = c.prepareStatement( + "SELECT " + + " to_char( " + + " extract(microseconds FROM statement_timestamp()) % 3999, " + + " ?)"); + ps.setString(1, "RN"); + + return new ResultSetProvider.Large() + { + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + ensureSPIConnected(); + if ( currentRow > 2 ) + return false; + try ( ResultSet rs = ps.executeQuery() ) + { + rs.next(); + out.updateString(1, rs.getString(1)); + ps.setString(1, "RN"); + return true; + } + } + + @Override + public void close() + { + } + }; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java index d6dd14bfc..f6be420a8 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java @@ -67,12 +67,13 @@ import org.postgresql.pljava.Adjusting; import static org.postgresql.pljava.Adjusting.XML.setFirstSupported; -import org.postgresql.pljava.SessionManager; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.MappedUDT; import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + import static org.postgresql.pljava.example.LoggerTest.logMessage; /* Imports needed just for the SAX flavor of "low-level XML echo" below */ @@ -643,9 +644,7 @@ public static SQLXML transformXML( * for setting the Transformer to use the server encoding. */ if ( rlt instanceof StreamResult ) - t.setOutputProperty(ENCODING, - SessionManager.current().frozenSystemProperties() - .getProperty("org.postgresql.server.encoding")); + t.setOutputProperty(ENCODING, SERVER_ENCODING.charset().name()); else if ( Boolean.TRUE.equals(indent) ) logMessage("WARNING", "indent requested, but howout specifies a non-stream " + @@ -713,9 +712,7 @@ private static SQLXML echoSQLXML(SQLXML sx, int howin, int howout) * for setting the Transformer to use the server encoding. */ if ( howout < 5 ) - t.setOutputProperty(ENCODING, - SessionManager.current().frozenSystemProperties() - .getProperty("org.postgresql.server.encoding")); + t.setOutputProperty(ENCODING, SERVER_ENCODING.charset().name()); t.transform(src, rlt); } catch ( TransformerException te ) diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java index 49abf1382..6b814b03a 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java @@ -28,7 +28,7 @@ * to the {@link #executeSelect executeSelect} function. 
*/ @SQLAction(requires="selecttorecords fn", -install= +install={ " SELECT " + " CASE WHEN r IS DISTINCT FROM ROW('Foo'::varchar, 1::integer, 1.5::float, " + " 23.67::decimal(8,2), '2005-06-01'::date, '20:56'::time, " + @@ -41,8 +41,20 @@ " 'select ''Foo'', 1, 1.5::float, 23.67, ''2005-06-01'', " + " ''20:56''::time, ''192.168.0''') " + " AS r(t_varchar varchar, t_integer integer, t_float float, " + -" t_decimal decimal(8,2), t_date date, t_time time, t_cidr cidr)" -) +" t_decimal decimal(8,2), t_date date, t_time time, t_cidr cidr)", + +" SELECT " + +" CASE WHEN every(a IS NOT DISTINCT FROM b) " + +" THEN javatest.logmessage('INFO', 'nested/SPI SetOfRecordTest ok') " + +" ELSE javatest.logmessage('WARNING', 'nested/SPI SetOfRecordTest not ok') " + +" END " + +" FROM " + +" javatest.executeselecttorecords('" + +" SELECT " + +" javatest.executeselect(''select generate_series(1,1)''), " + +" javatest.executeselect(''select generate_series(1,1)'') " + +" ') AS t(a text, b text)" +}) public class SetOfRecordTest implements ResultSetHandle { @Function(schema="javatest", name="executeselecttorecords", diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TupleTableSlotTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TupleTableSlotTest.java new file mode 100644 index 000000000..deed3d3c3 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TupleTableSlotTest.java @@ -0,0 +1,768 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import java.util.ArrayList; +import java.util.Arrays; +import static java.util.Arrays.deepToString; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import java.time.LocalDateTime; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.AdapterException;//for now; not planned API +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.TargetList; +import org.postgresql.pljava.TargetList.Cursor; +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Function; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.Portal; +import static org.postgresql.pljava.model.Portal.ALL; +import static org.postgresql.pljava.model.Portal.Direction.FORWARD; +import org.postgresql.pljava.model.SlotTester; +import 
org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +/** + * A temporary test jig during TupleTableSlot development; intended + * to be used from a debugger. + */ +@SQLAction(requires = "modelToJDBC", install = +"WITH" + +" result AS (" + +" SELECT" + +" * " + +" FROM" + +" javatest.modelToJDBC(" + +" 'SELECT DISTINCT" + +" CAST ( relacl AS text ), relacl" + +" FROM" + +" pg_class" + +" WHERE" + +" relacl IS NOT NULL'," + +" 'org.postgresql.pljava.pg.adt.TextAdapter', 'INSTANCE'," + +" 'org.postgresql.pljava.pg.adt.GrantAdapter', 'LIST_INSTANCE'" + +" ) AS r(raw text, cooked text)" + +" )," + +" conformed AS (" + +" SELECT" + +" raw, translate(cooked, '[] ', '{}') AS cooked" + +" FROM" + +" result" + +" )" + +" SELECT" + +" CASE WHEN every(raw = cooked)" + +" THEN javatest.logmessage('INFO', 'AclItem[] ok')" + +" ELSE javatest.logmessage('WARNING', 'AclItem[] ng')" + +" END" + +" FROM" + +" conformed" +) +@SQLAction(requires = "modelToJDBC", install = +"WITH" + +" result AS (" + +" SELECT" + +" raw, cooked, CAST ( cooked AS numeric ) AS refried" + +" FROM" + +" javatest.modeltojdbc(" + +" 'SELECT" + +" CAST ( pow AS text ) AS txt, pow AS bin" + +" FROM" + +" generate_series(-20., 20., 1.) AS gs(p)," + +" (VALUES (1e-16), (1e-65)) AS pf(f)," + +" (VALUES (1.), (-1.)) AS sf(sgn)," + +" LATERAL (SELECT sgn*(37.821637 ^ (p + f))) AS s(pow)'," + +" 'org.postgresql.pljava.pg.adt.TextAdapter', 'INSTANCE'," + +" 'org.postgresql.pljava.pg.adt.NumericAdapter', 'BIGDECIMAL_INSTANCE'" + +" ) AS j(raw text, cooked text)" + +" )" + +" SELECT" + +" CASE WHEN every(raw = cooked OR raw = CAST ( refried AS text ))" + +" THEN javatest.logmessage('INFO', 'NUMERIC ok')" + +" ELSE javatest.logmessage('WARNING', 'NUMERIC ng')" + +" END" + +" FROM" + +" result" +) +@SQLAction(requires = "tupleTableSlotTest", install = +"SELECT" + +" javatest.tupletableslottest(" + +" 'SELECT most_common_vals FROM pg_catalog.pg_stats'," + +" 'org.postgresql.pljava.pg.adt.ArrayAdapter', 'TYPE_OBTAINING_INSTANCE')" +) +public class TupleTableSlotTest +{ + /* + * Collect some Adapter instances that are going to be useful in the code + * below. Is it necessary they be static final? No, they can be obtained at + * any time, but collecting these here will keep the example methods tidier + * below. + * + * These are "leaf" adapters: they work from the PostgreSQL types directly. + */ + static final AsLong < ?> INT8; + static final AsInt < ?> INT4; + static final AsShort < ?> INT2; + static final AsByte < ?> INT1; + static final AsDouble < ?> FLOAT8; + static final AsFloat < ?> FLOAT4; + static final AsBoolean< ?> BOOL; + + static final As TEXT; + static final As LDT; // for the PostgreSQL TIMESTAMP type + + /* + * Now some adapters that can be derived from leaf adapters by composing + * non-leaf adapters over them. + * + * By default, the Adapters for primitive types can't fetch a null + * value. There is no value in the primitive's value space that could + * unambiguously represent null, and a DBMS should not go and reuse an + * otherwise-valid value to also mean null, if you haven't said to. But in + * a case where that is what you want, it is simple to write an adapter with + * the wanted behavior and compose it over the original one. + */ + static final AsDouble F8_NaN; // primitive double using NaN for null + + /* + * Reference-typed adapters have no trouble with null values by default; + * they'll just produce Java null. 
But suppose it is more convenient to get + * an Optional instead of a LocalDateTime that might be null. + * An Adapter for that can be obtained by composition. + */ + static final As,?> LDT_O; + + /* + * A composing adapter expecting a reference type can also be composed + * over one that produces a primitive type. It will see the values + * automatically boxed. + * + * Corollary: should the desired behavior be not to produce Optional, + * but simply to enable null handling for a primitive type by producing + * its boxed form or null, just one absolutely trivial composing adapter + * could add that behavior over any primitive adapter. + */ + static final As ,?> INT8_O; + + /* + * Once properly-typed adapters for component types are in hand, + * getting properly-typed array adapters is straightforward. (In Java 10+, + * a person might prefer to set these up at run time in local variables, + * where var could be used instead of these longwinded declarations.) + * + * For fun, I8x1 will be built over INT8_O, so it will really produce + * Optional[] instead of long[]. F8x5 will be built over F8_NaN, so it + * will produce double[][][][][], but null elements won't be rejected, + * and will appear as NaN. DTx2 will be built over LDT_O, so it will really + * produce Optional[][]. + */ + static final As[] ,?> I8x1; + static final As< int[][] ,?> I4x2; + static final As< short[][][] ,?> I2x3; + static final As< byte[][][][] ,?> I1x4; + static final As< double[][][][][] ,?> F8x5; + static final As< float[][][][][][] ,?> F4x6; + static final As< boolean[][][][][] ,?> Bx5; + static final As[][],?> DTx2; + + static + { + /* + * This is the very untidy part, while the planned Adapter manager API + * is not yet implemented. The extremely temporary adapterPlease method + * can be used to grovel some adapters out of PL/Java's innards, as long + * as the name of a class and a static final field is known. + * + * The adapter manager will have generic methods to obtain adapters with + * specific compile-time types. The adapterPlease method, not so much. + * It needs to be used with ugly casts. + */ + try + { + Connection conn = getConnection("jdbc:default:connection"); + SlotTester t = conn.unwrap(SlotTester.class); + + String cls = "org.postgresql.pljava.pg.adt.Primitives"; + INT8 = (AsLong )t.adapterPlease(cls, "INT8_INSTANCE"); + INT4 = (AsInt )t.adapterPlease(cls, "INT4_INSTANCE"); + INT2 = (AsShort )t.adapterPlease(cls, "INT2_INSTANCE"); + INT1 = (AsByte )t.adapterPlease(cls, "INT1_INSTANCE"); + FLOAT8 = (AsDouble )t.adapterPlease(cls, "FLOAT8_INSTANCE"); + FLOAT4 = (AsFloat )t.adapterPlease(cls, "FLOAT4_INSTANCE"); + BOOL = (AsBoolean)t.adapterPlease(cls, "BOOLEAN_INSTANCE"); + + cls = "org.postgresql.pljava.pg.adt.TextAdapter"; + + /* + * SuppressWarnings must appear on a declaration, making it hard to + * apply here, an initial assignment to a final field declared + * earlier. But making this the declaration of a new local variable, + * with the actual wanted assignment as a "side effect", works. + * (The "unnamed variable" _ previewed in Java 21 would be ideal.) 
+ */ + @SuppressWarnings("unchecked") Object _1 = + TEXT = (As)t.adapterPlease(cls, "INSTANCE"); + + cls = "org.postgresql.pljava.pg.adt.DateTimeAdapter$JSR310"; + + @SuppressWarnings("unchecked") Object _2 = + LDT = + (As)t.adapterPlease(cls, "TIMESTAMP_INSTANCE"); + } + catch ( SQLException | ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + + /* + * Other than those stopgap uses of adapterPlease, the rest is + * not so bad. Instantiate some composing adapters over the leaf + * adapters already obtained: + */ + + F8_NaN = new NullReplacingDouble(FLOAT8, Double.NaN); + LDT_O = new AsOptional<>(LDT); + INT8_O = new AsOptional<>(INT8); + + /* + * (Those composing adapters should be provided by PL/Java and known + * to the adapter manager so it can compose them for you. For now, + * they are just defined in this example file, showing that client + * code can easily supply its own.) + * + * Java array-of-array adapters of various dimensionalities are + * easily built from the adapters chosen for their component types. + */ + + I8x1 = INT8_O .a1() .build(); // array of Optional + I4x2 = INT4 .a2() .build(); + I2x3 = INT2 .a2() .a1() .build(); + I1x4 = INT1 .a4() .build(); + F8x5 = F8_NaN .a4() .a1() .build(); // 5D F8 array, null <-> NaN + F4x6 = FLOAT4 .a4() .a2() .build(); + Bx5 = BOOL .a4() .a1() .build(); + DTx2 = LDT_O .a2() .build(); // 2D of optional LDT + } + + /** + * Test {@link TargetList} and its functional API for retrieving values. + */ + @Function(schema="javatest") + public static Iterator targetListTest() + throws SQLException, ReflectiveOperationException + { + try ( + Connection conn = getConnection("jdbc:default:connection"); + Statement s = conn.createStatement(); + ) + { + SlotTester t = conn.unwrap(SlotTester.class); + + String query = + "SELECT" + + " to_char(stamp, 'DAY') AS day," + + " stamp" + + " FROM" + + " generate_series(" + + " timestamp 'epoch', timestamp 'epoch' + interval 'P6D'," + + " interval 'P1D'" + + " ) AS s(stamp)"; + + try ( Portal p = t.unwrapAsPortal(s.executeQuery(query)) ) + { + Projection proj = p.tupleDescriptor(); + + /* + * A quick glance shows this project(...) to be unneeded, as the + * query's TupleDescriptor already has exactly these columns in + * this order, and could be used below directly. On the other + * hand, this line will keep things working if someone later + * changes the query, reordering these columns or adding + * to them, and it may give a more explanatory exception if + * a change to the query does away with an expected column. + */ + proj = proj.project("day", "stamp"); + + List fetched = p.fetch(FORWARD, ALL); + + List results = new ArrayList<>(); + + proj.applyOver(fetched, c -> + { + /* + * This loop demonstrates a straightforward use of two + * Adapters and a lambda with two parameters to go through + * the retrieved rows. + * + * Note that applyOver does not, itself, iterate over the + * rows; it supplies a Cursor object that can be iterated to + * do that. This gives the lambda body of applyOver more + * control over how that will happen. + * + * The Cursor object is mutated during iteration so the + * same object represents each row in turn; the iteration + * variable is simply the Cursor object itself, so does not + * need to be used. Once the "unnamed variable" _ is more + * widely available (Java 21 has it, with --enable-preview), + * it will be the obvious choice for the iteration variable + * here. 
+ * + * Within the loop, the cursor represents the single current + * row as far as its apply(...) methods are concerned. + * + * Other patterns, such as the streams API, can also be used + * (starting with a stream of the cursor object itself, + * again for each row), but can involve more fuss when + * checked exceptions are involved. + */ + for ( Cursor __ : c ) + { + c.apply(TEXT, LDT, // the adapters + ( v0, v1 ) -> // the fetched values + results.add(v0 + " | " + v1.getDayOfWeek()) + ); + } + + /* + * This equivalent loop uses two lambdas in curried style + * to do the same processing of the same two columns. That + * serves no practical need in this example; a perfectly + * good method signature for two reference columns was seen + * above. This loop illustrates the technique for combining + * the available methods when there isn't one that exactly + * fits the number and types of the target columns. + */ + for ( Cursor __ : c ) + { + c.apply(TEXT, + v0 -> + c.apply(LDT, + v1 -> + results.add(v0 + " | " + v1.getDayOfWeek()) + ) + ); + } + + return null; + }); + + return results.iterator(); + } + } + } + + /** + * Test retrieval of a PostgreSQL array as a multidimensional Java array. + */ + @Function(schema="javatest") + public static Iterator javaMultiArrayTest() + throws SQLException, ReflectiveOperationException + { + Connection conn = getConnection("jdbc:default:connection"); + SlotTester t = conn.unwrap(SlotTester.class); + + String query = + "VALUES (" + + " CAST ( '{1,2}' AS int8 [] ), " + + " CAST ( '{{1},{2}}' AS int4 [] ), " + + " CAST ( '{{{1,2,3}}}' AS int2 [] ), " + + " CAST ( '{{{{1},{2},{3}}}}' AS \"char\" [] ), " + // ASCII + " CAST ( '{{{{{1,2,3}}}}}' AS float8 [] ), " + + " CAST ( '{{{{{{1},{2},{3}}}}}}' AS float4 [] ), " + + " CAST ( '{{{{{t},{f},{t}}}}}' AS boolean [] ), " + + " CAST ( '{{''epoch''}}' AS timestamp [] ) " + + "), (" + + " '{NULL}', NULL, NULL, NULL, '{{{{{1,NULL,3}}}}}', NULL, NULL," + + " '{{NULL}}'" + + ")"; + + Portal p = t.unwrapAsPortal(conn.createStatement().executeQuery(query)); + Projection proj = p.tupleDescriptor(); + + List tups = p.fetch(FORWARD, ALL); + + List result = new ArrayList<>(); + + /* + * Then just use the right adapter for each column. + */ + proj.applyOver(tups, c -> + { + for ( Cursor __ : c ) + { + c.apply(I8x1, I4x2, I2x3, I1x4, F8x5, F4x6, Bx5, DTx2, + ( v0, v1, v2, v3, v4, v5, v6, v7 ) -> + result.addAll(List.of( + Arrays.toString(v0), deepToString(v1), deepToString(v2), + deepToString(v3), deepToString(v4), deepToString(v5), + deepToString(v6), deepToString(v7), + v7[0][0].orElse(LocalDateTime.MAX).getMonth() + "" + )) + ); + } + return null; + }); + + return result.iterator(); + } + + /** + * An adapter to compose over another one, adding some wanted behavior. + * + * There should eventually be a built-in set of composing adapters like + * this available for ready use, and automatically composed for you by an + * adapter manager when you say "I want an adapter for this PG type to this + * Java type and behaving this way." + * + * Until then, let this illustrate the simplicity of writing one. + */ + public static class NullReplacingDouble extends AsDouble + { + private final double replacement; + + @Override + public boolean canFetchNull() { return true; } + + @Override + public double fetchNull(Attribute a) + { + return replacement; + } + + // It would be nice to let this method be omitted and this behavior + // assumed, in a composing adapter with the same type for return and + // parameter. Maybe someday. 
+ public double adapt(Attribute a, double value) + { + return value; + } + + private static final Adapter.Configuration config = + Adapter.configure(NullReplacingDouble.class, null); + + NullReplacingDouble(AsDouble over, double valueForNull) + { + super(config, over); + replacement = valueForNull; + } + } + + /** + * Another example of a useful composing adapter that should eventually be + * part of a built-in set. + */ + public static class AsOptional extends As,T> + { + // canFetchNull isn't needed; its default in As is true. + + @Override + public Optional fetchNull(Attribute a) + { + return Optional.empty(); + } + + public Optional adapt(Attribute a, T value) + { + return Optional.of(value); + } + + private static final Adapter.Configuration config = + Adapter.configure(AsOptional.class, null); + + /* + * This adapter may be composed over any Adapter, including those + * of primitive types as well as the reference-typed As. When + * constructed over a primitive-returning adapter, values will be boxed + * when passed to adapt(). + */ + AsOptional(Adapter over) + { + super(config, over, null); + } + } + + /** + * A surprisingly useful composing adapter that should eventually be + * part of a built-in set. + *
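 An editor's sketch, not part of the patch: the composition described in the next paragraph is exactly what modelToJDBC() further below does when handed a primitive-typed adapter. Raw types are used here just as that method uses them, and the helper is assumed to be a static method of this class so that the Identity class defined below is in scope.

 // (assumed to be a static method of this class, so Identity is in scope)
 static As referenceTyped(Adapter a)
 {
     return ( a instanceof As ) ? (As)a : new Identity(a);
 }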
+ * Surprisingly useful, because although it "does" nothing, composing it + * over any primitive adapter produces one that returns the boxed form, and + * Java null for SQL null. + */ + public static class Identity extends As + { + // the inherited fetchNull returns null, which is just right + + public T adapt(Attribute a, T value) + { + return value; + } + + private static final Adapter.Configuration config = + Adapter.configure(Identity.class, null); + + /* + * Another choice could be to restrict 'over' to extend Primitive, as + * there isn't much point composing this adapter over one of reference + * type ... unless you want Java null for SQL null and the 'over' + * adapter produces something else. + */ + Identity(Adapter over) + { + super(config, over, null); + } + } + + /** + * Test retrieving results from a query using the PG-model API and returning + * them to the caller using the legacy JDBC API. + * @param query a query producing some number of columns + * @param adapters an array of strings, twice the number of columns, + * supplying a class name and static field name for the ugly temporary + * {@code adapterPlease} method, one such pair for each result column + */ + @Function( + schema = "javatest", type = "pg_catalog.record", variadic = true, + onNullInput = RETURNS_NULL, provides = "modelToJDBC" + ) + public static ResultSetProvider modelToJDBC(String query, String[] adapters) + throws SQLException, ReflectiveOperationException + { + Connection conn = getConnection("jdbc:default:connection"); + SlotTester t = conn.unwrap(SlotTester.class); + Portal p = t.unwrapAsPortal(conn.createStatement().executeQuery(query)); + TupleDescriptor td = p.tupleDescriptor(); + + if ( adapters.length != 2 * td.size() ) + throw new SQLException(String.format( + "query makes %d columns so 'adapters' should have %d " + + "elements, not %d", td.size(), 2*td.size(), adapters.length)); + + if ( Arrays.stream(adapters).anyMatch(Objects::isNull) ) + throw new SQLException("adapters array has null element"); + + As[] resolved = new As[ td.size() ]; + + for ( int i = 0 ; i < resolved.length ; ++ i ) + { + Adapter a = + t.adapterPlease(adapters[i<<1], adapters[(i<<1) + 1]); + if ( a instanceof As ) + resolved[i] = (As)a; + else + resolved[i] = new Identity(a); + } + + return new ResultSetProvider.Large() + { + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + if ( 0 == currentRow ) + { + int rcols = out.getMetaData().getColumnCount(); + if ( td.size() != rcols ) + throw new SQLException(String.format( + "query makes %d columns but result descriptor " + + "has %d", td.size(), rcols)); + } + + /* + * This example will fetch one tuple at a time here in the + * ResultSetProvider. This is a low-level interface to Postgres. + * In the SFRM_ValuePerCall protocol that ResultSetProvider + * supports, a fresh call from Postgres is made to retrieve each + * row. The Portal lives in a memory context that persists + * across the multiple calls, but the fetch result tups only + * exist in a child of the SPI context set up for each call. + * So here we only fetch as many tups as we can use to make one + * result row. + * + * If the logic involved fetching a bunch of rows and processing + * those into Java representations with no further dependence on + * the native tuples, then of course that could be done all in + * advance. 
+ */ + List tups = p.fetch(FORWARD, 1); + if ( 0 == tups.size() ) + return false; + + TupleTableSlot tts = tups.get(0); + + for ( int i = 0 ; i < resolved.length ; ++ i ) + { + Object o = tts.get(i, resolved[i]); + try + { + out.updateObject(1 + i, o); + } + catch ( SQLException e ) + { + try + { + out.updateObject(1 + i, o.toString()); + } + catch ( SQLException e2 ) + { + e.addSuppressed(e2); + throw e; + } + } + } + + return true; + } + + @Override + public void close() + { + p.close(); + } + }; + } + + /** + * A temporary test jig during TupleTableSlot development; intended + * to be used from a debugger. + */ + @Function(schema="javatest", provides="tupleTableSlotTest") + public static void tupleTableSlotTest( + String query, String adpClass, String adpInstance) + throws SQLException, ReflectiveOperationException + { + new TupleTableSlotTest().testWith(query, adpClass, adpInstance); + } + + As adpL; + AsLong adpJ; + AsDouble adpD; + AsInt adpI; + AsFloat adpF; + AsShort adpS; + AsChar adpC; + AsByte adpB; + AsBoolean adpZ; + + void testWith(String query, String adpClass, String adpInstance) + throws SQLException, ReflectiveOperationException + { + Connection c = getConnection("jdbc:default:connection"); + SlotTester t = c.unwrap(SlotTester.class); + + ResultSet rs = c.createStatement().executeQuery(query); + Portal p = t.unwrapAsPortal(rs); + TupleDescriptor td = p.tupleDescriptor(); + + List tups = p.fetch(FORWARD, ALL); + + int ntups = tups.size(); + + boolean firstTime = true; + + int form = 8; // set with debugger, 8 selects reference-typed adpL + + boolean go; // true until set false by debugger each time through loop + + /* + * Results from adapters of assorted types. + */ + long jj = 0; + double dd = 0; + int ii = 0; + float ff = 0; + short ss = 0; + char cc = 0; + byte bb = 0; + boolean zz = false; + Object ll = null; + + for ( TupleTableSlot tts : tups ) + { + if ( firstTime ) + { + firstTime = false; + Adapter a = t.adapterPlease(adpClass, adpInstance); + if ( a instanceof As ) + adpL = (As)a; + else if ( a instanceof AsLong ) + adpJ = (AsLong)a; + else if ( a instanceof AsDouble ) + adpD = (AsDouble)a; + else if ( a instanceof AsInt ) + adpI = (AsInt)a; + else if ( a instanceof AsFloat ) + adpF = (AsFloat)a; + else if ( a instanceof AsShort ) + adpS = (AsShort)a; + else if ( a instanceof AsChar ) + adpC = (AsChar)a; + else if ( a instanceof AsByte ) + adpB = (AsByte)a; + else if ( a instanceof AsBoolean ) + adpZ = (AsBoolean)a; + } + + for ( Attribute att : tts.descriptor() ) + { + go = true; + while ( go ) + { + go = false; + try + { + switch ( form ) + { + case 0: jj = tts.get(att, adpJ); break; + case 1: dd = tts.get(att, adpD); break; + case 2: ii = tts.get(att, adpI); break; + case 3: ff = tts.get(att, adpF); break; + case 4: ss = tts.get(att, adpS); break; + case 5: cc = tts.get(att, adpC); break; + case 6: bb = tts.get(att, adpB); break; + case 7: zz = tts.get(att, adpZ); break; + case 8: ll = tts.get(att, adpL); break; + } + } + catch ( AdapterException e ) + { + System.out.println(e); + } + } + } + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/DoSQL.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/DoSQL.java new file mode 100644 index 000000000..713bca3fd --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/DoSQL.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.polyglot; + +import java.sql.SQLException; + +import org.postgresql.pljava.PLJavaBasedLanguage.InlineBlocks; + +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.ProceduralLanguage; + +/* + * The imports above are the basics to make this a language handler. + * + * These imports below for JDBC / database access might not be so common in a + * real language handler; you'd expect it to focus on compiling/executing some + * client code, and the client code is where you'd expect to see what looks + * more like application logic like this. But this is a handler for a very + * simple language that only takes the given string and hands it to JDBC, so it + * does look a bit like application logic. + */ +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.Statement; +import org.postgresql.pljava.model.Portal; +import static org.postgresql.pljava.model.Portal.ALL; +import static org.postgresql.pljava.model.Portal.Direction.FORWARD; +import org.postgresql.pljava.model.SlotTester; // temporary development hack + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Example of a procedural language with only DO blocks, built atop PL/Java. + */ +@SQLAction(requires = "pljavahandler language", install = { +"CREATE OR REPLACE FUNCTION javatest.dosql_validator(oid)" + +" RETURNS void" + +" LANGUAGE pljavahandler AS 'org.postgresql.pljava.example.polyglot.DoSQL'", + +"COMMENT ON FUNCTION javatest.dosql_validator(oid) IS " + +"'Validator function for the dosql procedural language'", + +"CREATE LANGUAGE dosql" + +" HANDLER sqlj.pljavaDispatchRoutine" + +" INLINE sqlj.pljavaDispatchInline" + +" VALIDATOR javatest.dosql_validator", + +"COMMENT ON LANGUAGE dosql IS " + +"'The dosql procedural language, which is implemented atop PL/Java, " + +"and supports inline code blocks that are just plain SQL, to be executed " + +"with any output discarded. COMMIT and ROLLBACK are recognized " + +"for transaction control.'", + +"DO LANGUAGE dosql 'SELECT javatest.logmessage(''INFO'', ''DoSQL ok'')'" +}, remove = { +"DROP LANGUAGE dosql", +"DROP FUNCTION javatest.dosql_validator(oid)" +}) +public class DoSQL implements InlineBlocks +{ + private final ProceduralLanguage pl; + + /** + * There must be a public constructor with a {@code ProceduralLanguage} + * parameter. + *
+ * The parameter can be ignored, or used to determine the name, oid, + * accessibility, or other details of the declared PostgreSQL language + * your handler class has been instantiated for. + */ + public DoSQL(ProceduralLanguage pl) + { + this.pl = pl; + } + + /** + * The sole method needed to implement inline code blocks. + *
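+ * As described below, anything that is not transaction control is simply
+ * handed to {@code Statement.execute}, and whatever it produces is consumed
+ * and discarded. A plain-JDBC sketch of that consuming and discarding, with
+ * {@code s} the {@code Statement} (the implementation below instead unwraps
+ * each result set as a {@code Portal} and moves past its rows):
+ *
+ *   for ( boolean isRS = s.execute(inlineSource);; isRS = s.getMoreResults() )
+ *   {
+ *       if ( isRS )
+ *       {
+ *           try ( ResultSet rs = s.getResultSet() )
+ *           {
+ *               while ( rs.next() )
+ *                   ;   // discard each row
+ *           }
+ *       }
+ *       else if ( -1 == s.getUpdateCount() )
+ *           break;
+ *   }
+ *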
+ * This implementation will recognize {@code COMMIT} or {@code ROLLBACK} + * and call the dedicated JDBC {@code Connection} methods for those, or + * otherwise just pass the string to {@code Statement.execute} and consume + * and discard any results. + */ + @Override + public void execute(String inlineSource, boolean atomic) throws SQLException + { + try ( + Connection c = getConnection("jdbc:default:connection"); + Statement s = c.createStatement() + ) + { + Matcher m = COMMIT_OR_ROLLBACK.matcher(inlineSource); + if ( m.matches() ) + { + if ( -1 != m.start(1) ) + c.commit(); + else + c.rollback(); + return; + } + + /* + * Not COMMIT or ROLLBACK, just hand it to execute() and consume + * any results. + */ + + SlotTester st = c.unwrap(SlotTester.class); + long count = 0; + + for ( + boolean isRS = s.execute(inlineSource); + -1 != count; + isRS = s.getMoreResults() + ) + { + if ( isRS ) + { + try ( Portal p = st.unwrapAsPortal(s.getResultSet()) ) + { + p.move(FORWARD, ALL); + } + } + else + count = s.getLargeUpdateCount(); + } + } + } + + static final Pattern COMMIT_OR_ROLLBACK = + Pattern.compile("^\\s*+(?i:(commit)|(rollback))\\s*+$"); +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/Glot64.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/Glot64.java new file mode 100644 index 000000000..d0b894a42 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/Glot64.java @@ -0,0 +1,819 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.polyglot; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import static java.nio.charset.StandardCharsets.US_ASCII; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import java.util.Base64; +import java.util.BitSet; +import java.util.Objects; +import static java.util.Optional.ofNullable; + +import static java.util.stream.Collectors.toList; + +import org.postgresql.pljava.PLJavaBasedLanguage.InlineBlocks; +import org.postgresql.pljava.PLJavaBasedLanguage.ReturningSets; +import org.postgresql.pljava.PLJavaBasedLanguage.Routines; +import org.postgresql.pljava.PLJavaBasedLanguage.Routine; +import org.postgresql.pljava.PLJavaBasedLanguage.Template; +import org.postgresql.pljava.PLJavaBasedLanguage.Triggers; +import org.postgresql.pljava.PLJavaBasedLanguage.TriggerFunction; +import org.postgresql.pljava.PLJavaBasedLanguage.TriggerTemplate; +import org.postgresql.pljava.PLJavaBasedLanguage.UsingTransforms; + +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.ProceduralLanguage; +import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased; +import org.postgresql.pljava.model.RegProcedure; +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Lookup; +import org.postgresql.pljava.model.Transform; +import org.postgresql.pljava.model.Transform.FromSQL; +import org.postgresql.pljava.model.Transform.ToSQL; +import 
org.postgresql.pljava.model.Trigger; +import org.postgresql.pljava.model.Trigger.ForTrigger; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +/** + * Example of a procedural language implemented atop PL/Java. + *
+ * Glot64 has a couple of features rarely found in a PostgreSQL PL. First, you + * can't use it to do anything, other than print some text to standard output. + * That's the server's standard output, which you probably won't even see unless + * running the server with standard output to your terminal, as you might run a + * test instance under PL/Java's test harness. On a production server, what is + * written to standard output may well go nowhere at all, and then it may truly + * be said your Glot64 routines do nothing at all. + *
+ * Second, Glot64 has the rare property that the compiled form of its code is + * easier to read than the source. That's because when you + * {@code CREATE FUNCTION} or {@code CREATE PROCEDURE} in Glot64, you write + * the {@code AS} part in Base64. It gets 'compiled' by decoding it to ASCII. + * (Unless it is malformed Base64 or doesn't decode to ASCII. Then your routine + * gets rejected by the validator. Better luck next time.) Then, when you call + * the function or procedure, the 'compiled' code is written to standard output. + * Therefore, + *
+ * CREATE FUNCTION hello() RETURNS void
+ *   LANGUAGE glot64
+ *   AS 'SGVsbG8sIHdvcmxkIQo=';
+ *
+ * defines a function that writes "Hello, world!" when you call it. + *
+ * However, Glot64 writes several other things to standard output ahead of the + * output of the routine itself. That is the real purpose: to illustrate how + * PL/Java's language handler API is arranged, and how the information about the + * parameters and result type (or types) will be presented to your code. + *
+ * You can declare a Glot64 function or procedure with any number and types of + * parameters and return type (or {@code OUT} parameters). Because your routine + * will not use any of the arguments or produce any result, it doesn't care how + * they are declared. By declaring Glot64 functions in several different ways, + * you can see, in the output messages, how the API presents that information. + */ +@SQLAction(requires = "pljavahandler language", install = { +"CREATE OR REPLACE FUNCTION javatest.glot64_validator(oid)" + +" RETURNS void" + +" LANGUAGE pljavahandler AS 'org.postgresql.pljava.example.polyglot.Glot64'", + +"COMMENT ON FUNCTION javatest.glot64_validator(oid) IS " + +"'Validator function for the glot64 procedural language'", + +"CREATE LANGUAGE glot64" + +" HANDLER sqlj.pljavaDispatchRoutine" + +" INLINE sqlj.pljavaDispatchInline" + +" VALIDATOR javatest.glot64_validator", + +"COMMENT ON LANGUAGE glot64 IS " + +"'The glot64 procedural language, which is implemented atop PL/Java, " + +"and supports functions, procedures, and inline code blocks'", + +"CREATE FUNCTION javatest.hello() RETURNS void" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE FUNCTION javatest.hello(anyelement) RETURNS anyelement" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE FUNCTION javatest.hello(a int4, b int4, OUT c int2, OUT d int2)" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE FUNCTION javatest.hello(text, VARIADIC \"any\") RETURNS text[]" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE PROCEDURE javatest.say_hello()" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"DO LANGUAGE glot64 'SGVsbG8sIHdvcmxkIQo='", + +"SELECT javatest.hello()", + +"SELECT javatest.hello()", + +"SELECT javatest.hello(42)", + +"SELECT javatest.hello(i), javatest.hello(r)" + +" FROM (VALUES (CAST (1 AS INTEGER), CAST (1.0 AS REAL)), (2, 2.0), (3, 3.0))" + +" AS t(i, r)", + +"CALL javatest.say_hello()", + +"CREATE FUNCTION javatest.glot64_trigger() RETURNS trigger" + +" LANGUAGE glot64 AS 'dHJpZ2dlciBkZW1vCg=='", + +"CREATE TRIGGER g64_as_d AFTER DELETE ON javatest.username_test" + +" REFERENCING OLD TABLE AS oldone FOR EACH STATEMENT" + +" EXECUTE FUNCTION javatest.glot64_trigger('ab', 'cd')", + +"CREATE CONSTRAINT TRIGGER g64_ar_iu AFTER INSERT OR UPDATE" + +" ON javatest.username_test FOR EACH ROW" + +" EXECUTE FUNCTION javatest.glot64_trigger('ef', 'gh')", + +"INSERT INTO javatest.username_test VALUES ('Wilhelm Glot', '64')", + +"UPDATE javatest.username_test SET name = 'Glot, Wilhelm'" + +" WHERE username = '64'", + +"DELETE FROM javatest.username_test WHERE username = '64'", + +"CREATE FUNCTION javatest.fromline(internal) RETURNS internal" + +" IMMUTABLE LANGUAGE glot64 AS 'ZnJvbVNRTA=='", + +"CREATE FUNCTION javatest.toline(internal) RETURNS line" + +" IMMUTABLE LANGUAGE glot64 AS 'dG9TUUw='", + +"CREATE FUNCTION javatest.frombox(internal) RETURNS internal" + +" IMMUTABLE LANGUAGE glot64 AS 'ZnJvbVNRTA=='", + +"CREATE FUNCTION javatest.tolseg(internal) RETURNS lseg" + +" IMMUTABLE LANGUAGE glot64 AS 'dG9TUUw='", + +"CREATE TRANSFORM FOR line LANGUAGE glot64 (" + +" FROM SQL WITH FUNCTION javatest.fromline," + +" TO SQL WITH FUNCTION javatest.toline )", + +"CREATE TRANSFORM FOR box LANGUAGE glot64 (" + +" FROM SQL WITH FUNCTION javatest.frombox )", + +"CREATE TRANSFORM FOR lseg LANGUAGE glot64 (" + +" TO SQL WITH FUNCTION javatest.tolseg )", + +"CREATE FUNCTION javatest.usingtransforms() RETURNS void" + +" TRANSFORM FOR TYPE line, FOR TYPE box, FOR TYPE lseg" + +" 
LANGUAGE glot64 AS 'SSBjb3VsZCB1c2UgdHJhbnNmb3JtcyEK'", + +"SELECT javatest.usingtransforms()", + +"CREATE FUNCTION setof3() RETURNS SETOF INT" + +" LANGUAGE glot64 AS 'Mw==' /* 3 */", + +"SELECT javatest.setof3() LIMIT 2" +}, remove = { +"DROP FUNCTION javatest.setof3()", +"DROP FUNCTION javatest.usingtransforms()", +"DROP TRANSFORM FOR lseg LANGUAGE glot64", +"DROP TRANSFORM FOR box LANGUAGE glot64", +"DROP TRANSFORM FOR line LANGUAGE glot64", +"DROP FUNCTION javatest.tolseg(internal)", +"DROP FUNCTION javatest.frombox(internal)", +"DROP FUNCTION javatest.toline(internal)", +"DROP FUNCTION javatest.fromline(internal)", +"DROP TRIGGER g64_ar_iu ON javatest.username_test", +"DROP TRIGGER g64_as_d ON javatest.username_test", +"DROP FUNCTION javatest.glot64_trigger()", +"DROP PROCEDURE javatest.say_hello()", +"DROP FUNCTION javatest.hello(text,VARIADIC \"any\")", +"DROP FUNCTION javatest.hello(int4,int4)", +"DROP FUNCTION javatest.hello(anyelement)", +"DROP FUNCTION javatest.hello()", +"DO LANGUAGE glot64 'QnllIGJ5ZSEK'", +"DROP LANGUAGE glot64", +"DROP FUNCTION javatest.glot64_validator(oid)" +}) +public class Glot64 +implements InlineBlocks, Routines, ReturningSets, Triggers, UsingTransforms +{ + private final ProceduralLanguage pl; + + /** + * There must be a public constructor with a {@code ProceduralLanguage} + * parameter. + *
+ * The parameter can be ignored, or used to determine the name, oid, + * accessibility, or other details of the declared PostgreSQL language + * your handler class has been instantiated for. + */ + public Glot64(ProceduralLanguage pl) + { + this.pl = pl; + } + + /** + * The sole method needed to implement inline code blocks. + *
+ * This one merely writes the 'compiled' source text to standard output. + * @param inlineSource the source text to be executed as the inline code + * block. + * @param atomic true if top-level transaction control must be disallowed + * within the block. PL/Java will already handle propagating this value to + * underlying PostgreSQL SPI calls your code might make, but it is also + * available here in case your language has compilation choices it can make + * based on that information. + */ + @Override + public void execute(String inlineSource, boolean atomic) throws SQLException + { + System.out.printf("%s inline code block (atomic: %s):\n", pl, atomic) + .print(compile(inlineSource)); + } + + /** + * This and {@link #additionalChecks additionalChecks} are the two methods + * involved in implementing a validator for the language. + *
+ * Each method should simply return normally if its checks pass, or throw + * an (ideally informative) exception if not. The work is split into two + * methods (which need not both be supplied) because PostgreSQL does not + * guarantee that the validator fully ran at the time of creating any + * routine. Therefore, while PL/Java will normally call both methods during + * validation when a function or procedure is being created, it also will + * call {@link #essentialChecks essentialChecks} at runtime, in advance of + * calling {@link #prepare prepare}, so this is the place to put checks that + * are necessary to support assumptions {@code prepare} relies on. + *
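+ * For instance, a language whose {@code prepare} step assumed a non-empty
+ * source string could make that an essential check; a minimal sketch, using
+ * the same SQLSTATE {@code compile} uses below:
+ *
+ *   if ( subject.src().isEmpty() )
+ *       throw new SQLSyntaxErrorException("empty routine body", "42601");
+ *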
+ * For Glot64, this method does nothing. The only check needed at validation + * time is whether the source text successfully 'compiles', and the + * {@code prepare} method will have to compile the code anyway, so including + * that check here would be redundant. It can be included in + * {@code additionalChecks}, so a user has useful feedback at create time. + * @param subject the proposed Glot64 routine to be validated + * @param checkBody whether to perform all checks. When false, depending on + * details of the language being implemented, some checks may need to be + * skipped. PostgreSQL can call validators with {@code checkBody} false at + * odd times, such as during {@code pg_restore} or {@code pg_upgrade}, when + * not everything in the database may be as the full suite of checks would + * expect. + */ + @Override + public void essentialChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("%s essentialChecks: checkBody %s\n", + subject, checkBody); + } + + /** + * This and {@link #essentialChecks essentialChecks} are the two methods + * involved in implementing a validator for the language. + *
+ * Each method should simply return normally if its checks pass, or throw + * an (ideally informative) exception if not. See + * {@link #essentialChecks essentialChecks} for more on why there are two + * methods. + *
+ * For Glot64, this is the only method that really checks anything, namely, + * that the source text can be 'compiled'. That is work the {@code prepare} + * method must do when it is called anyway, so there is nothing to gain by + * having it redundantly done in {@code essentialChecks}. Doing it here at + * {@code CREATE} time, though, gives helpful feedback to a user. + * @param subject the proposed Glot64 routine to be validated + * @param checkBody whether to perform all checks. When false, depending on + * details of the language being implemented, some checks may need to be + * skipped. PostgreSQL can call validators with {@code checkBody} false at + * odd times, such as during {@code pg_restore} or {@code pg_upgrade}, when + * not everything in the database may be as the full suite of checks would + * expect. + */ + @Override + public void additionalChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("%s additionalChecks: checkBody %s: ", + subject, checkBody); + + /* + * For Glot64, 'compiling' is purely a matter of string transformation + * and has no interaction with database state, so the judgment call can + * be made (as here) to include this check even when checkBody is false. + */ + String compiled = compile(subject.src()); + + if ( subject.returnsSet() ) + { + try + { + Integer.parseInt(compiled); + } + catch ( NumberFormatException e ) + { + throw new SQLSyntaxErrorException( + "the body of a Glot64 set-returning function must compile" + + " to an integer", "42P13", e); + } + } + + System.out.printf("ok\n"); + } + + /** + * Prepares an executable template for a routine (a function or procedure). + *
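+ * In brief, the three stages described below nest like this (a sketch of the
+ * shape only):
+ *
+ *   public Template prepare(RegProcedure target, PLJavaBased memo)
+ *   throws SQLException
+ *   {
+ *       // per-declaration work, using target and memo
+ *       return flinfo ->        // Template.specialize, once per call site
+ *       {
+ *           // per-call-site work, using flinfo
+ *           return fcinfo ->    // Routine.call, once per call
+ *           {
+ *               // per-call work, using fcinfo
+ *           };
+ *       };
+ *   }
+ *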
+ * The parameter to this method is a {@link RegProcedure}, carrying a + * {@link PLJavaBased PLJavaBased} memo. The {@code RegProcedure} exposes + * the PostgreSQL catalog information for the routine, and the memo provides + * some more information computed and cached by PL/Java. However, at this + * stage, no information from any specific call site is presented. + *
+ * For example, the memo describes the number, names, and types of the + * routine's inputs (all of its declared parameters that have mode + * {@code IN}, {@code INOUT}, or {@code VARIADIC}) and outputs (the ones + * with mode {@code INOUT}, {@code OUT}, or {@code TABLE}), each set + * presented in the simple form of a {@link TupleDescriptor}. + *
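+ * A handler can simply walk those descriptors. A sketch (this assumes a
+ * {@code TupleDescriptor} can be iterated directly, as the test code
+ * elsewhere in these examples does with a slot's descriptor):
+ *
+ *   for ( Attribute a : memo.inputsTemplate() )
+ *       System.out.printf("%s %s%n", a.name(), a.type());
+ *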
+ * Because this information is based only on the routine's declaration, + * these tuple descriptors are called {@code inputsTemplate} and + * {@code outputsTemplate}. They may contain entries with polymorphic types + * that will be resolved to concrete types at a given call site (and + * possibly to different concrete types at a different call site). The + * {@link BitSet BitSet}s {@code unresolvedInputs} and + * {@code unresolvedOutputs} indicate which positions must be resolved + * later. If the bit sets are empty, the template {@code TupleDescriptor}s + * are already complete descriptors of the inputs and outputs that will be + * seen at call sites. + *
+ * This method should precompute whatever it can based on the routine's + * catalog declaration and the template tuple descriptors only, and return + * a {@link Template} instance, which must depend only on this information, + * as it will be cached with the {@code RegProcedure} itself, independently + * of any call site. + *
+ * At a call site, the {@code Template} instance's {@code specialize} method + * will be called on a {@link Lookup Lookup} object (conventionally called + * {@code flinfo}) representing the call site. At that stage, more specific + * information is available, such as fully-resolved {@code TupleDescriptor}s + * for the inputs and outputs, and which argument expressions at that call + * site have stable values that will not vary in successive calls made at + * that site. The {@code specialize} method should use that information to + * generate and return a {@link Routine Routine}, a fully-resolved object + * with a {@code call} method ready to be cached at that call site and used + * for (possibly many) calls made there. + *
+ * When the routine has no polymorphic inputs or outputs, as reported by + * empty {@code unresolvedInputs} and {@code unresolvedOutputs} bit sets + * at the {@code prepare} stage, a final {@code Routine} can be generated + * at that stage, and the {@code Template} returned by {@code prepare} can + * simply return it unconditionally (unless it wants to look at which + * input expressions can be treated as stable). + *
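+ * A minimal sketch of that shortcut, as the tail of such a {@code prepare}
+ * implementation (the {@code Routine} body is elided; it would rely only on
+ * information available at {@code prepare} time):
+ *
+ *   Routine r = fcinfo ->
+ *   {
+ *       // act on fcinfo, using only call-site-independent state
+ *   };
+ *   return flinfo -> r;    // a Template that ignores the call site
+ *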
+ * For each call at a given call site, a {@link Call Call} instance will be + * passed (conventionally as {@code fcinfo}) to the generated + * {@code Routine}'s {@code call} method. The {@code Call} object bears the + * actual {@link TupleTableSlot}s from which the routine will fetch its + * arguments and to which it will (XXX when implemented) store its + * result(s). + */ + @Override + public Template prepare(RegProcedure target, PLJavaBased memo) + throws SQLException + { + BitSet unresolvedIn = memo.unresolvedInputs(); + BitSet unresolvedOut = memo.unresolvedOutputs(); + + System.out.printf( + "%s prepare():\n" + + "inputsTemplate : %s\n" + + "unresolvedInputs : %s\n" + + "outputsTemplate : %s\n" + + "unresolvedOutputs: %s\n" + + "transforms : %s\n", + + target, + + memo.inputsTemplate() + .stream().map(Attribute::type).collect(toList()), + + unresolvedIn, + + /* + * Unlike inputsTemplate, outputsTemplate can return null. That can + * happen for two reasons: (1) the routine is declared VOID and no + * outputs are needed, or (2) it is declared RECORD and will rely on + * an output column definition list at every call site, so there is + * no outputsTemplate to examine in advance. + */ + ofNullable(memo.outputsTemplate()) + .map(t -> + t.stream() + .map(Attribute::type) + .map(Object::toString) + .collect(toList()) + .toString()) + .orElse("null"), + + /* + * It's also possible for unresolvedOutputs to be null, in the + * declared-RECORD-so-nothing-is-known-yet case. (In the VOID case, + * it will just be an empty BitSet, meaning no outputs need to be + * resolved, just as an empty BitSet would mean any other time. That + * makes it simple to test for canSkipResolution, as shown below.) + */ + Objects.toString(unresolvedOut), + Objects.toString(memo.transforms()) + ); + + boolean canSkipResolution = + unresolvedIn.isEmpty() + && null != unresolvedOut && unresolvedOut.isEmpty(); + + /* + * For this 'language', all compilation can be done early; it does not + * need to see resolved type descriptors from flinfo at a call site. + */ + String compiled = compile(target.src()); + + /* + * This will be the Template object, cached with the RegProcedure. + */ + return flinfo -> + { + /* + * It might be interesting to know which arguments are 'stable' at + * this 'flinfo' call site, meaning they will have the same values + * in any number of upcoming calls at this site. In a realistic + * case, there might be certain arguments we'd be interested in + * precomputing values from, and we can use a BitSet to indicate + * which arguments we'd like to know the stability of, and the set + * returned from inputsStable will show the subset of those + * positions where stable expressions have been passed. For this + * example, we'll start by setting all bits [0,nargs) and thus ask + * about all the arguments. + */ + TupleDescriptor inDescriptor = flinfo.inputsDescriptor(); + int nargs = inDescriptor.size(); + BitSet maybeStable = new BitSet(nargs); + maybeStable.set(0, nargs); + + /* + * Precompute something specific to this call site + * that can be baked into the returned Routine. 
+ */ + int id = System.identityHashCode(flinfo); + + System.out.printf( + "%s Template.specialize():\n" + + "precomputed id : %x\n" + + "inputsDescriptor : %s\n" + + "inputsAreSpread : %s\n" + + "stableInputs : %s\n" + + "outputsDescriptor: %s\n", + + target, id, + + inDescriptor.stream().map(Attribute::type).collect(toList()), + + flinfo.inputsAreSpread(), + + flinfo.stableInputs(maybeStable), + + /* + * Above, outputsTemplate could return null for two reasons. + * The second reason no longer applies; if the routine is + * declared RECORD and this call site has no column definition + * list, outputsDescriptor throws an exception. But a null + * return is still possible in the VOID case. + * + * Why not an empty descriptor for VOID? An empty descriptor + * really occurs if a function returns t where t is a + * zero-column composite type. Odd thing to do, but allowed. + */ + ofNullable(flinfo.outputsDescriptor()) + .map(d -> + d.stream() + .map(Attribute::type) + .map(Object::toString) + .collect(toList()) + .toString()) + .orElse("null") + ); + + /* + * This will be the Routine object, cached with the call site. + */ + return fcinfo -> + { + Call.Context cx = fcinfo.context(); + + String subifc = ofNullable(cx) + .map(c -> c.getClass().getInterfaces()[0].getSimpleName()) + .orElse("null"); + + String maybeAtomic = + (cx instanceof Call.Context.CallContext) + ? String.format("atomic: %s\n", + ((Call.Context.CallContext)cx).atomic()) + : ""; + + System.out.printf( + "%s Routine.call():\n" + + "precomputed id: %x\n" + + "collation: %s\n" + + "context: %s\n%s" + + "result:\n%s", + target, id, + fcinfo.collation(), + subifc, maybeAtomic, + compiled // here we 'execute' the 'compiled' routine :) + ); + }; + }; + } + + /** + * Prepares a template for a set-returning Glot64 function. + *
+ * The source of any set-returning Glot64 function must "compile" to + * the string representation of an integer. + *
+ * The generated routine will ignore any arguments, and produce a number of + * rows (of, for now, nothing, as {@code TupleTableSlot} isn't writable yet) + * equal to the integer. If the integer is negative, the return of a single + * (non-set) result is exercised. + */ + @Override + public SRFTemplate prepareSRF(RegProcedure target, PLJavaBased memo) + throws SQLException + { + int rowsToReturn = Integer.parseInt(compile(target.src())); + + return (SRFTemplate.ValuePerCall) flinfo -> + { + return fcinfo -> + { + return new SRFNext() + { + private int rowsLeft = rowsToReturn; + + @Override + public void close() + { + System.out.println("ValuePerCall result closed"); + } + + @Override + public SRFNext.Result nextResult(Call fcinfo) + { + if ( 0 > rowsLeft ) + return SRFNext.Result.SINGLE; + if ( 0 == rowsLeft ) + return SRFNext.Result.END; + -- rowsLeft; + return SRFNext.Result.MULTIPLE; + } + }; + }; + }; + } + + @Override + public void essentialTriggerChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("essentialTriggerChecks: "); + essentialChecks(subject, memo, checkBody); + } + + @Override + public void additionalTriggerChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("additionalTriggerChecks: "); + additionalChecks(subject, memo, checkBody); + } + + @Override + public TriggerTemplate prepareTrigger( + RegProcedure target, PLJavaBased memo) + throws SQLException + { + System.out.printf( + "%s prepareTrigger():\n", + target + ); + + String compiled = compile(target.src()); + + return trigger -> + { + System.out.printf( + "%s TriggerTemplate.specialize():\n" + + "name : %s\n" + + "relation : %s\n" + + "function : %s\n" + + "called : %s\n" + + "events : %s\n" + + "scope : %s\n" + + "enabled : %s\n" + + "internal : %s\n" + + "arguments : %s\n" + + "columns : %s\n" + + "when : %s\n" + + "tableOld : %s\n" + + "tableNew : %s\n" + + "isClone : %s\n" + + "constraint: %s\n" + + "deferrable: %s\n" + + "initiallyDeferred: %s\n" + + "constraintTable : %s\n" + + "constraintIndex : %s\n", + target, + trigger.name(), trigger.relation(), trigger.function(), + trigger.called(), trigger.events(), trigger.scope(), + trigger.enabled(), trigger.internal(), + trigger.arguments(), + projectionListNames(trigger.columns()), + ofNullable(trigger.when()) + .map(xml -> + { + try + { + return xml.getString(); + } + catch ( SQLException e ) + { + return e.toString(); + } + }).orElse("null"), + trigger.tableOld(), + trigger.tableNew(), + trigger.isClone(), + trigger.constraint(), + trigger.deferrable(), trigger.initiallyDeferred(), + trigger.constraintRelation(), trigger.constraintIndex() + ); + + /* + * Precompute something specific to this trigger + * that can be baked into the returned TriggerFunction. 
+ */ + String triggerName = + trigger.name() + " on " + trigger.relation().qualifiedName(); + + return triggerData -> + { + System.out.printf( + "%s TriggerFunction.apply():\n" + + "precomputed name: %s\n" + + "called : %s\n" + + "event : %s\n" + + "scope : %s\n" + + "relation : %s\n" + + "trigger : %s\n" + + "triggerTuple : %s\n" + + "newTuple : %s\n" + + "updatedCols : %s\n" + + "result:\n%s", + target, triggerName, + triggerData.called(), triggerData.event(), + triggerData.scope(), triggerData.relation(), + triggerData.trigger(), + slotListNamesTypes(triggerData.triggerTuple()), + slotListNamesTypes(triggerData.newTuple()), + projectionListNames(triggerData.updatedColumns()), + compiled // here we 'execute' the 'compiled' routine :) + ); + + return null; // in real life this suppresses triggering event + }; + }; + } + + /** + * Checks that t is a transform usable with this language. + *
+ * The toy requirements imposed here are that a {@code fromSQL} function + * must be implemented in this language and have a {@code src} string that + * compiles to {@code "fromSQL"}, and likewise a {@code toSQL} function must + * be implemented in this language and compile to {@code "toSQL"}. + */ + @Override + public void essentialTransformChecks(Transform t) throws SQLException + { + System.out.printf("%s essentialTransformChecks: ", t); + + RegProcedure fs = t.fromSQL(); + RegProcedure ts = t.toSQL(); + + if ( ! fs.isValid() ) + { + /* + * This transform specifies to use the PL's default from-SQL + * conversion for this type. An exception should be thrown here + * if there is no such usable default. + */ + System.out.printf(String.format( + "will use PL's default from-SQL treatment for %s\n", t.type())); + } + else if ( fs.language() != pl || ! "fromSQL".equals(compile(fs.src())) ) + throw new SQLSyntaxErrorException(String.format( + "%s for use as a fromSQL function for %s must be implemented " + + "in %s and compile to string \"fromSQL\"", fs, pl, pl), + "42P17"); + + if ( ! ts.isValid() ) + { + /* + * This transform specifies to use the PL's default to-SQL + * conversion for this type. An exception should be thrown here + * if there is no such usable default. + */ + System.out.printf(String.format( + "will use PL's default to-SQL treatment for %s\n", t.type())); + } + else if ( ts.language() != pl || ! "toSQL".equals(compile(ts.src())) ) + throw new SQLSyntaxErrorException(String.format( + "%s for use as a toSQL function for %s must be implemented " + + "in %s and compile to string \"toSQL\"", ts, pl, pl), + "42P17"); + + System.out.printf("ok\n"); + return; + } + + /** + * This method handles 'compiling' Glot64 source code (which is Base64) + * into its 'compiled' form, which is ASCII and easier to read than the + * source. + *
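+ * For example, assuming well-formed input:
+ *
+ *   compile("SGVsbG8K")    // yields "Hello" followed by a newline
+ *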
+ * It is factored out here so it can also be conveniently used at validation + * time. + *
+ * The longwinded style with explicit {@code newEncoder}/{@code newDecoder} + * calls is used to get strict checking (instead of lax character + * substitution) from the encoder/decoder, to give the most, shall we say, + * thorough feedback to the user. + */ + public static String compile(String sourceText) throws SQLException + { + try + { + CharBuffer cb = CharBuffer.wrap(sourceText); + ByteBuffer bb = US_ASCII.newEncoder().encode(cb); + bb = Base64.getDecoder().decode(bb); + cb = US_ASCII.newDecoder().decode(bb); + return cb.toString(); + } + catch ( CharacterCodingException | IllegalArgumentException e ) + { + throw new SQLSyntaxErrorException( + "compiling glot64 code: " + e, "42601", e); + } + } + + private static String projectionListNames(Projection td) + { + return + ofNullable(td) + .map(d -> + d.stream() + .map(Attribute::name) + .map(Object::toString) + .collect(toList()) + .toString()) + .orElse("null"); + } + + private static String slotListNamesTypes(TupleTableSlot tts) + { + return + ofNullable(tts) + .map(s -> + s.descriptor().stream() + .map(a->a.name() + ":" + a.type().qualifiedName()) + .collect(toList()) + .toString()) + .orElse("null"); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/package-info.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/package-info.java new file mode 100644 index 000000000..a9085f9fb --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/package-info.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +/** + * Examples that illustrate how to build new Procedural Language implementations + * in PL/Java. + *
+ * The {@code SQLAction} annotation here creates the {@code pljavahandler} + * language needed for the other examples, and will eventually disappear when + * the creation actions are incorporated into PL/Java's installation. + * @author Chapman Flack + */ +@SQLAction(provides="pljavahandler language", install={ +"DO LANGUAGE plpgsql '" + +" DECLARE" + +" qbin text;" + +" BEGIN" + +" SELECT quote_literal(probin) INTO STRICT qbin FROM pg_proc" + +" WHERE oid = ''sqlj.java_call_handler()''::regprocedure;" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchValidator(pg_catalog.oid)" + +" RETURNS pg_catalog.void" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchValidator''''" + +" '';" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchValidator()" + +" RETURNS pg_catalog.language_handler" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchValidator''''" + +" '';" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchRoutine()" + +" RETURNS pg_catalog.language_handler" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchRoutine''''" + +" '';" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchInline(pg_catalog.internal)" + +" RETURNS pg_catalog.void" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchInline''''" + +" '';" + +"END'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchValidator(oid) IS " + +"'The validator function for the \"PL/Java handler\" language (in which one " + +"can only write functions that are validators of actual procedural languages " + +"implemented atop PL/Java).'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchValidator() IS " + +"'The call handler for the \"PL/Java handler\" language (in which one " + +"can only write functions that are validators of actual procedural languages " + +"implemented atop PL/Java). The C entry point is the same as for the " + +"validator handler, which works because the only functions that can be " + +"written in this \"language\" are validators.'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchRoutine() IS " + +"'The call handler that must be named (as HANDLER) in CREATE LANGUAGE for " + +"a procedural language implemented atop PL/Java. (PostgreSQL requires every " + +"CREATE LANGUAGE to include HANDLER, but PL/Java allows a language to " + +"simply not implement the Routines interface, if it is only intended for " + +"InlineBlocks.)'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchInline(pg_catalog.internal) IS " + +"'The handler that must be named (as INLINE) in CREATE LANGUAGE for " + +"a procedural language implemented atop PL/Java, if that language is " + +"intended to support inline code blocks.'", + +"CREATE LANGUAGE pljavahandler" + +" HANDLER sqlj.pljavaDispatchValidator" + +" VALIDATOR sqlj.pljavaDispatchValidator", + +"COMMENT ON LANGUAGE pljavahandler IS " + +"'The PL/Java \"handler language\", used in implementing other procedural " + +"languages atop PL/Java. 
Only one kind of function can be written in this " + +"\"language\", namely, a validator function, and the AS string of such a " + +"validator function is simply the name of a Java class that must implement " + +"one or both of PLJavaBasedLanguage.Routines or " + +"PLJavaBasedLanguage.InlineBlocks, and that class will be used as the " + +"implementation of the new language.'" +}, remove={ +"DROP LANGUAGE pljavahandler", +"DROP FUNCTION sqlj.pljavaDispatchInline(internal)", +"DROP FUNCTION sqlj.pljavaDispatchRoutine()", +"DROP FUNCTION sqlj.pljavaDispatchValidator()", +"DROP FUNCTION sqlj.pljavaDispatchValidator(oid)" +}) +package org.postgresql.pljava.example.polyglot; + +import org.postgresql.pljava.annotation.SQLAction; diff --git a/pljava-packaging/src/main/resources/pljava.policy b/pljava-packaging/src/main/resources/pljava.policy index 4c7c078f1..da3ab5846 100644 --- a/pljava-packaging/src/main/resources/pljava.policy +++ b/pljava-packaging/src/main/resources/pljava.policy @@ -71,6 +71,8 @@ grant codebase "${org.postgresql.pljava.codesource}" { "control"; permission java.security.SecurityPermission "createAccessControlContext"; + permission org.postgresql.pljava.Adapter$Permission + "*", "fetch"; // This gives the PL/Java implementation code permission to read // any file, which it only exercises on behalf of sqlj.install_jar() @@ -87,6 +89,17 @@ grant codebase "${org.postgresql.pljava.codesource}" { }; +// +// This grant is specific to the API classes of PL/Java itself; the data type +// Adapter class is there (so user code can create adapters) and must be able +// to pass its own permission check. +// +grant codebase "${org.postgresql.pljava.codesource.api}" { + permission org.postgresql.pljava.Adapter$Permission + "*", "fetch"; +}; + + // // This grant defines the mapping onto Java of PostgreSQL's "trusted language" // category. When PL/Java executes a function whose SQL declaration names diff --git a/pljava-so/src/main/c/Backend.c b/pljava-so/src/main/c/Backend.c index 8e4cf71d0..29eda0062 100644 --- a/pljava-so/src/main/c/Backend.c +++ b/pljava-so/src/main/c/Backend.c @@ -48,6 +48,8 @@ #include "org_postgresql_pljava_internal_Backend.h" #include "org_postgresql_pljava_internal_Backend_EarlyNatives.h" +#include "pljava/ModelConstants.h" +#include "pljava/ModelUtils.h" #include "pljava/DualState.h" #include "pljava/Invocation.h" #include "pljava/InstallHelper.h" @@ -758,7 +760,7 @@ static void initsequencer(enum initstage is, bool tolerant) } PG_CATCH(); { - MemoryContextSwitchTo(ctx.upperContext); /* leave ErrorContext */ + Invocation_switchToUpperContext(); /* leave ErrorContext */ Invocation_popBootContext(); initstage = IS_MISC_ONCE_DONE; /* We can't stay here... 
@@ -792,7 +794,7 @@ static void initsequencer(enum initstage is, bool tolerant) "and \"pljava-api.jar\" files, separated by the correct " "path separator for this platform.") )); - pljava_DualState_unregister(); + pljava_ResourceOwner_unregister(); _destroyJavaVM(0, 0); goto check_tolerant; } @@ -1050,6 +1052,11 @@ static void initPLJavaClasses(void) "(Ljava/lang/String;Ljava/lang/ClassLoader;[B)Ljava/lang/Class;", Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1defineClass }, + { + "_window", + "(Ljava/lang/Class;)[Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1window + }, { 0, 0, 0 } }; jclass cls; @@ -1085,11 +1092,13 @@ static void initPLJavaClasses(void) "THREADLOCK", "Ljava/lang/Object;"); JNI_setThreadLock(JNI_getStaticObjectField(s_Backend_class, fID)); + pljava_ModelConstants_initialize(); Invocation_initialize(); Exception_initialize2(); - SPI_initialize(); Type_initialize(); + pljava_ModelUtils_initialize(); pljava_DualState_initialize(); + SPI_initialize(); Function_initialize(); Session_initialize(); PgSavepoint_initialize(); @@ -1157,10 +1166,10 @@ static void onJVMExitOrAbort() /* * This does a PostgreSQL UnregisterResourceReleaseCallback, which should * be painless if the callback hasn't been registered yet. The key is to - * avoid triggering a DualState callback that tries a JNI upcall into + * avoid triggering a ResourceOwner callback that tries a JNI upcall into * the already-gone JVM. */ - pljava_DualState_unregister(); + pljava_ResourceOwner_unregister(); } /** @@ -1429,7 +1438,7 @@ static void _destroyJavaVM(int status, Datum dummy) elog(DEBUG2, "needed to forcibly shut down the Java virtual machine"); s_javaVM = 0; - currentInvocation = 0; + *currentInvocation = ctx; /* popBootContext but VM is gone */ return; } @@ -1447,7 +1456,7 @@ static void _destroyJavaVM(int status, Datum dummy) #endif elog(DEBUG2, "done shutting down the Java virtual machine"); s_javaVM = 0; - currentInvocation = 0; + *currentInvocation = ctx; /* popBootContext but VM is gone */ } } @@ -1884,7 +1893,81 @@ static void registerGUCOptions(void) #undef PLJAVA_ENABLE_DEFAULT #undef PLJAVA_IMPLEMENTOR_FLAGS -static inline Datum internalCallHandler(bool trusted, PG_FUNCTION_ARGS); +extern PLJAVADLLEXPORT Datum pljavaDispatchRoutine(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1(pljavaDispatchRoutine); + +Datum pljavaDispatchRoutine(PG_FUNCTION_ARGS) +{ + Invocation ctx; + Datum retval = 0; + + /* + * Just in case it could be helpful in offering diagnostics later, hang + * on to an Oid that is known to refer to PL/Java (because it got here). + * It's cheap, and can be followed back to the right language and + * handler function entries later if needed. + * + * Note that doing this here, in a meta-language dispatcher, changes the + * meaning somewhat. No longer is this necessarily an oid for a routine in + * the familiar PL/Java language; it is the oid of a routine in some + * PL/Java-based language. It's still good for pinning down the path to our + * shared object, but is now more ambiguous as far as what pg_language entry + * it identifies. (To some extent, this has been the case for a while now + * anyway, since sqlj.alias_java_language.) 
+ */ + pljavaOid = fcinfo->flinfo->fn_oid; + + if ( IS_COMPLETE != initstage ) + { + deferInit = false; + initsequencer( initstage, false); + } + + Invocation_pushInvocation(&ctx); + PG_TRY(); + { + retval = pljava_ModelUtils_callDispatch(fcinfo, false); + Invocation_popInvocation(false); + } + PG_CATCH(); + { + Invocation_popInvocation(true); + PG_RE_THROW(); + } + PG_END_TRY(); + return retval; +} + +extern PLJAVADLLEXPORT Datum pljavaDispatchInline(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1(pljavaDispatchInline); + +Datum pljavaDispatchInline(PG_FUNCTION_ARGS) +{ + Invocation ctx; + + if ( IS_COMPLETE != initstage ) + { + deferInit = false; + initsequencer( initstage, false); + } + + Invocation_pushInvocation(&ctx); + PG_TRY(); + { + pljava_ModelUtils_inlineDispatch(fcinfo); + Invocation_popInvocation(false); + } + PG_CATCH(); + { + Invocation_popInvocation(true); + PG_RE_THROW(); + } + PG_END_TRY(); + + PG_RETURN_VOID(); +} + +static inline Datum legacyCallHandler(PG_FUNCTION_ARGS); extern PLJAVADLLEXPORT Datum javau_call_handler(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1(javau_call_handler); @@ -1894,7 +1977,7 @@ PG_FUNCTION_INFO_V1(javau_call_handler); */ Datum javau_call_handler(PG_FUNCTION_ARGS) { - return internalCallHandler(false, fcinfo); + return legacyCallHandler(fcinfo); } extern PLJAVADLLEXPORT Datum java_call_handler(PG_FUNCTION_ARGS); @@ -1905,11 +1988,11 @@ PG_FUNCTION_INFO_V1(java_call_handler); */ Datum java_call_handler(PG_FUNCTION_ARGS) { - return internalCallHandler(true, fcinfo); + return legacyCallHandler(fcinfo); } static inline Datum -internalCallHandler(bool trusted, PG_FUNCTION_ARGS) +legacyCallHandler(PG_FUNCTION_ARGS) { Invocation ctx; Datum retval = 0; @@ -1922,7 +2005,7 @@ internalCallHandler(bool trusted, PG_FUNCTION_ARGS) * It's cheap, and can be followed back to the right language and * handler function entries later if needed. */ - *(trusted ? 
&pljavaTrustedOid : &pljavaUntrustedOid) = funcoid; + pljavaOid = funcoid; if ( IS_COMPLETE != initstage ) { deferInit = false; @@ -1932,8 +2015,7 @@ internalCallHandler(bool trusted, PG_FUNCTION_ARGS) Invocation_pushInvocation(&ctx); PG_TRY(); { - retval = Function_invoke( - funcoid, trusted, forTrigger, false, true, fcinfo); + retval = Function_invoke(funcoid, forTrigger, false, true, fcinfo); Invocation_popInvocation(false); } PG_CATCH(); @@ -1945,14 +2027,14 @@ internalCallHandler(bool trusted, PG_FUNCTION_ARGS) return retval; } -static Datum internalValidator(bool trusted, PG_FUNCTION_ARGS); +static Datum internalValidator(PG_FUNCTION_ARGS, bool legacy); extern PLJAVADLLEXPORT Datum javau_validator(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1(javau_validator); Datum javau_validator(PG_FUNCTION_ARGS) { - return internalValidator(false, fcinfo); + return internalValidator(fcinfo, true); } extern PLJAVADLLEXPORT Datum java_validator(PG_FUNCTION_ARGS); @@ -1960,14 +2042,21 @@ PG_FUNCTION_INFO_V1(java_validator); Datum java_validator(PG_FUNCTION_ARGS) { - return internalValidator(true, fcinfo); + return internalValidator(fcinfo, true); } -static Datum internalValidator(bool trusted, PG_FUNCTION_ARGS) +extern PLJAVADLLEXPORT Datum pljavaDispatchValidator(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1(pljavaDispatchValidator); + +Datum pljavaDispatchValidator(PG_FUNCTION_ARGS) +{ + return internalValidator(fcinfo, false); +} + +static Datum internalValidator(PG_FUNCTION_ARGS, bool legacy) { Oid funcoid = PG_GETARG_OID(0); Invocation ctx; - Oid *oidSaveLocation = NULL; bool ok = CheckFunctionValidatorAccess(fcinfo->flinfo->fn_oid, funcoid); /* @@ -1978,10 +2067,10 @@ static Datum internalValidator(bool trusted, PG_FUNCTION_ARGS) * that should never be skipped, just like the permission checks made in * CheckFunctionValidatorAccess itself. */ - if ( withoutEnforcement && trusted && ! superuser() ) + if ( withoutEnforcement && ! superuser() ) ereport(ERROR, ( errmsg( - "trusted PL/Java language restricted to superuser when " + "PL/Java language restricted to superuser when " "\"java.security.manager\"=\"disallow\""), errdetail( "This PL/Java version enforces security policy using important " @@ -1997,29 +2086,6 @@ static Datum internalValidator(bool trusted, PG_FUNCTION_ARGS) if ( ! ok ) PG_RETURN_VOID(); - /* - * In the call handler, which could be called heavily, funcoid gets - * unconditionally stored to one of these two locations, rather than - * spending extra cycles deciding whether to store it or not. A validator - * will not be called as heavily, and can afford to check here whether - * an Oid needs to be stored or not. The situation to avoid is where - * funcoid gets stored here, as an Oid from which PL/Java's library path can - * be found, but the function then gets rejected by the validator, leaving - * the stored Oid invalid and useless for that purpose. Therefore, choose - * here whether and where to store it, but store it only within the PG_TRY - * block, and replace with InvalidOid again in the PG_CATCH. 
- */ - if ( trusted ) - { - if ( InvalidOid == pljavaTrustedOid ) - oidSaveLocation = &pljavaTrustedOid; - } - else - { - if ( InvalidOid == pljavaUntrustedOid ) - oidSaveLocation = &pljavaUntrustedOid; - } - if ( IS_PLJAVA_INSTALLING > initstage ) { if ( check_function_bodies ) /* We're gonna need a JVM */ @@ -2042,17 +2108,31 @@ static Datum internalValidator(bool trusted, PG_FUNCTION_ARGS) Invocation_pushInvocation(&ctx); PG_TRY(); { - if ( NULL != oidSaveLocation ) - *oidSaveLocation = funcoid; + /* + * In the call handler, which could be called heavily, funcoid gets + * unconditionally stored to pljavaOid, rather than spending extra + * cycles deciding whether to store it or not. A validator will not be + * called as heavily, and can afford to check here whether an Oid needs + * to be stored or not. The situation to avoid is where funcoid gets + * stored here, as an Oid from which PL/Java's library path can be + * found, but the function then gets rejected by the validator, + * leaving the stored Oid invalid and useless for that purpose. + * Therefore, choose whether to store it, here within the PG_TRY block, + * but replace with InvalidOid again if the PG_CATCH happens. + */ + if ( InvalidOid == pljavaOid ) + pljavaOid = funcoid; - Function_invoke( - funcoid, trusted, false, true, check_function_bodies, NULL); + if ( legacy ) + Function_invoke(funcoid, false, true, check_function_bodies, NULL); + else + pljava_ModelUtils_callDispatch(fcinfo, true); Invocation_popInvocation(false); } PG_CATCH(); { - if ( NULL != oidSaveLocation ) - *oidSaveLocation = InvalidOid; + if ( funcoid == pljavaOid ) + pljavaOid = InvalidOid; Invocation_popInvocation(true); PG_RE_THROW(); @@ -2277,10 +2357,8 @@ Java_org_postgresql_pljava_internal_Backend__1myLibraryPath(JNIEnv *env, jclass if ( NULL == pljavaLoadPath ) { - Oid funcoid = pljavaTrustedOid; + Oid funcoid = pljavaOid; - if ( InvalidOid == funcoid ) - funcoid = pljavaUntrustedOid; if ( InvalidOid == funcoid ) return NULL; @@ -2392,3 +2470,32 @@ Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1defineClass(JNIE (*env)->ReleaseStringUTFChars(env, name, utfName); return newcls; } + +/* + * Class: org_postgresql_pljava_internal_Backend_EarlyNatives + * Method: _window + * Signature: (Ljava/lang/Class;)[Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1window(JNIEnv *env, jclass cls, jclass component) +{ + jobject r = (*env)->NewObjectArray(env, (jsize)1, component, NULL); + if ( NULL == r ) + return NULL; + +#define POPULATE(thing) do {\ + jobject b = (*env)->NewDirectByteBuffer(env, \ + &thing, sizeof thing);\ + if ( NULL == b )\ + return NULL;\ + (*env)->SetObjectArrayElement(env, r, \ + (jsize)org_postgresql_pljava_internal_Backend_##thing, \ + b);\ +} while (0) + + POPULATE(check_function_bodies); + +#undef POPULATE + + return r; +} diff --git a/pljava-so/src/main/c/DualState.c b/pljava-so/src/main/c/DualState.c index 1732b6e50..7d207ae9c 100644 --- a/pljava-so/src/main/c/DualState.c +++ b/pljava-so/src/main/c/DualState.c @@ -17,10 +17,12 @@ #include "org_postgresql_pljava_internal_DualState_SingleHeapFreeTuple.h" #include "org_postgresql_pljava_internal_DualState_SingleFreeErrorData.h" #include "org_postgresql_pljava_internal_DualState_SingleSPIfreeplan.h" +#include "org_postgresql_pljava_internal_DualState_SingleSPIfreetuptable.h" #include "org_postgresql_pljava_internal_DualState_SingleSPIcursorClose.h" +#include 
"org_postgresql_pljava_internal_DualState_BBHeapFreeTuple.h" +#include "org_postgresql_pljava_internal_DualState_SingleDeleteGlobalRefP.h" #include "pljava/DualState.h" -#include "pljava/Backend.h" #include "pljava/Exception.h" #include "pljava/Invocation.h" #include "pljava/PgObject.h" @@ -46,22 +48,8 @@ extern void pljava_ExecutionPlan_initialize(void); static jclass s_DualState_class; -static jmethodID s_DualState_resourceOwnerRelease; static jmethodID s_DualState_cleanEnqueuedInstances; -static jobject s_DualState_key; - -static void resourceReleaseCB(ResourceReleasePhase phase, - bool isCommit, bool isTopLevel, void *arg); - -/* - * Return a capability that is only expected to be accessible to native code. - */ -jobject pljava_DualState_key(void) -{ - return s_DualState_key; -} - /* * Rather than using finalizers (deprecated in recent Java anyway), which can * increase the number of threads needing to interact with PG, DualState objects @@ -75,35 +63,9 @@ void pljava_DualState_cleanEnqueuedInstances(void) s_DualState_cleanEnqueuedInstances); } -/* - * Called when the lifespan/scope of a particular PG resource owner is about to - * expire, to make the associated DualState objects inaccessible from Java. As - * described in DualState.java, the argument will often be a PG ResourceOwner - * (when this function is called by resourceReleaseCB), but pointers to other - * structures can also be used (such a pointer clearly can't be confused with a - * ResourceOwner existing at the same time). In PG 9.5+, it could be a - * MemoryContext, with a MemoryContextCallback established to call this - * function. For items whose scope is limited to a single PL/Java function - * invocation, this can be a pointer to the Invocation. - */ -void pljava_DualState_nativeRelease(void *ro) -{ - /* - * This static assertion does not need to be in every file that uses - * PointerGetJLong, but it should be somewhere once, so here it is. 
- */ - StaticAssertStmt(sizeof (uintptr_t) <= sizeof (jlong), - "uintptr_t will not fit in jlong on this platform"); - - JNI_callStaticVoidMethodLocked(s_DualState_class, - s_DualState_resourceOwnerRelease, - PointerGetJLong(ro)); -} - void pljava_DualState_initialize(void) { jclass clazz; - jmethodID ctor; JNINativeMethod singlePfreeMethods[] = { @@ -165,6 +127,16 @@ void pljava_DualState_initialize(void) { 0, 0, 0 } }; + JNINativeMethod singleSPIfreetuptableMethods[] = + { + { + "_spiFreeTupTable", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleSPIfreetuptable__1spiFreeTupTable + }, + { 0, 0, 0 } + }; + JNINativeMethod singleSPIcursorCloseMethods[] = { { @@ -175,19 +147,31 @@ void pljava_DualState_initialize(void) { 0, 0, 0 } }; + JNINativeMethod bbHeapFreeTupleMethods[] = + { + { + "_heapFreeTuple", + "(Ljava/nio/ByteBuffer;)V", + Java_org_postgresql_pljava_internal_DualState_00024BBHeapFreeTuple__1heapFreeTuple + }, + { 0, 0, 0 } + }; + + JNINativeMethod deleteGlobalRefPMethods[] = + { + { + "_deleteGlobalRefP", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleDeleteGlobalRefP__1deleteGlobalRefP + }, + { 0, 0, 0 } + }; + s_DualState_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass( "org/postgresql/pljava/internal/DualState")); - s_DualState_resourceOwnerRelease = PgObject_getStaticJavaMethod( - s_DualState_class, "resourceOwnerRelease", "(J)V"); s_DualState_cleanEnqueuedInstances = PgObject_getStaticJavaMethod( s_DualState_class, "cleanEnqueuedInstances", "()V"); - clazz = (jclass)PgObject_getJavaClass( - "org/postgresql/pljava/internal/DualState$Key"); - ctor = PgObject_getJavaMethod(clazz, "", "()V"); - s_DualState_key = JNI_newGlobalRef(JNI_newObject(clazz, ctor)); - JNI_deleteLocalRef(clazz); - clazz = (jclass)PgObject_getJavaClass( "org/postgresql/pljava/internal/DualState$SinglePfree"); PgObject_registerNatives2(clazz, singlePfreeMethods); @@ -218,12 +202,25 @@ void pljava_DualState_initialize(void) PgObject_registerNatives2(clazz, singleSPIfreeplanMethods); JNI_deleteLocalRef(clazz); + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SingleSPIfreetuptable"); + PgObject_registerNatives2(clazz, singleSPIfreetuptableMethods); + JNI_deleteLocalRef(clazz); + clazz = (jclass)PgObject_getJavaClass( "org/postgresql/pljava/internal/DualState$SingleSPIcursorClose"); PgObject_registerNatives2(clazz, singleSPIcursorCloseMethods); JNI_deleteLocalRef(clazz); - RegisterResourceReleaseCallback(resourceReleaseCB, NULL); + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$BBHeapFreeTuple"); + PgObject_registerNatives2(clazz, bbHeapFreeTupleMethods); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SingleDeleteGlobalRefP"); + PgObject_registerNatives2(clazz, deleteGlobalRefPMethods); + JNI_deleteLocalRef(clazz); /* * Call initialize() methods of known classes built upon DualState. @@ -240,32 +237,6 @@ void pljava_DualState_initialize(void) pljava_VarlenaWrapper_initialize(); } -void pljava_DualState_unregister(void) -{ - UnregisterResourceReleaseCallback(resourceReleaseCB, NULL); -} - -static void resourceReleaseCB(ResourceReleasePhase phase, - bool isCommit, bool isTopLevel, void *arg) -{ - /* - * The way ResourceOwnerRelease is implemented, callbacks to loadable - * modules (like us!) happen /after/ all of the built-in releasey actions - * for a particular phase. 
So, by looking for RESOURCE_RELEASE_LOCKS here, - * we actually end up executing after all the built-in lock-related stuff - * has been released, but before any of the built-in stuff released in the - * RESOURCE_RELEASE_AFTER_LOCKS phase. Which, at least for the currently - * implemented DualState subclasses, is about the right time. - */ - if ( RESOURCE_RELEASE_LOCKS != phase ) - return; - - pljava_DualState_nativeRelease(CurrentResourceOwner); - - if ( isTopLevel ) - Backend_warnJEP411(isCommit); -} - /* @@ -374,6 +345,30 @@ Java_org_postgresql_pljava_internal_DualState_00024SingleSPIfreeplan__1spiFreePl +/* + * Class: org_postgresql_pljava_internal_DualState_SingleSPIfreetuptable + * Method: _spiFreeTupTable + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleSPIfreetuptable__1spiFreeTupTable( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + PG_TRY(); + { + SPI_freetuptable(JLongGet(SPITupleTable *, pointer)); + } + PG_CATCH(); + { + Exception_throw_ERROR("SPI_freetuptable"); + } + PG_END_TRY(); + END_NATIVE +} + + + /* * Class: org_postgresql_pljava_internal_DualState_SingleSPIcursorClose * Method: _spiCursorClose @@ -393,7 +388,7 @@ Java_org_postgresql_pljava_internal_DualState_00024SingleSPIcursorClose__1spiCur * does nothing if the current Invocation's errorOccurred flag is set, * or during an end-of-expression-context callback from the executor. */ - if ( NULL != currentInvocation && ! currentInvocation->errorOccurred + if ( HAS_INVOCATION && ! currentInvocation->errorOccurred && ! currentInvocation->inExprContextCB ) SPI_cursor_close(JLongGet(Portal, pointer)); } @@ -404,3 +399,40 @@ Java_org_postgresql_pljava_internal_DualState_00024SingleSPIcursorClose__1spiCur PG_END_TRY(); END_NATIVE } + + + +/* + * Class: org_postgresql_pljava_internal_DualState_BBHeapFreeTuple + * Method: _heapFreeTuple + * Signature: (Ljava/nio/ByteBuffer;)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024BBHeapFreeTuple__1heapFreeTuple( + JNIEnv* env, jobject _this, jobject bb) +{ + HeapTuple tup = (*env)->GetDirectBufferAddress(env, bb); + if ( NULL == tup ) + return; + BEGIN_NATIVE_NO_ERRCHECK + heap_freetuple(tup); + END_NATIVE +} + + + +/* + * Class: org_postgresql_pljava_internal_DualState_SingleDeleteGlobalRefP + * Method: _deleteGlobalRefP + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleDeleteGlobalRefP__1deleteGlobalRefP( + JNIEnv* env, jobject _this, jlong jrefp) +{ + jobject *refp = JLongGet(jobject *, jrefp); + jobject ref = *refp; + *refp = NULL; + /* no call into PostgreSQL here, just one simple JNI operation */ + (*env)->DeleteGlobalRef(env, ref); +} diff --git a/pljava-so/src/main/c/ExecutionPlan.c b/pljava-so/src/main/c/ExecutionPlan.c index 90ecdeb8a..466a55d6b 100644 --- a/pljava-so/src/main/c/ExecutionPlan.c +++ b/pljava-so/src/main/c/ExecutionPlan.c @@ -78,8 +78,7 @@ void pljava_ExecutionPlan_initialize(void) "org/postgresql/pljava/internal/ExecutionPlan")); s_ExecutionPlan_init = PgObject_getJavaMethod(s_ExecutionPlan_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;J" - "Ljava/lang/Object;J)V"); + "(Ljava/lang/Object;J)V"); } static bool coerceObjects( @@ -100,7 +99,7 @@ static bool coerceObjects( if(count > 0) { int idx; - jobject typeMap = Invocation_getTypeMap(); + jobject typeMap = Function_currentTypeMap(); values = (Datum*)palloc(count * sizeof(Datum)); for(idx = 0; idx < count; ++idx) { @@ 
-319,8 +318,7 @@ Java_org_postgresql_pljava_internal_ExecutionPlan__1prepare(JNIEnv* env, jclass result = JNI_newObjectLocked( s_ExecutionPlan_class, s_ExecutionPlan_init, - /* (jlong)0 as resource owner: the saved plan isn't transient */ - pljava_DualState_key(), (jlong)0, key, PointerGetJLong(ePlan)); + key, PointerGetJLong(ePlan)); } } PG_CATCH(); diff --git a/pljava-so/src/main/c/Function.c b/pljava-so/src/main/c/Function.c index 891591a6e..7a1bffd4b 100644 --- a/pljava-so/src/main/c/Function.c +++ b/pljava-so/src/main/c/Function.c @@ -696,7 +696,7 @@ Type Function_checkTypeBaseUDT(Oid typeId, Form_pg_type typeStruct) } static Function Function_create( - Oid funcOid, bool trusted, bool forTrigger, + Oid funcOid, bool forTrigger, bool forValidator, bool checkBody) { Function self; @@ -707,17 +707,11 @@ static Function Function_create( PgObject_getValidTuple(LANGOID, procStruct->prolang, "language"); Form_pg_language lngStruct = (Form_pg_language)GETSTRUCT(lngTup); jstring lname = String_createJavaStringFromNTS(NameStr(lngStruct->lanname)); - bool ltrust = lngStruct->lanpltrusted; + bool trusted = lngStruct->lanpltrusted; jstring schemaName; Datum d; jobject invocable; - if ( trusted != ltrust ) - elog(ERROR, - "function with oid %u invoked through wrong call handler " - "for %strusted language %s", funcOid, ltrust ? "" : "un", - NameStr(lngStruct->lanname)); - d = heap_copy_tuple_as_datum(procTup, Type_getTupleDesc(s_pgproc_Type, 0)); schemaName = getSchemaName(procStruct->pronamespace); @@ -811,17 +805,14 @@ static Function Function_create( * in currentInvocation->function upon successful return from here. */ static inline Function -getFunction( - Oid funcOid, bool trusted, bool forTrigger, - bool forValidator, bool checkBody) +getFunction(Oid funcOid, bool forTrigger, bool forValidator, bool checkBody) { Function func = forValidator ? NULL : (Function)HashMap_getByOid(s_funcMap, funcOid); if ( NULL == func ) { - func = Function_create( - funcOid, trusted, forTrigger, forValidator, checkBody); + func = Function_create(funcOid, forTrigger, forValidator, checkBody); if ( NULL != func ) HashMap_putByOid(s_funcMap, funcOid, func); } @@ -830,11 +821,80 @@ getFunction( return func; } -jobject Function_getTypeMap(Function self) +/* + * Some functions that inquire about the "currently-executing" function + * (innermost, if there is more than one invocation of PL/Java on the stack); + * that is, they refer to currentInvocation->function as set in getFunction + * above. + * + * These functions have been moved closer together from originally scattered + * locations, which exposes their several different styles of checking for a + * current Invocation and its Function link. At one time, currentInvocation + * would be NULL when there was no call on the stack. There is now always a + * struct there, but its nestLevel (and other key values) are zero when + * nothing's been called. That should allow adapting these functions to a + * uniform, simpler style ... some other day. + */ + +bool Function_isCurrentReadOnly(void) +{ + /* + * function will be NULL during resolution of class and java function. At + * that time, no updates are allowed (or needed). (That's a little too glib; + * the effect of passing true to SPI functions having a read-only parameter, + * which is what the result of this function is used for, goes beyond + * preventing updates; it also implies the use of an existing snapshot + * instead of a newly-taken one, meaning recent preceding updates may not + * be visible. 
During class resolution, the class loader has in fact to + * override this, not because it intends to write anything, but in order + * to see newly-loaded jar files! install_jar(..., deploy=>true) necessarily + * has to find classes in a jar that has just been loaded in the same + * transaction.) + */ + if (currentInvocation->function == NULL) + return true; + return currentInvocation->function->readOnly; +} + +/* + * Returns a JNI global reference to the initiating (schema) class loader used + * to load the currently-executing function. + */ +jobject Function_currentLoader(void) +{ + Function f; + + if ( ! HAS_INVOCATION ) /* I believe this check is superfluous ... */ + { + if ( NULL != currentInvocation->function ) /* ... here's why. */ + elog(DEBUG1, /* I never expect to see this message. */ + "non-null ->function seen in Invocation with none current"); + return NULL; + } + f = currentInvocation->function; + if ( NULL == f ) + return NULL; + return f->schemaLoader; +} + +/* + * Returns the type map held by the innermost executing PL/Java function's + * schema loader (the initiating loader that was used to resolve the function). + * The type map is a map from Java Oid objects to Class, + * as resolved by that loader. This is effectively Function_currentLoader() + * followed by JNI-invoking getTypeMap on the loader, but cached to avoid JNI. + */ +jobject Function_currentTypeMap(void) { - return self->func.nonudt.typeMap; + Function f = currentInvocation->function; + return NULL == f ? NULL : f->func.nonudt.typeMap; } +/* + * True if this function is mentioned at any level of the stack of current + * PL/Java invocations. Used in Function_clearFunctionCache to avoid freeing + * the struct while referenced. + */ static bool Function_inUse(Function func) { Invocation* ic = currentInvocation; @@ -896,7 +956,7 @@ passAsPrimitive(Type t) Datum Function_invoke( - Oid funcoid, bool trusted, bool forTrigger, bool forValidator, + Oid funcoid, bool forTrigger, bool forValidator, bool checkBody, PG_FUNCTION_ARGS) { Function self; @@ -905,7 +965,7 @@ Function_invoke( Type invokerType; bool skipParameterConversion = false; - self = getFunction(funcoid, trusted, forTrigger, forValidator, checkBody); + self = getFunction(funcoid, forTrigger, forValidator, checkBody); if ( forValidator ) PG_RETURN_VOID(); @@ -1094,28 +1154,6 @@ void pljava_Function_setParameter(Function self, int index, jvalue value) JNI_setObjectArrayElement(s_referenceParameters, numRefs - 1, value.l); } -bool Function_isCurrentReadOnly(void) -{ - /* function will be 0 during resolve of class and java function. At - * that time, no updates are allowed (or needed). 
- */ - if (currentInvocation->function == 0) - return true; - return currentInvocation->function->readOnly; -} - -jobject Function_currentLoader(void) -{ - Function f; - - if ( NULL == currentInvocation ) - return NULL; - f = currentInvocation->function; - if ( NULL == f ) - return NULL; - return f->schemaLoader; -} - /* * Class: org_postgresql_pljava_internal_Function_EarlyNatives * Method: _parameterArea diff --git a/pljava-so/src/main/c/InstallHelper.c b/pljava-so/src/main/c/InstallHelper.c index 488ee0a66..aa0adda41 100644 --- a/pljava-so/src/main/c/InstallHelper.c +++ b/pljava-so/src/main/c/InstallHelper.c @@ -83,9 +83,7 @@ char const *pljavaLoadPath = NULL; bool pljavaLoadingAsExtension = false; -Oid pljavaTrustedOid = InvalidOid; - -Oid pljavaUntrustedOid = InvalidOid; +Oid pljavaOid = InvalidOid; bool pljavaViableXact() { @@ -381,10 +379,8 @@ bool InstallHelper_isPLJavaFunction(Oid fn, char **langName, bool *trusted) if ( NULL == pljavaLoadPath ) { pljPath = NULL; - if ( InvalidOid != pljavaTrustedOid ) - pljPath = pljavaFnOidToLibPath(pljavaTrustedOid, NULL, NULL); - if ( NULL == pljPath && InvalidOid != pljavaUntrustedOid ) - pljPath = pljavaFnOidToLibPath(pljavaUntrustedOid, NULL, NULL); + if ( InvalidOid != pljavaOid ) + pljPath = pljavaFnOidToLibPath(pljavaOid, NULL, NULL); if ( NULL == pljPath ) { elog(WARNING, "unable to determine PL/Java's load path"); diff --git a/pljava-so/src/main/c/Invocation.c b/pljava-so/src/main/c/Invocation.c index 37ed93bfb..d534aeb08 100644 --- a/pljava-so/src/main/c/Invocation.c +++ b/pljava-so/src/main/c/Invocation.c @@ -13,7 +13,8 @@ #include #include -#include "org_postgresql_pljava_jdbc_Invocation.h" +#include "org_postgresql_pljava_internal_Invocation.h" +#include "org_postgresql_pljava_internal_Invocation_EarlyNatives.h" #include "pljava/Invocation.h" #include "pljava/Function.h" #include "pljava/PgObject.h" @@ -24,12 +25,35 @@ #define LOCAL_FRAME_SIZE 128 -static jclass s_Invocation_class; -static jmethodID s_Invocation_onExit; -static jfieldID s_Invocation_s_unhandled; -static unsigned int s_callLevel = 0; +static jclass s_Invocation_class; +static jmethodID s_Invocation_onExit; +static jfieldID s_Invocation_s_unhandled; -Invocation* currentInvocation; +/** + * All of these initial values are as were formerly set in pushBootContext, + * leaving it to set only upperContext (a value that's not statically known). + * When nestLevel is zero, no call into a PL/Java function is in progress. 
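An illustrative sketch, not part of this patch: the HAS_INVOCATION tests used in other hunks of this change presumably boil down to a check of that nestLevel field. The macro definition below is assumed for illustration rather than taken from the source:

    #define HAS_INVOCATION (0 < currentInvocation->nestLevel)  /* assumed definition */

    static void example_guarded(void)
    {
        if ( ! HAS_INVOCATION )
            return;            /* no PL/Java function is on the stack */
        elog(DEBUG1, "PL/Java nesting level: %d",
             (int) currentInvocation->nestLevel);
    }
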
+ */ +Invocation currentInvocation[] = +{ + { + .nestLevel = 0, + .hasDual = false, + .errorOccurred = false, + .hasConnected = false, + .inExprContextCB = false, + .nonAtomic = false, + .upperContext = NULL, + .savedLoader = NULL, + .function = NULL, +#if PG_VERSION_NUM >= 100000 + .triggerData = NULL, +#endif + .previous = NULL, + .primSlot0.j = 0L, + .frameLimits = 0 + } +}; /* * Two features of the calling convention for PL/Java functions will be handled @@ -64,32 +88,32 @@ void Invocation_initialize(void) JNINativeMethod invocationMethods[] = { { - "_getCurrent", - "()Lorg/postgresql/pljava/jdbc/Invocation;", - Java_org_postgresql_pljava_jdbc_Invocation__1getCurrent - }, - { - "_getNestingLevel", - "()I", - Java_org_postgresql_pljava_jdbc_Invocation__1getNestingLevel - }, - { - "_clearErrorCondition", - "()V", - Java_org_postgresql_pljava_jdbc_Invocation__1clearErrorCondition - }, - { - "_register", - "()V", - Java_org_postgresql_pljava_jdbc_Invocation__1register + "_window", + "()Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_internal_Invocation_00024EarlyNatives__1window }, { 0, 0, 0 } }; - cls = PgObject_getJavaClass("org/postgresql/pljava/jdbc/Invocation"); - s_Invocation_class = JNI_newGlobalRef(cls); +#define CONFIRMOFFSET(fld) \ +StaticAssertStmt(offsetof(Invocation,fld) == \ +(org_postgresql_pljava_internal_Invocation_OFFSET_##fld), \ + "Java/C offset mismatch for " #fld) + + CONFIRMOFFSET(nestLevel); + CONFIRMOFFSET(hasDual); + CONFIRMOFFSET(errorOccurred); + CONFIRMOFFSET(upperContext); + +#undef CONFIRMOFFSET + + cls = PgObject_getJavaClass("org/postgresql/pljava/internal/Invocation$EarlyNatives"); PgObject_registerNatives2(cls, invocationMethods); - s_Invocation_onExit = PgObject_getJavaMethod(cls, "onExit", "(Z)V"); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/internal/Invocation"); + s_Invocation_class = JNI_newGlobalRef(cls); + s_Invocation_onExit = PgObject_getStaticJavaMethod(cls, "onExit", "(IZ)V"); s_Invocation_s_unhandled = PgObject_getStaticJavaField( cls, "s_unhandled", "Ljava/sql/SQLException;"); JNI_deleteLocalRef(cls); @@ -100,7 +124,8 @@ void Invocation_assertConnect(void) int rslt; if(!currentInvocation->hasConnected) { - rslt = SPI_connect(); + rslt = SPI_connect_ext( + currentInvocation->nonAtomic ? SPI_OPT_NONATOMIC : 0); if ( SPI_OK_CONNECT != rslt ) elog(ERROR, "SPI_connect returned %s", SPI_result_code_string(rslt)); @@ -126,44 +151,19 @@ void Invocation_assertDisconnect(void) } } -/* - * Return the type map held by the innermost executing PL/Java function's - * schema loader (the initiating loader that was used to resolve the function). - * The type map is a map from Java Oid objects to Class class objects, - * as resolved by that loader. This is effectively Function_currentLoader() - * followed by JNI-invoking getTypeMap on the loader, but cached to avoid JNI). - */ -jobject Invocation_getTypeMap(void) -{ - Function f = currentInvocation->function; - return f == 0 ? 
0 : Function_getTypeMap(f); -} - void Invocation_pushBootContext(Invocation* ctx) { JNI_pushLocalFrame(LOCAL_FRAME_SIZE); - ctx->invocation = 0; - ctx->function = 0; - ctx->frameLimits = 0; - ctx->primSlot0.j = 0L; - ctx->savedLoader = 0; - ctx->hasConnected = false; - ctx->upperContext = CurrentMemoryContext; - ctx->errorOccurred = false; - ctx->inExprContextCB = false; - ctx->previous = 0; -#if PG_VERSION_NUM >= 100000 - ctx->triggerData = 0; -#endif - currentInvocation = ctx; - ++s_callLevel; + *ctx = *currentInvocation; + currentInvocation->previous = ctx; + currentInvocation->upperContext = CurrentMemoryContext; + ++ currentInvocation->nestLevel; } void Invocation_popBootContext(void) { JNI_popLocalFrame(0); - currentInvocation = 0; - --s_callLevel; + *currentInvocation = *currentInvocation->previous; /* * Nothing is done here with savedLoader. It is just set to 0 in * pushBootContext (uses can precede allocation of the sentinel value), @@ -175,21 +175,23 @@ void Invocation_popBootContext(void) void Invocation_pushInvocation(Invocation* ctx) { JNI_pushLocalFrame(LOCAL_FRAME_SIZE); - ctx->invocation = 0; + *ctx = *currentInvocation; + currentInvocation->previous = ctx; + ctx = currentInvocation; /* just to keep the notation compact below */ ctx->function = 0; ctx->frameLimits = *s_frameLimits; ctx->primSlot0 = *s_primSlot0; ctx->savedLoader = pljava_Function_NO_LOADER; ctx->hasConnected = false; + ctx->nonAtomic = false; ctx->upperContext = CurrentMemoryContext; ctx->errorOccurred = false; ctx->inExprContextCB = false; - ctx->previous = currentInvocation; #if PG_VERSION_NUM >= 100000 ctx->triggerData = 0; #endif - currentInvocation = ctx; - ++s_callLevel; + ctx->hasDual = false; + ++ ctx->nestLevel; } void Invocation_popInvocation(bool wasException) @@ -217,15 +219,18 @@ void Invocation_popInvocation(bool wasException) * invocation, delete the reference (after calling its onExit method, * indicating whether the return is exceptional or not). */ - if(currentInvocation->invocation != 0) + if ( currentInvocation->hasDual ) { - JNI_callVoidMethodLocked( - currentInvocation->invocation, s_Invocation_onExit, + JNI_callStaticVoidMethodLocked( + s_Invocation_class, s_Invocation_onExit, + (jint)currentInvocation->nestLevel, (wasException || unhandled) ? JNI_TRUE : JNI_FALSE); - JNI_deleteGlobalRef(currentInvocation->invocation); } + if(currentInvocation->hasConnected) + SPI_finish(); + if ( unhandled ) { jthrowable ex = (jthrowable)JNI_getStaticObjectField( @@ -238,28 +243,24 @@ void Invocation_popInvocation(bool wasException) wasException ? DEBUG2 : already_hit ? WARNING : DEBUG1); } + JNI_popLocalFrame(0); + /* - * Do nativeRelease for any DualState instances scoped to this invocation. + * Return to the context that was effective at pushInvocation of *this* + * invocation. */ - pljava_DualState_nativeRelease(currentInvocation); + MemoryContextSwitchTo(currentInvocation->upperContext); /* * Check for any DualState objects that became unreachable and can be freed. + * In this late position, it might find things that became unreachable with + * the release of SPI contexts or JNI local frame references; having first + * switched back to the upperContext, the chance that any contexts possibly + * released in cleaning could be the current one are minimized. 
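A sketch of the calling pattern, for orientation only (not code from this patch): the Invocation_pushInvocation / Invocation_popInvocation pair above is meant to bracket a call into Java so that the pop runs on both the normal and the error path, along these lines:

    Invocation ctx;
    Invocation_pushInvocation(&ctx);
    PG_TRY();
    {
        /* ... dispatch into the Java routine here ... */
        Invocation_popInvocation(false);
    }
    PG_CATCH();
    {
        Invocation_popInvocation(true);   /* wasException */
        PG_RE_THROW();
    }
    PG_END_TRY();
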
*/ pljava_DualState_cleanEnqueuedInstances(); - if(currentInvocation->hasConnected) - SPI_finish(); - - JNI_popLocalFrame(0); - - if(ctx != 0) - { - MemoryContextSwitchTo(ctx->upperContext); - } - - currentInvocation = ctx; - --s_callLevel; + *currentInvocation = *ctx; } MemoryContext @@ -269,55 +270,13 @@ Invocation_switchToUpperContext(void) } /* - * Class: org_postgresql_pljava_jdbc_Invocation - * Method: _getNestingLevel - * Signature: ()I - */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_jdbc_Invocation__1getNestingLevel(JNIEnv* env, jclass cls) -{ - return s_callLevel; -} - -/* - * Class: org_postgresql_pljava_jdbc_Invocation - * Method: _getCurrent - * Signature: ()Lorg/postgresql/pljava/jdbc/Invocation; + * Class: org_postgresql_pljava_internal_Invocation_EarlyNatives + * Method: _window + * Signature: ()Ljava/nio/ByteBuffer; */ JNIEXPORT jobject JNICALL -Java_org_postgresql_pljava_jdbc_Invocation__1getCurrent(JNIEnv* env, jclass cls) -{ - return currentInvocation->invocation; -} - -/* - * Class: org_postgresql_pljava_jdbc_Invocation - * Method: _clearErrorCondition - * Signature: ()V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_jdbc_Invocation__1clearErrorCondition(JNIEnv* env, jclass cls) -{ - currentInvocation->errorOccurred = false; -} - -/* - * Class: org_postgresql_pljava_jdbc_Invocation - * Method: _register - * Signature: ()V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_jdbc_Invocation__1register(JNIEnv* env, jobject _this) +Java_org_postgresql_pljava_internal_Invocation_00024EarlyNatives__1window(JNIEnv* env, jobject _cls) { - if ( NULL == currentInvocation->invocation ) - { - currentInvocation->invocation = (*env)->NewGlobalRef(env, _this); - return; - } - if ( (*env)->IsSameObject(env, currentInvocation->invocation, _this) ) - return; - BEGIN_NATIVE - Exception_throw(ERRCODE_INTERNAL_ERROR, - "mismanaged PL/Java invocation stack"); - END_NATIVE + return (*env)->NewDirectByteBuffer(env, + currentInvocation, sizeof *currentInvocation); } diff --git a/pljava-so/src/main/c/JNICalls.c b/pljava-so/src/main/c/JNICalls.c index 4e496b0da..7e87be1b6 100644 --- a/pljava-so/src/main/c/JNICalls.c +++ b/pljava-so/src/main/c/JNICalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -317,10 +317,14 @@ bool beginNativeNoErrCheck(JNIEnv* env) bool beginNative(JNIEnv* env) { - if (!currentInvocation) + if ( ! HAS_INVOCATION ) { env = JNI_setEnv(env); - Exception_throw(ERRCODE_INTERNAL_ERROR, "An attempt was made to call a PostgreSQL backend function in a transaction callback. At the end of a transaction you may not access the database any longer."); + Exception_throw(ERRCODE_INTERNAL_ERROR, + "An attempt was made to call a PostgreSQL backend function " + "when no PL/Java function was active (such as in a transaction " + "callback. At the end of a transaction you may not access " + "the database any longer."); JNI_setEnv(env); return false; } diff --git a/pljava-so/src/main/c/ModelConstants.c b/pljava-so/src/main/c/ModelConstants.c new file mode 100644 index 000000000..5fa86dc28 --- /dev/null +++ b/pljava-so/src/main/c/ModelConstants.c @@ -0,0 +1,821 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +#include + +#if PG_VERSION_NUM < 140000 +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include +#include + +#include "org_postgresql_pljava_pg_CatalogObjectImpl_Factory.h" +#include "org_postgresql_pljava_pg_LookupImpl.h" +#include "org_postgresql_pljava_pg_ModelConstants.h" +#include "org_postgresql_pljava_pg_ModelConstants_Natives.h" +#include "org_postgresql_pljava_pg_TriggerImpl.h" +#include "org_postgresql_pljava_pg_TupleTableSlotImpl.h" + +#include +#include "org_postgresql_pljava_pg_AclItem.h" + +#include + +#include "pljava/PgObject.h" +#include "pljava/ModelConstants.h" + +/* + * A compilation unit collecting various machine- or PostgreSQL-related + * constants that have to be known in Java code. Those that are expected to be + * stable can be defined in Java code, included from the Java-generated .h files + * and simply confirmed here (in the otherwise-unused dummy() method) by static + * assertions comparing them to the real values. Those that are expected to vary + * (between PostgreSQL versions, or target platforms, or both) are a bit more + * effort: their values are compiled into the constants[] array here, at indexes + * known to the Java code, and the _statics() native method will return a direct + * ByteBuffer through which the Java code can read them. + * + * To confirm the expected order of the array elements, each constant gets two + * consecutive array members, first the expected index, then the value. The + * CONSTANT macro below generates both, for the common case where the constant + * is known here by the name FOO and the Java index is in static field IDX_FOO + * in the ModelConstants class. CONSTANTEXPR is for the cases without that + * direct name correspondence. + * + * NOCONSTANT supplies the value ModelConstants.NOCONSTANT, intended for when + * the version of PG being built for does not define the constant in question + * (and when the NOCONSTANT value wouldn't be a valid value of the constant!). 
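To make the (index, value) layout concrete, a sketch with a hypothetical helper (not part of the patch): each constant occupies two consecutive int32 slots, so a consumer that knows the expected index can verify the ordering before trusting the value:

    static int32 constant_value(int expected_index)
    {
        /* slots 2n and 2n+1 hold the expected index and the value */
        if ( constants[2 * expected_index] != expected_index )
            elog(ERROR, "ModelConstants: misordered entry %d", expected_index);
        return constants[2 * expected_index + 1];
    }
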
+ */ + +#define CONSTANT(c) (org_postgresql_pljava_pg_ModelConstants_IDX_##c), (c) +#define CONSTANTEXPR(c,v) (org_postgresql_pljava_pg_ModelConstants_IDX_##c), (v) +#define NOCONSTANT(c) \ + CONSTANTEXPR(c,org_postgresql_pljava_pg_ModelConstants_NOCONSTANT) + +#define FORMOFFSET(form,fld) \ + CONSTANTEXPR(OFFSET_##form##_##fld, offsetof(FormData_##form,fld)) + +#define TYPEOFFSET(type,tag,fld) \ + CONSTANTEXPR(OFFSET_##tag##_##fld, offsetof(type,fld)) + +static int32 constants[] = { + CONSTANT(PG_VERSION_NUM), + + CONSTANT(SIZEOF_DATUM), + CONSTANTEXPR(SIZEOF_INT, sizeof (int)), + CONSTANTEXPR(SIZEOF_LONG, sizeof (long)), + CONSTANTEXPR(SIZEOF_SIZE, sizeof (Size)), + + CONSTANT(ALIGNOF_SHORT), + CONSTANT(ALIGNOF_INT), + CONSTANT(ALIGNOF_DOUBLE), + CONSTANT(MAXIMUM_ALIGNOF), + + CONSTANT(NAMEDATALEN), + + + + CONSTANTEXPR(SIZEOF_varatt_indirect, sizeof (varatt_indirect)), + CONSTANTEXPR(SIZEOF_varatt_expanded, sizeof (varatt_expanded)), + CONSTANTEXPR(SIZEOF_varatt_external, sizeof (varatt_external)), + + TYPEOFFSET(RelationData,Relation,rd_id), + + + CONSTANT(HEAPTUPLESIZE), + CONSTANTEXPR(OFFSET_TTS_NVALID, offsetof(TupleTableSlot, tts_nvalid)), + CONSTANTEXPR(SIZEOF_TTS_NVALID, sizeof ((TupleTableSlot *)0)->tts_nvalid), + +#if PG_VERSION_NUM >= 120000 + CONSTANT(TTS_FLAG_EMPTY), + CONSTANT(TTS_FLAG_FIXED), + CONSTANTEXPR(OFFSET_TTS_FLAGS, offsetof(TupleTableSlot, tts_flags)), + NOCONSTANT(OFFSET_TTS_EMPTY), + NOCONSTANT(OFFSET_TTS_FIXED), + CONSTANTEXPR(OFFSET_TTS_TABLEOID, offsetof(TupleTableSlot, tts_tableOid)), +#else + NOCONSTANT(TTS_FLAG_EMPTY), + NOCONSTANT(TTS_FLAG_FIXED), + NOCONSTANT(OFFSET_TTS_FLAGS), + CONSTANTEXPR(OFFSET_TTS_EMPTY, offsetof(TupleTableSlot, tts_isempty)), +#if PG_VERSION_NUM >= 110000 + CONSTANTEXPR(OFFSET_TTS_FIXED, + offsetof(TupleTableSlot, tts_fixedTupleDescriptor)), +#else + NOCONSTANT(OFFSET_TTS_FIXED), +#endif /* 110000 */ + NOCONSTANT(OFFSET_TTS_TABLEOID), +#endif /* 120000 */ + + TYPEOFFSET(NullableDatum, NullableDatum, isnull), + CONSTANTEXPR(SIZEOF_NullableDatum, sizeof (NullableDatum)), + + TYPEOFFSET(FunctionCallInfoBaseData, fcinfo, fncollation), + TYPEOFFSET(FunctionCallInfoBaseData, fcinfo, isnull), + TYPEOFFSET(FunctionCallInfoBaseData, fcinfo, nargs), + TYPEOFFSET(FunctionCallInfoBaseData, fcinfo, args), + + TYPEOFFSET(Bitmapset, Bitmapset, words), + + + /* + * Prior to PG 18, TupleDesc->attrs was where the Form_pg_attribute copies + * actually started. As of PG 18, they must be found from the offset of + * compact_attrs (a constant, supplied here), plus the size of nattrs + * CompactAttribute structs (a bit of arithmetic for the Java code to do). 
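The arithmetic alluded to, spelled out in C as a sketch (assuming PG_VERSION_NUM >= 180000, where CompactAttribute exists; the Java code performs the equivalent using the offset constant supplied here and the descriptor's natts):

    static Form_pg_attribute first_attribute(TupleDesc td)
    {
        char *base = (char *) td;
        Size  off  = offsetof(struct TupleDescData, compact_attrs)
                   + td->natts * sizeof (CompactAttribute);
        return (Form_pg_attribute) (base + off);   /* first of natts entries */
    }
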
+ */ +#if PG_VERSION_NUM < 180000 + CONSTANTEXPR(OFFSET_TUPLEDESC_ATTRS, offsetof(struct TupleDescData, attrs)), +#else + CONSTANTEXPR(OFFSET_TUPLEDESC_ATTRS, + offsetof(struct TupleDescData, compact_attrs)), +#endif + CONSTANTEXPR(OFFSET_TUPLEDESC_TDREFCOUNT, + offsetof(struct TupleDescData, tdrefcount)), + CONSTANTEXPR(SIZEOF_TUPLEDESC_TDREFCOUNT, + sizeof ((struct TupleDescData *)0)->tdrefcount), + CONSTANTEXPR(OFFSET_TUPLEDESC_TDTYPEID, + offsetof(struct TupleDescData, tdtypeid)), + CONSTANTEXPR(OFFSET_TUPLEDESC_TDTYPMOD, + offsetof(struct TupleDescData, tdtypmod)), + + + + CONSTANTEXPR(SIZEOF_FORM_PG_ATTRIBUTE, sizeof (FormData_pg_attribute)), + CONSTANT(ATTRIBUTE_FIXED_PART_SIZE), + FORMOFFSET( pg_attribute, atttypid ), + FORMOFFSET( pg_attribute, attlen ), + /* available(1) */ + FORMOFFSET( pg_attribute, atttypmod ), + FORMOFFSET( pg_attribute, attbyval ), + FORMOFFSET( pg_attribute, attalign ), + FORMOFFSET( pg_attribute, attnotnull ), + FORMOFFSET( pg_attribute, attisdropped ), + + + + CONSTANT(CLASS_TUPLE_SIZE), + CONSTANT( Anum_pg_class_reltype ), + + + + CONSTANTEXPR(SIZEOF_MCTX, sizeof (MemoryContextData)), + TYPEOFFSET(MemoryContextData, MCTX, isReset), + TYPEOFFSET(MemoryContextData, MCTX, mem_allocated), + TYPEOFFSET(MemoryContextData, MCTX, parent), + TYPEOFFSET(MemoryContextData, MCTX, firstchild), + TYPEOFFSET(MemoryContextData, MCTX, prevchild), + TYPEOFFSET(MemoryContextData, MCTX, nextchild), + TYPEOFFSET(MemoryContextData, MCTX, name), + TYPEOFFSET(MemoryContextData, MCTX, ident), + + + + CONSTANT(N_ACL_RIGHTS), + CONSTANT(BITS_PER_BITMAPWORD), + + + + CONSTANT(T_Invalid), + CONSTANT(T_AggState), + CONSTANT(T_CallContext), + CONSTANT(T_EventTriggerData), + CONSTANT(T_ReturnSetInfo), + CONSTANT(T_TriggerData), + CONSTANT(T_WindowAggState), + CONSTANT(T_WindowObjectData), +#if PG_VERSION_NUM >= 160000 + CONSTANT(T_Bitmapset), + CONSTANT(T_ErrorSaveContext), +#else + CONSTANTEXPR(T_Bitmapset, T_Invalid), + CONSTANTEXPR(T_ErrorSaveContext, T_Invalid), +#endif + + + + TYPEOFFSET(Trigger, TRG, tgoid), + TYPEOFFSET(Trigger, TRG, tgname), + TYPEOFFSET(Trigger, TRG, tgfoid), + TYPEOFFSET(Trigger, TRG, tgtype), + TYPEOFFSET(Trigger, TRG, tgenabled), + TYPEOFFSET(Trigger, TRG, tgisinternal), + TYPEOFFSET(Trigger, TRG, tgisclone), + TYPEOFFSET(Trigger, TRG, tgconstrrelid), + TYPEOFFSET(Trigger, TRG, tgconstrindid), + TYPEOFFSET(Trigger, TRG, tgconstraint), + TYPEOFFSET(Trigger, TRG, tgdeferrable), + TYPEOFFSET(Trigger, TRG, tginitdeferred), + TYPEOFFSET(Trigger, TRG, tgnargs), + TYPEOFFSET(Trigger, TRG, tgnattr), + TYPEOFFSET(Trigger, TRG, tgattr), + TYPEOFFSET(Trigger, TRG, tgargs), + TYPEOFFSET(Trigger, TRG, tgqual), + TYPEOFFSET(Trigger, TRG, tgoldtable), + TYPEOFFSET(Trigger, TRG, tgnewtable), + CONSTANTEXPR(SIZEOF_Trigger, sizeof (Trigger)), + + + + TYPEOFFSET(TriggerData, TRGD, tg_event), + TYPEOFFSET(TriggerData, TRGD, tg_relation), + TYPEOFFSET(TriggerData, TRGD, tg_trigtuple), + TYPEOFFSET(TriggerData, TRGD, tg_newtuple), + TYPEOFFSET(TriggerData, TRGD, tg_trigger), + TYPEOFFSET(TriggerData, TRGD, tg_updatedcols), + + TYPEOFFSET(ReturnSetInfo, RSI, allowedModes), + TYPEOFFSET(ReturnSetInfo, RSI, isDone), + TYPEOFFSET(ReturnSetInfo, RSI, returnMode), + CONSTANTEXPR(SIZEOF_RSI_isDone, sizeof ((ReturnSetInfo *)0)->isDone), + CONSTANTEXPR(SIZEOF_RSI_returnMode,sizeof ((ReturnSetInfo *)0)->returnMode), + + + + CONSTANT(ATTNUM), + CONSTANT(AUTHMEMMEMROLE), + CONSTANT(AUTHMEMROLEMEM), + CONSTANT(AUTHOID), + CONSTANT(COLLOID), + CONSTANT(DATABASEOID), + CONSTANT(LANGOID), + 
CONSTANT(NAMESPACEOID), + CONSTANT(OPEROID), + CONSTANT(PROCOID), + CONSTANT(RELOID), + CONSTANT(TSCONFIGOID), + CONSTANT(TSDICTOID), + CONSTANT(TYPEOID), + CONSTANT(CONSTROID), + CONSTANT(TRFOID), + CONSTANT(TRFTYPELANG), + CONSTANT(AMOID), + CONSTANT(TABLESPACEOID), + CONSTANT(FOREIGNDATAWRAPPEROID), + CONSTANT(FOREIGNSERVEROID), + CONSTANT(FOREIGNTABLEREL), + + + +}; + +#undef CONSTANT +#undef CONSTANTEXPR + +static void dummy(Bitmapset *bitmapset, ReturnSetInfo *rsi) +{ + StaticAssertStmt(SIZEOF_DATUM == SIZEOF_VOID_P, + "PostgreSQL SIZEOF_DATUM and SIZEOF_VOID_P no longer equivalent?"); + + AssertVariableIsOfType(bitmapset->nwords, int); /* DatumUtils.java */ + AssertVariableIsOfType(rsi->allowedModes, int); /* LookupImpl.java */ + +/* + * BEGIN:CONFIRMCONST for CatalogObjectImpl.Factory constants + */ + +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == \ +(org_postgresql_pljava_pg_CatalogObjectImpl_Factory_##c), \ + "Java/C value mismatch for " #c) + + CONFIRMCONST( InvalidOid ); + + CONFIRMCONST( TableSpaceRelationId ); + CONFIRMCONST( TypeRelationId ); + CONFIRMCONST( AttributeRelationId ); + CONFIRMCONST( ProcedureRelationId ); + CONFIRMCONST( RelationRelationId ); + CONFIRMCONST( AuthIdRelationId ); + CONFIRMCONST( DatabaseRelationId ); + CONFIRMCONST( ForeignServerRelationId ); + CONFIRMCONST( ForeignDataWrapperRelationId ); + CONFIRMCONST( AccessMethodRelationId ); + CONFIRMCONST( ConstraintRelationId ); + CONFIRMCONST( LanguageRelationId ); + CONFIRMCONST( NamespaceRelationId ); + CONFIRMCONST( OperatorRelationId ); + CONFIRMCONST( TriggerRelationId ); + CONFIRMCONST( ExtensionRelationId ); + CONFIRMCONST( ForeignTableRelationId ); /* <-CatalogObjectImpl only */ + CONFIRMCONST( CollationRelationId ); + CONFIRMCONST( TransformRelationId ); + CONFIRMCONST( TSDictionaryRelationId ); + CONFIRMCONST( TSConfigRelationId ); + + /* + * PG types good to have around because of corresponding JDBC types. + */ + CONFIRMCONST( BOOLOID ); + CONFIRMCONST( BYTEAOID ); + CONFIRMCONST( CHAROID ); + CONFIRMCONST( INT8OID ); + CONFIRMCONST( INT2OID ); + CONFIRMCONST( INT4OID ); + CONFIRMCONST( XMLOID ); + CONFIRMCONST( FLOAT4OID ); + CONFIRMCONST( FLOAT8OID ); + CONFIRMCONST( BPCHAROID ); + CONFIRMCONST( VARCHAROID ); + CONFIRMCONST( DATEOID ); + CONFIRMCONST( TIMEOID ); + CONFIRMCONST( TIMESTAMPOID ); + CONFIRMCONST( TIMESTAMPTZOID ); + CONFIRMCONST( TIMETZOID ); + CONFIRMCONST( BITOID ); + CONFIRMCONST( VARBITOID ); + CONFIRMCONST( NUMERICOID ); + + /* + * PG types not mentioned in JDBC but bread-and-butter to PG devs. + */ + CONFIRMCONST( TEXTOID ); + CONFIRMCONST( UNKNOWNOID ); + CONFIRMCONST( RECORDOID ); + CONFIRMCONST( CSTRINGOID ); + CONFIRMCONST( VOIDOID ); + + /* + * PG types used in modeling PG types themselves. + */ + CONFIRMCONST( NAMEOID ); + CONFIRMCONST( REGPROCOID ); + CONFIRMCONST( OIDOID ); + CONFIRMCONST( PG_NODE_TREEOID ); + CONFIRMCONST( ACLITEMOID ); + CONFIRMCONST( REGPROCEDUREOID ); + CONFIRMCONST( REGOPEROID ); + CONFIRMCONST( REGOPERATOROID ); + CONFIRMCONST( REGCLASSOID ); + CONFIRMCONST( REGTYPEOID ); + CONFIRMCONST( TRIGGEROID ); + CONFIRMCONST( REGCONFIGOID ); + CONFIRMCONST( REGDICTIONARYOID ); + CONFIRMCONST( REGNAMESPACEOID ); + CONFIRMCONST( REGROLEOID ); + CONFIRMCONST( REGCOLLATIONOID ); + + /* + * The PG polymorphic pseudotypes. Of these, only ANYARRAYOID is in + * CatalogObject.Factory (because API has RegType.ANYARRAY), while the rest + * are in CatalogObjectImpl.Factory. 
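Spelled out once so the pattern is clear (an illustration of what one use of the CONFIRMCONST macro above expands to, not additional code):

    /* CONFIRMCONST( ANYARRAYOID ); is, after expansion, equivalent to: */
    StaticAssertStmt(ANYARRAYOID ==
        org_postgresql_pljava_pg_CatalogObjectImpl_Factory_ANYARRAYOID,
        "Java/C value mismatch for ANYARRAYOID");
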
+ */ + CONFIRMCONST( ANYOID ); + + CONFIRMCONST( ANYARRAYOID ); + CONFIRMCONST( ANYELEMENTOID ); + CONFIRMCONST( ANYNONARRAYOID ); + CONFIRMCONST( ANYENUMOID ); + CONFIRMCONST( ANYRANGEOID ); +#if PG_VERSION_NUM >= 140000 + CONFIRMCONST( ANYMULTIRANGEOID ); + + CONFIRMCONST( ANYCOMPATIBLEMULTIRANGEOID ); +#endif +#if PG_VERSION_NUM >= 130000 + CONFIRMCONST( ANYCOMPATIBLEOID ); + CONFIRMCONST( ANYCOMPATIBLEARRAYOID ); + CONFIRMCONST( ANYCOMPATIBLENONARRAYOID ); + CONFIRMCONST( ANYCOMPATIBLERANGEOID ); +#endif + + /* + * The well-known, pinned procedural languages. + */ + CONFIRMCONST( INTERNALlanguageId ); + CONFIRMCONST( ClanguageId ); + CONFIRMCONST( SQLlanguageId ); + + /* + * The well-known, pinned namespaces. + */ + CONFIRMCONST( PG_CATALOG_NAMESPACE ); + CONFIRMCONST( PG_TOAST_NAMESPACE ); + + /* + * The well-known, pinned collations. + */ + CONFIRMCONST( DEFAULT_COLLATION_OID ); + CONFIRMCONST( C_COLLATION_OID ); + +#undef CONFIRMCONST + +/* + * END:CONFIRMCONST for CatalogObjectImpl.Factory constants + * + * BEGIN:CONFIRMCONST for AclItem constants + * BEGIN:CONFIRMOFFSET for AclItem constants + */ + +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == \ +(org_postgresql_pljava_pg_AclItem_##c), \ + "Java/C value mismatch for " #c) + +#define CONFIRMOFFSET(typ,fld) \ +StaticAssertStmt(offsetof(typ,fld) == \ +(org_postgresql_pljava_pg_AclItem_OFFSET_##fld), \ + "Java/C offset mismatch for " #fld) + + CONFIRMCONST( ACL_INSERT ); + CONFIRMCONST( ACL_SELECT ); + CONFIRMCONST( ACL_UPDATE ); + CONFIRMCONST( ACL_DELETE ); + CONFIRMCONST( ACL_TRUNCATE ); + CONFIRMCONST( ACL_REFERENCES ); + CONFIRMCONST( ACL_TRIGGER ); + CONFIRMCONST( ACL_EXECUTE ); + CONFIRMCONST( ACL_USAGE ); + CONFIRMCONST( ACL_CREATE ); + CONFIRMCONST( ACL_CREATE_TEMP ); + CONFIRMCONST( ACL_CONNECT ); +#if PG_VERSION_NUM >= 150000 + CONFIRMCONST( ACL_SET ); + CONFIRMCONST( ACL_ALTER_SYSTEM); +#endif +#if PG_VERSION_NUM >= 170000 + CONFIRMCONST( ACL_MAINTAIN ); +#endif + CONFIRMCONST( ACL_ID_PUBLIC ); + + CONFIRMOFFSET( AclItem, ai_grantee ); + CONFIRMOFFSET( AclItem, ai_grantor ); + CONFIRMOFFSET( AclItem, ai_privs ); + +#undef CONFIRMCONST +#undef CONFIRMOFFSET + +/* + * END:CONFIRMCONST for AclItem constants + * END:CONFIRMOFFSET for AclItem constants + * + * BEGIN:CONFIRMCONST for ModelConstants constants + * BEGIN:CONFIRMEXPR for ModelConstants constants + * + * BEGIN:CONFIRMSIZEOF for ModelConstants FormData_* structs + * BEGIN:CONFIRMOFFSET for ModelConstants FormData_* structs + * BEGIN:CONFIRMATTNUM for ModelConstants Anum_* constants + */ + +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == \ +(org_postgresql_pljava_pg_ModelConstants_##c), \ + "Java/C value mismatch for " #c) +#define CONFIRMEXPR(c,expr) \ +StaticAssertStmt((expr) == \ +(org_postgresql_pljava_pg_ModelConstants_##c), \ + "Java/C value mismatch for " #c) + +#define CONFIRMSIZEOF(form,fld) \ +StaticAssertStmt((sizeof ((FormData_##form *)0)->fld) == \ +(org_postgresql_pljava_pg_ModelConstants_SIZEOF_##form##_##fld), \ + "Java/C sizeof mismatch for " #form "." #fld) +#define CONFIRMOFFSET(form,fld) \ +StaticAssertStmt(offsetof(FormData_##form,fld) == \ +(org_postgresql_pljava_pg_ModelConstants_OFFSET_##form##_##fld), \ + "Java/C offset mismatch for " #form "." #fld) +#define CONFIRMATTNUM(form,fld) \ +StaticAssertStmt(Anum_##form##_##fld == \ +(org_postgresql_pljava_pg_ModelConstants_Anum_##form##_##fld), \ + "Java/C attribute number mismatch for " #form "." 
#fld) + + CONFIRMCONST( PG_SQL_ASCII ); + CONFIRMCONST( PG_UTF8 ); + CONFIRMCONST( PG_LATIN1 ); + CONFIRMCONST( PG_ENCODING_BE_LAST ); + + CONFIRMCONST( VARHDRSZ ); + CONFIRMCONST( VARHDRSZ_EXTERNAL ); + CONFIRMCONST( VARTAG_INDIRECT ); + CONFIRMCONST( VARTAG_EXPANDED_RO ); + CONFIRMCONST( VARTAG_EXPANDED_RW ); + CONFIRMCONST( VARTAG_ONDISK ); + + CONFIRMATTNUM( pg_attribute, attname ); + + CONFIRMSIZEOF( pg_attribute, atttypid ); + CONFIRMSIZEOF( pg_attribute, attlen ); + CONFIRMSIZEOF( pg_attribute, atttypmod ); + CONFIRMSIZEOF( pg_attribute, attbyval ); + CONFIRMSIZEOF( pg_attribute, attalign ); + CONFIRMSIZEOF( pg_attribute, attnotnull ); + CONFIRMSIZEOF( pg_attribute, attisdropped ); + +#if PG_VERSION_NUM >= 180000 + CONFIRMEXPR( SIZEOF_CompactAttribute, sizeof (CompactAttribute)); +#endif + + CONFIRMATTNUM( pg_extension, oid ); + CONFIRMCONST( ExtensionOidIndexId ); + + CONFIRMATTNUM( pg_trigger, oid ); + CONFIRMCONST( TriggerOidIndexId ); + +#undef CONFIRMSIZEOF +#undef CONFIRMOFFSET +#undef CONFIRMATTNUM + +/* + * END:CONFIRMSIZEOF for ModelConstants FormData_* structs + * END:CONFIRMOFFSET for ModelConstants FormData_* structs + * END:CONFIRMATTNUM for ModelConstants Anum_* constants + * + * BEGIN:CONFIRMOFFSET for ModelConstants, arbitrary structs + * BEGIN:CONFIRMSIZEOF for ModelConstants, arbitrary structs + * BEGIN:CONFIRMSIZETAG lets the ModelConstants name use a shorter tag + * + * BEGIN:CONFIRMVLOFFSET for varlena offsets, which in Java don't count VARHDRSZ + */ + +#define CONFIRMOFFSET(strct,fld) \ +StaticAssertStmt(offsetof(strct,fld) == \ +(org_postgresql_pljava_pg_ModelConstants_OFFSET_##strct##_##fld), \ + "Java/C offset mismatch for " #strct "." #fld) +#define CONFIRMSIZEOF(strct,fld) \ +StaticAssertStmt((sizeof ((strct *)0)->fld) == \ +(org_postgresql_pljava_pg_ModelConstants_SIZEOF_##strct##_##fld), \ + "Java/C sizeof mismatch for " #strct "." #fld) +#define CONFIRMSIZETAG(strct,tag,fld) \ +StaticAssertStmt((sizeof ((strct *)0)->fld) == \ +(org_postgresql_pljava_pg_ModelConstants_SIZEOF_##tag##_##fld), \ + "Java/C sizeof mismatch for " #strct "." #fld) + +#define CONFIRMVLOFFSET(strct,fld) \ +StaticAssertStmt(offsetof(strct,fld) - VARHDRSZ == \ +(org_postgresql_pljava_pg_ModelConstants_OFFSET_##strct##_##fld), \ + "Java/C offset mismatch for " #strct "." #fld) + + CONFIRMSIZEOF( ArrayType, ndim ); + CONFIRMSIZEOF( ArrayType, dataoffset ); + CONFIRMSIZEOF( ArrayType, elemtype ); + + CONFIRMVLOFFSET( ArrayType, ndim ); + CONFIRMVLOFFSET( ArrayType, dataoffset ); + CONFIRMVLOFFSET( ArrayType, elemtype ); + +#if 0 + /* + * Given the way ARR_DIMS is defined in PostgreSQL's array.h, there seems + * to be no way to construct a static assertion for this offset acceptable + * to a compiler that forbids "the conversions of a reinterpret_cast" in + * a constant expression. This will have to be checked in an old-fashioned + * runtime assertion in _initialize, losing the benefit of compile-time + * detection. 
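For context: ARR_DIMS in PostgreSQL's utils/array.h is defined by pointer arithmetic on the array pointer, approximately as sketched below, which is why no offsetof-style constant expression is available and the check is deferred to a run-time test in pljava_ModelConstants_initialize:

    /* approximate shape of the upstream macro */
    #define ARR_DIMS(a) \
        ((int *) (((char *) (a)) + sizeof(ArrayType)))
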
+ */ + CONFIRMEXPR( OFFSET_ArrayType_DIMS, + (((char*)ARR_DIMS(0)) - (char *)0) - VARHDRSZ ); +#endif + + CONFIRMEXPR( SIZEOF_ArrayType_DIM, sizeof *ARR_DIMS(0) ); + + CONFIRMEXPR( SIZEOF_NodeTag, sizeof (NodeTag) ); + CONFIRMEXPR( SIZEOF_Oid, sizeof (Oid) ); + + CONFIRMSIZETAG( FunctionCallInfoBaseData, fcinfo, fncollation ); + CONFIRMSIZETAG( FunctionCallInfoBaseData, fcinfo, isnull ); + CONFIRMSIZETAG( FunctionCallInfoBaseData, fcinfo, nargs ); + +#undef CONFIRMVLOFFSET +#undef CONFIRMSIZETAG +#undef CONFIRMSIZEOF +#undef CONFIRMOFFSET + +#undef CONFIRMCONST +#undef CONFIRMEXPR + +/* + * END:CONFIRMOFFSET for ModelConstants, arbitrary structs + * END:CONFIRMSIZEOF for ModelConstants, arbitrary structs + * END:CONFIRMSIZETAG lets the ModelConstants name use a shorter tag + * END:CONFIRMVLOFFSET for varlena offsets, which in Java don't count VARHDRSZ + * + * END:CONFIRMCONST for ModelConstants constants + * END:CONFIRMEXPR for ModelConstants constants + * + * BEGIN:CONFIRMCONST for TupleTableSlotImpl constants + * BEGIN:CONFIRMSIZEOF for TupleTableSlotImpl FormData_* structs + * BEGIN:CONFIRMOFFSET for TupleTableSlotImpl FormData_* structs + */ + +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == \ +(org_postgresql_pljava_pg_TupleTableSlotImpl_##c), \ + "Java/C value mismatch for " #c) +#define CONFIRMSIZEOF(form,fld) \ +StaticAssertStmt((sizeof ((form *)0)->fld) == \ +(org_postgresql_pljava_pg_TupleTableSlotImpl_SIZEOF_##form##_##fld), \ + "Java/C sizeof mismatch for " #form "." #fld) +#define CONFIRMOFFSET(form,fld) \ +StaticAssertStmt(offsetof(form,fld) == \ +(org_postgresql_pljava_pg_TupleTableSlotImpl_OFFSET_##form##_##fld), \ + "Java/C offset mismatch for " #form "." #fld) + + CONFIRMOFFSET( HeapTupleData, t_len ); + CONFIRMOFFSET( HeapTupleData, t_tableOid ); + + CONFIRMSIZEOF( HeapTupleData, t_len ); + CONFIRMSIZEOF( HeapTupleData, t_tableOid ); + + CONFIRMOFFSET( HeapTupleHeaderData, t_infomask ); + CONFIRMOFFSET( HeapTupleHeaderData, t_infomask2 ); + CONFIRMOFFSET( HeapTupleHeaderData, t_hoff ); + CONFIRMOFFSET( HeapTupleHeaderData, t_bits ); + + CONFIRMSIZEOF( HeapTupleHeaderData, t_infomask ); + CONFIRMSIZEOF( HeapTupleHeaderData, t_infomask2 ); + CONFIRMSIZEOF( HeapTupleHeaderData, t_hoff ); + + CONFIRMCONST( HEAP_HASNULL ); + CONFIRMCONST( HEAP_HASEXTERNAL ); + CONFIRMCONST( HEAP_NATTS_MASK ); + + CONFIRMOFFSET( NullableDatum, value ); + +#undef CONFIRMCONST +#undef CONFIRMSIZEOF +#undef CONFIRMOFFSET + +/* + * END:CONFIRMCONST for TupleTableSlotImpl constants + * END:CONFIRMSIZEOF for TupleTableSlotImpl FormData_* structs + * END:CONFIRMOFFSET for TupleTableSlotImpl FormData_* structs + * + * BEGIN:CONFIRMCONST for TriggerImpl constants + * BEGIN:CONFIRMSIZEOF for TriggerImpl constants + */ + +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == \ +(org_postgresql_pljava_pg_TriggerImpl_##c), \ + "Java/C value mismatch for " #c) +#define CONFIRMSIZETAG(strct,tag,fld) \ +StaticAssertStmt((sizeof ((strct *)0)->fld) == \ +(org_postgresql_pljava_pg_TriggerImpl_SIZEOF_##tag##_##fld), \ + "Java/C sizeof mismatch for " #strct "." 
#fld) + + CONFIRMCONST( TRIGGER_FIRES_ON_ORIGIN ); + CONFIRMCONST( TRIGGER_FIRES_ALWAYS ); + CONFIRMCONST( TRIGGER_FIRES_ON_REPLICA ); + CONFIRMCONST( TRIGGER_DISABLED ); + + CONFIRMCONST( TRIGGER_TYPE_ROW ); + CONFIRMCONST( TRIGGER_TYPE_BEFORE ); + CONFIRMCONST( TRIGGER_TYPE_INSERT ); + CONFIRMCONST( TRIGGER_TYPE_DELETE ); + CONFIRMCONST( TRIGGER_TYPE_UPDATE ); + CONFIRMCONST( TRIGGER_TYPE_TRUNCATE ); + CONFIRMCONST( TRIGGER_TYPE_INSTEAD ); + + CONFIRMCONST( TRIGGER_TYPE_LEVEL_MASK ); + CONFIRMCONST( TRIGGER_TYPE_STATEMENT ); + + CONFIRMCONST( TRIGGER_TYPE_TIMING_MASK ); + CONFIRMCONST( TRIGGER_TYPE_AFTER ); + CONFIRMCONST( TRIGGER_TYPE_EVENT_MASK ); + +#undef CONFIRMCONST +#undef CONFIRMSIZETAG + +/* + * END:CONFIRMCONST for TriggerImpl constants + * END:CONFIRMSIZETAG for TriggerImpl constants + * + * BEGIN:CONFIRMCONST for LookupImpl constants + * BEGIN:CONFIRMOFFSET for LookupImpl constants + * BEGIN:CONFIRMSIZEOF for LookupImpl constants + * BEGIN:CONFIRMSIZETAG for LookupImpl constants + */ + +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == \ +(org_postgresql_pljava_pg_LookupImpl_##c), \ + "Java/C value mismatch for " #c) +#define CONFIRMOFFSET(strct,fld) \ +StaticAssertStmt(offsetof(strct,fld) == \ +(org_postgresql_pljava_pg_LookupImpl_OFFSET_##strct##_##fld), \ + "Java/C offset mismatch for " #strct "." #fld) +#define CONFIRMSIZEOF(strct,fld) \ +StaticAssertStmt((sizeof ((strct *)0)->fld) == \ +(org_postgresql_pljava_pg_LookupImpl_SIZEOF_##strct##_##fld), \ + "Java/C sizeof mismatch for " #strct "." #fld) +#define CONFIRMSIZETAG(strct,tag,fld) \ +StaticAssertStmt((sizeof ((strct *)0)->fld) == \ +(org_postgresql_pljava_pg_LookupImpl_SIZEOF_##tag##_##fld), \ + "Java/C sizeof mismatch for " #strct "." #fld) + + CONFIRMOFFSET( CallContext, atomic ); + CONFIRMSIZEOF( CallContext, atomic ); + + CONFIRMSIZETAG( TriggerData, TRGD, tg_event ); + + CONFIRMCONST( TRIGGER_EVENT_INSERT ); + CONFIRMCONST( TRIGGER_EVENT_DELETE ); + CONFIRMCONST( TRIGGER_EVENT_UPDATE ); + CONFIRMCONST( TRIGGER_EVENT_TRUNCATE ); + CONFIRMCONST( TRIGGER_EVENT_OPMASK ); + CONFIRMCONST( TRIGGER_EVENT_ROW ); + CONFIRMCONST( TRIGGER_EVENT_BEFORE ); + CONFIRMCONST( TRIGGER_EVENT_AFTER ); + CONFIRMCONST( TRIGGER_EVENT_INSTEAD ); + CONFIRMCONST( TRIGGER_EVENT_TIMINGMASK ); + CONFIRMCONST( FirstLowInvalidHeapAttributeNumber ); + + CONFIRMCONST( SFRM_ValuePerCall ); + CONFIRMCONST( SFRM_Materialize ); + CONFIRMCONST( SFRM_Materialize_Random ); + CONFIRMCONST( SFRM_Materialize_Preferred ); + + CONFIRMCONST( ExprSingleResult ); + CONFIRMCONST( ExprMultipleResult ); + CONFIRMCONST( ExprEndResult ); + +#undef CONFIRMCONST +#undef CONFIRMSIZETAG + +/* + * END:CONFIRMCONST for LookupImpl constants + * END:CONFIRMSIZETAG for LookupImpl constants + */ +} + +void pljava_ModelConstants_initialize(void) +{ + ArrayType dummyArray; + jclass cls; + + JNINativeMethod methods[] = + { + { + "_statics", + "()Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_ModelConstants_00024Natives__1statics + }, + { 0, 0, 0 }, + { 0, 0, dummy } /* so C compiler won't warn that dummy is unused */ + }; + + cls = PgObject_getJavaClass( + "org/postgresql/pljava/pg/ModelConstants$Natives"); + PgObject_registerNatives2(cls, methods); + JNI_deleteLocalRef(cls); + + /* + * Don't really use PostgreSQL Assert for this; it goes behind elog's back. 
+ */ + if (org_postgresql_pljava_pg_ModelConstants_OFFSET_ArrayType_DIMS != + (((char*)ARR_DIMS(&dummyArray)) - (char *)&dummyArray) - VARHDRSZ ) + elog(ERROR, + "PL/Java built with mismatched value for OFFSET_ArrayType_DIMS"); +} + +/* + * Class: org_postgresql_pljava_pg_ModelConstants_Natives + * Method: _statics + * Signature: ()Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_ModelConstants_00024Natives__1statics(JNIEnv* env, jobject _cls) +{ + /* + * None of the usual PL/Java BEGIN_NATIVE fencing here, because this is not + * a call into PostgreSQL; it's pure JNI to grab a static constant address. + */ + return (*env)->NewDirectByteBuffer(env, constants, sizeof constants); +} diff --git a/pljava-so/src/main/c/ModelUtils.c b/pljava-so/src/main/c/ModelUtils.c new file mode 100644 index 000000000..b0e26f398 --- /dev/null +++ b/pljava-so/src/main/c/ModelUtils.c @@ -0,0 +1,1637 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if PG_VERSION_NUM >= 160000 +#include +#endif +#include +#include +#include +#include +#include + +#include "pljava/Backend.h" +#include "pljava/Exception.h" +#include "pljava/Invocation.h" +#include "pljava/PgObject.h" +#include "pljava/ModelUtils.h" +#include "pljava/VarlenaWrapper.h" + +#include "org_postgresql_pljava_internal_SPI.h" +#include "org_postgresql_pljava_internal_SPI_EarlyNatives.h" + +#include "org_postgresql_pljava_pg_CatalogObjectImpl_Addressed.h" +#include "org_postgresql_pljava_pg_CatalogObjectImpl_Factory.h" +#include "org_postgresql_pljava_pg_CharsetEncodingImpl_EarlyNatives.h" +#include "org_postgresql_pljava_pg_DatumUtils.h" +#include "org_postgresql_pljava_pg_ExprContextImpl.h" +#include "org_postgresql_pljava_pg_LookupImpl.h" +#include "org_postgresql_pljava_pg_MemoryContextImpl_EarlyNatives.h" +#include "org_postgresql_pljava_pg_ResourceOwnerImpl_EarlyNatives.h" +#include "org_postgresql_pljava_pg_TupleDescImpl.h" +#include "org_postgresql_pljava_pg_TupleTableSlotImpl.h" + +/* + * A compilation unit collecting various native methods used in the pg model + * implementation classes. This is something of a break with past PL/Java + * practice of having a correspondingly-named C file for a Java class, made on + * the belief that there won't be that many new methods here, and they will make + * more sense collected together. + * + * Some of the native methods here may *not* include the elaborate fencing seen + * in other PL/Java native methods, if they involve trivially simple functions + * that do not require calling into PostgreSQL or other non-thread-safe code. + * This is, of course, a careful exception made to the general rule. The calling + * Java code is expected to have good reason to believe any state to be examined + * by these methods won't be shifting underneath them. 
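A sketch contrasting the two styles the comment distinguishes, using hypothetical method names; the fencing and helper macros shown (BEGIN_NATIVE_NO_ERRCHECK, END_NATIVE, JLongGet, Exception_throw_ERROR) are the ones used elsewhere in this patch:

    /* fenced: reaches into PostgreSQL, so an ERROR must be caught and rethrown */
    JNIEXPORT void JNICALL
    Java_example__1fenced(JNIEnv *env, jobject _this, jlong p)
    {
        BEGIN_NATIVE_NO_ERRCHECK
        PG_TRY();
        {
            pfree(JLongGet(void *, p));
        }
        PG_CATCH();
        {
            Exception_throw_ERROR("pfree");
        }
        PG_END_TRY();
        END_NATIVE
    }

    /* unfenced: a single thread-safe JNI operation, no PostgreSQL involvement */
    JNIEXPORT jobject JNICALL
    Java_example__1unfenced(JNIEnv *env, jobject _cls)
    {
        return (*env)->NewDirectByteBuffer(env, constants, sizeof constants);
    }
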
+ */ + +static jclass s_CatalogObjectImpl_Factory_class; +static jmethodID s_CatalogObjectImpl_Factory_invalidateRelation; +static jmethodID s_CatalogObjectImpl_Factory_syscacheInvalidate; + +static jclass s_ExprContextImpl_class; +static jmethodID s_ExprContextImpl_releaseAndDecache; +static void exprContextCB(Datum arg); + +static jclass s_LookupImpl_class; +static jmethodID s_LookupImpl_dispatchNew; +static jmethodID s_LookupImpl_dispatch; +static jmethodID s_LookupImpl_dispatchInline; + +static jclass s_MemoryContextImpl_class; +static jmethodID s_MemoryContextImpl_callback; +static void memoryContextCallback(void *arg); + +static jclass s_ResourceOwnerImpl_class; +static jmethodID s_ResourceOwnerImpl_callback; +static void resourceReleaseCB(ResourceReleasePhase phase, + bool isCommit, bool isTopLevel, void *arg); + +static jclass s_TupleDescImpl_class; +static jmethodID s_TupleDescImpl_fromByteBuffer; + +static jclass s_TupleTableSlotImpl_class; +static jmethodID s_TupleTableSlotImpl_newDeformed; + +static void relCacheCB(Datum arg, Oid relid); +static void sysCacheCB(Datum arg, int cacheid, uint32 hash); + +/* + * An array of boolean, one for each registered syscache callback, updated from + * Java to reflect whether any instances subject to invalidation of that class + * have been cached. When false, the C syscache callback can return immediately. + */ +static bool s_sysCacheInvalArmed + [ org_postgresql_pljava_pg_CatalogObjectImpl_Factory_SYSCACHE_CBS ]; + +jobject pljava_TupleDescriptor_create(TupleDesc tupdesc, Oid reloid) +{ + jlong tupdesc_size = (jlong)TupleDescSize(tupdesc); + jobject td_b = JNI_newDirectByteBuffer(tupdesc, tupdesc_size); + + jobject result = JNI_callStaticObjectMethodLocked(s_TupleDescImpl_class, + s_TupleDescImpl_fromByteBuffer, + td_b, + (jint)tupdesc->tdtypeid, (jint)tupdesc->tdtypmod, + (jint)reloid, (jint)tupdesc->tdrefcount); + + JNI_deleteLocalRef(td_b); + return result; +} + +/* + * If NULL is passed for jtd, a Java TupleDescriptor will be created here from + * tupdesc. Otherwise, the passed jtd must be a JNI local reference to an + * existing Java TupleDescriptor corresponding to tupdesc, and on return, the + * JNI local reference will have been deleted. + */ +jobject pljava_TupleTableSlot_create( + TupleDesc tupdesc, jobject jtd, const TupleTableSlotOps *tts_ops, Oid reloid) +{ + int natts = tupdesc->natts; + TupleTableSlot *tts = MakeSingleTupleTableSlot(tupdesc, tts_ops); + jobject tts_b = JNI_newDirectByteBuffer(tts, (jlong)sizeof *tts); + jobject vals_b = JNI_newDirectByteBuffer(tts->tts_values, + (jlong)(natts * sizeof *tts->tts_values)); + jobject nuls_b = JNI_newDirectByteBuffer(tts->tts_isnull, (jlong)natts); + jobject jtts; + + if ( NULL == jtd ) + jtd = pljava_TupleDescriptor_create(tupdesc, reloid); + + jtts = JNI_callStaticObjectMethodLocked(s_TupleTableSlotImpl_class, + s_TupleTableSlotImpl_newDeformed, tts_b, jtd, vals_b, nuls_b); + + JNI_deleteLocalRef(nuls_b); + JNI_deleteLocalRef(vals_b); + JNI_deleteLocalRef(jtd); + JNI_deleteLocalRef(tts_b); + + return jtts; +} + +typedef struct RegProcedureLookup +{ + /* + * This member caches a JNI global reference to the Java RegProcedure.Lookup + * corresponding to the flinfo whose fn_extra member points here. The JNI + * global reference must be deleted when fn_mcxt goes away. + */ + jobject lookup; + /* + * Tag and address of the fn_expr most recently seen here. If changed, + * the Java object may need to invalidate some cached information. 
+ * + * No address retained in this struct from an earlier call is in any way + * assumed to be valid, other than for comparison to a corresponding address + * supplied in the current call. + */ + NodeTag exprTag; + Node *expr; + /* + * Members below hold most-recently seen values associated with fcinfo + * pointing to this flinfo. For any item whose tag and address (or nargs and + * address) have not changed, a new Java ByteBuffer needn't be created, as + * one retained from the earlier call still fits. + */ + short nargs; + FunctionCallInfo fcinfo; + NodeTag contextTag; + Node *context; + NodeTag resultinfoTag; + Node *resultinfo; +} +RegProcedureLookup; + +/* + * At the time of writing, all of these nodes appear (happily) to be of fixed + * size. (Even the one that is private.) + */ +static inline Size nodeTagToSize(NodeTag tag) +{ +#define TO_SIZE(t) case T_##t: return sizeof (t) + switch ( tag ) + { + TO_SIZE(AggState); + TO_SIZE(CallContext); +#if PG_VERSION_NUM >= 160000 + TO_SIZE(ErrorSaveContext); +#endif + TO_SIZE(EventTriggerData); + TO_SIZE(ReturnSetInfo); + TO_SIZE(TriggerData); + TO_SIZE(WindowAggState); +#if 0 /* this struct is private in nodeWindowAgg.c */ + TO_SIZE(WindowObjectData); +#endif + default: + return 0; /* never a valid Node size */ + } +#undef TO_SIZE +} + +void pljava_ModelUtils_inlineDispatch(PG_FUNCTION_ARGS) +{ + Size len; + jobject src; + InlineCodeBlock *codeblock = + castNode(InlineCodeBlock, DatumGetPointer(PG_GETARG_DATUM(0))); + + len = strlen(codeblock->source_text); + src = JNI_newDirectByteBuffer(codeblock->source_text, (jlong)len); + + /* + * The atomic flag will also be passed to the handler in case it cares, + * but recording it in currentInvocation for SPI's use should always happen + * and this is the simplest place to do it. + */ + currentInvocation->nonAtomic = ! codeblock->atomic; + + JNI_callStaticVoidMethod(s_LookupImpl_class, s_LookupImpl_dispatchInline, + (jint)codeblock->langOid, (jboolean)codeblock->atomic, src); +} + +Datum pljava_ModelUtils_callDispatch(PG_FUNCTION_ARGS, bool forValidator) +{ + FmgrInfo *flinfo = fcinfo->flinfo; + Oid oid = flinfo->fn_oid; + MemoryContext mcxt = flinfo->fn_mcxt; + Node *expr = flinfo->fn_expr; + RegProcedureLookup *extra = (RegProcedureLookup *)flinfo->fn_extra; + short nargs = fcinfo->nargs; + Node *context = fcinfo->context; + Node *resultinfo = fcinfo->resultinfo; + NodeTag exprTag = T_Invalid; + NodeTag contextTag = T_Invalid; + NodeTag resultinfoTag = T_Invalid; + jboolean j4v = forValidator ? JNI_TRUE : JNI_FALSE; + jboolean hasExpr = NULL != expr ? JNI_TRUE : JNI_FALSE; + jboolean newExpr = JNI_FALSE; + jobject fcinfo_b = NULL; + jobject context_b = NULL; + jobject resultinfo_b = NULL; + jobject lookup = NULL; + Size size; + ExprContext *econtext = NULL; + MemoryContext perQueryContext = NULL; + + /* + * If the caller has supplied an expression node representing the call site, + * get its tag. The handler can use the information to, for example, resolve + * the types of polymorphic parameters to concrete types from the call site. + */ + if ( NULL != expr ) + exprTag = nodeTag(expr); + + /* + * If the caller has supplied a context node with extra information about + * the call, get its tag. The handler will be able to consult its contents. + * + * The atomic flag (if it is a CallContext) or TriggerData (if that's what + * it is) will be recorded in currentInvocation right here, so that always + * happens without attention from the handler. 
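A cross-reference, restating in essence the code from Invocation.c earlier in this patch: the nonAtomic flag recorded just below is what Invocation_assertConnect consults when SPI is first needed:

    if ( ! currentInvocation->hasConnected )
    {
        int rslt = SPI_connect_ext(
            currentInvocation->nonAtomic ? SPI_OPT_NONATOMIC : 0);
        if ( SPI_OK_CONNECT != rslt )
            elog(ERROR, "SPI_connect returned %s", SPI_result_code_string(rslt));
        currentInvocation->hasConnected = true;  /* as in Invocation_assertConnect */
    }
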
+ */ + if ( NULL != context ) + { + contextTag = nodeTag(context); + + if ( T_CallContext == contextTag ) + currentInvocation->nonAtomic = ! ((CallContext *)context)->atomic; + else if ( T_TriggerData == contextTag ) + currentInvocation->triggerData = (TriggerData *)context; + } + + /* + * If the caller has supplied a resultinfo node to control how results are + * returned, get its tag. + */ + if ( NULL != resultinfo ) + resultinfoTag = nodeTag(resultinfo); + + /* + * If there is a RegProcedureLookup struct that was saved in extra during + * an earlier look at this call site, recover the existing Java LookupImpl + * object to call its dispatch method. A new ByteBuffer covering an fcinfo, + * context, or resultinfo struct, respectively, will be passed only if the + * presence, type, size, or location of the struct has changed; if not, a + * ByteBuffer from the earlier encounter can be used again. The newExpr and + * hasExpr params likewise indicate whether LookupImpl needs to refresh any + * expression information possibly cached from before. The target routine + * oid is passed here only as a sanity check; it had better match the one + * used when the LookupImpl was constructed. + * + * This block returns to the caller after invoking dispatch(...) and + * handling the result. XXX Result handling yet to be implemented; only + * returns void for now (the caller will see null if the handler poked + * fcinfo->isnull). + */ + if ( NULL != extra ) + { + lookup = extra->lookup; + Assert(NULL != lookup); /* extra with null lookup shouldn't be seen */ + + if ( exprTag != extra->exprTag || expr != extra->expr ) + { + newExpr = JNI_TRUE; + extra->exprTag = exprTag; + extra->expr = expr; + } + + if ( nargs != extra->nargs || fcinfo != extra->fcinfo ) + { + size = SizeForFunctionCallInfo(nargs); + fcinfo_b = JNI_newDirectByteBuffer(fcinfo, (jlong)size); + extra->nargs = nargs; + extra->fcinfo = fcinfo; + } + + if ( contextTag != extra->contextTag || context != extra->context ) + { + /* + * The size will be zero if it's a tag we don't support. The case of + * a change from an earlier-seen value *to* one we don't support is + * probably unreachable, but if it were to happen, we would need + * a way to tell the Java code not to go on using some stale buffer + * from before. Sending a zero-length buffer suffices for that; the + * inefficiency is of little concern considering it probably never + * happens, and it avoids passing an additional argument (just for + * something that probably never happens). + */ + size = nodeTagToSize(contextTag); + context_b = JNI_newDirectByteBuffer(context, (jlong)size); + extra->contextTag = contextTag; + extra->context = context; + } + + if ( resultinfoTag != extra->resultinfoTag + || resultinfo != extra->resultinfo ) + { + size = nodeTagToSize(resultinfoTag); + resultinfo_b = JNI_newDirectByteBuffer(resultinfo, (jlong)size); + extra->resultinfoTag = resultinfoTag; + extra->resultinfo = resultinfo; + } + + if ( T_ReturnSetInfo == resultinfoTag ) + { + ReturnSetInfo *rsi = (ReturnSetInfo *)resultinfo; + econtext = rsi->econtext; + perQueryContext = econtext->ecxt_per_query_memory; + } + + JNI_callVoidMethod(lookup, s_LookupImpl_dispatch, + oid, newExpr, hasExpr, fcinfo_b, context_b, resultinfo_b, + (jlong)(uintptr_t)econtext, (jlong)(uintptr_t)perQueryContext); + + PG_RETURN_VOID(); /* XXX for now */ + } + + /* + * Arrival here means extra was NULL: no Java LookupImpl exists yet. 
+ * A RegProcedureLookup struct will be freshly allocated in the + * flinfo->fn_mcxt memory context and saved as flinfo->fn_extra, and + * LookupImpl's static dispatchNew method will be called. The new C struct + * will end up holding a JNI global reference to the new LookupImpl thanks + * to a _cacheReference JNI callback (below in this file) made in the course + * of dispatchNew. + * + * The remainder of the RegProcedureLookup struct is populated here with + * the tags and addresses of any expr, context, or resultinfo nodes supplied + * by the caller, and the argument count and address of the caller-supplied + * fcinfo. Those will be used on subsequent calls to notice if the presence, + * tag (hence likely size), or address of any of those pieces has changed. + * + * dispatchNew is passed the memory context of the RegProcedureLookup + * struct, to bound its lifespan; when the context is reset, the JNI global + * ref to the LookupImpl instance will be released. The method is also + * passed the fn_extra address (for use by the _cacheReference callback), + * the target routine oid, forValidator and hasExpr flags, and ByteBuffers + * windowing the fcinfo struct, and the context and resultinfo structs when + * present. + * + * Once dispatchNew returns, any returned result needs appropriate handling. + * XXX For now, void is unconditionally returned (the caller will see null + * if the handler has poked fcinfo->isnull). + */ + + extra = MemoryContextAllocZero(mcxt, sizeof *extra); + + if ( T_Invalid != exprTag ) + { + extra->exprTag = exprTag; + extra->expr = expr; + } + + if ( T_Invalid != contextTag ) + { + extra->contextTag = contextTag; + extra->context = context; + size = nodeTagToSize(contextTag); + if ( 0 < size ) + context_b = JNI_newDirectByteBuffer(context, (jlong)size); + } + + if ( T_Invalid != resultinfoTag ) + { + extra->resultinfoTag = resultinfoTag; + extra->resultinfo = resultinfo; + size = nodeTagToSize(resultinfoTag); + if ( 0 < size ) + resultinfo_b = JNI_newDirectByteBuffer(resultinfo, (jlong)size); + + if ( T_ReturnSetInfo == resultinfoTag ) + { + ReturnSetInfo *rsi = (ReturnSetInfo *)resultinfo; + econtext = rsi->econtext; + perQueryContext = econtext->ecxt_per_query_memory; + } + } + + extra->nargs = nargs; + extra->fcinfo = fcinfo; + size = SizeForFunctionCallInfo(nargs); + fcinfo_b = JNI_newDirectByteBuffer(fcinfo, (jlong)size); + + flinfo->fn_extra = extra; + + JNI_callStaticVoidMethod(s_LookupImpl_class, s_LookupImpl_dispatchNew, + (jlong)(uintptr_t)mcxt, (jlong)(uintptr_t)extra, + oid, j4v, hasExpr, fcinfo_b, context_b, resultinfo_b, + (jlong)(uintptr_t)econtext, (jlong)(uintptr_t)perQueryContext); + + PG_RETURN_VOID(); /* XXX for now */ +} + +static void exprContextCB(Datum arg) +{ + JNI_callStaticObjectMethodLocked(s_ExprContextImpl_class, + s_ExprContextImpl_releaseAndDecache, (jint)DatumGetInt32(arg)); +} + +static void memoryContextCallback(void *arg) +{ + JNI_callStaticVoidMethodLocked(s_MemoryContextImpl_class, + s_MemoryContextImpl_callback, + PointerGetJLong(arg)); +} + +static void relCacheCB(Datum arg, Oid relid) +{ + JNI_callStaticObjectMethodLocked(s_CatalogObjectImpl_Factory_class, + s_CatalogObjectImpl_Factory_invalidateRelation, (jint)relid); +} + +static void resourceReleaseCB(ResourceReleasePhase phase, + bool isCommit, bool isTopLevel, void *arg) +{ + /* + * This static assertion does not need to be in every file that uses + * PointerGetJLong, but it should be somewhere once, so here it is. 
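+ *
+ * It is what makes the pointer round trip through Java safe; in sketch form,
+ * using the conversion macros the way the rest of this file does:
+ *
+ *     jlong j = PointerGetJLong(CurrentResourceOwner);
+ *     void *p = JLongGet(void *, j);
+ *     Assert(p == (void *) CurrentResourceOwner);
+ *
+ * If a jlong could not hold a uintptr_t, the second step could not be trusted
+ * to reproduce the original pointer.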
+ */ + StaticAssertStmt(sizeof (uintptr_t) <= sizeof (jlong), + "uintptr_t will not fit in jlong on this platform"); + + /* + * The way ResourceOwnerRelease is implemented, callbacks to loadable + * modules (like us!) happen /after/ all of the built-in releasey actions + * for a particular phase. So, by looking for RESOURCE_RELEASE_LOCKS here, + * we actually end up executing after all the built-in lock-related stuff + * has been released, but before any of the built-in stuff released in the + * RESOURCE_RELEASE_AFTER_LOCKS phase. Which, at least for the currently + * implemented DualState subclasses, is about the right time. + */ + if ( RESOURCE_RELEASE_LOCKS != phase ) + return; + + /* + * The void *arg is the NULL we supplied at registration time. The resource + * manager arranges for CurrentResourceOwner to be the one that is being + * released. + */ + JNI_callStaticVoidMethodLocked(s_ResourceOwnerImpl_class, + s_ResourceOwnerImpl_callback, + PointerGetJLong(CurrentResourceOwner)); + + if ( isTopLevel ) + Backend_warnJEP411(isCommit); +} + +static void sysCacheCB(Datum arg, int cacheid, uint32 hash) +{ + int32 index = DatumGetInt32(arg); + if ( ! s_sysCacheInvalArmed [ index ] ) + return; + + JNI_callStaticObjectMethodLocked(s_CatalogObjectImpl_Factory_class, + s_CatalogObjectImpl_Factory_syscacheInvalidate, + (jint)index, (jint)cacheid, (jint)hash); +} + +void pljava_ResourceOwner_unregister(void) +{ + UnregisterResourceReleaseCallback(resourceReleaseCB, NULL); +} + +void pljava_ModelUtils_initialize(void) +{ + jclass cls; + + JNINativeMethod catalogObjectAddressedMethods[] = + { + { + "_lookupRowtypeTupdesc", + "(II)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1lookupRowtypeTupdesc + }, + { + "_searchSysCacheCopy1", + "(II)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1searchSysCacheCopy1 + }, + { + "_searchSysCacheCopy2", + "(III)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1searchSysCacheCopy2 + }, + { + "_sysTableGetByOid", + "(IIIIJ)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1sysTableGetByOid + }, + { + "_tupDescBootstrap", + "(I)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1tupDescBootstrap + }, + { + "_windowSysCacheInvalArmed", + "()Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1windowSysCacheInvalArmed + }, + { 0, 0, 0 } + }; + + JNINativeMethod catalogObjectFactoryMethods[] = + { + { + "_currentDatabase", + "()I", + Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Factory__1currentDatabase + }, + { 0, 0, 0 } + }; + + JNINativeMethod charsetMethods[] = + { + { + "_serverEncoding", + "()I", + Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1serverEncoding + }, + { + "_clientEncoding", + "()I", + Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1clientEncoding + }, + { + "_nameToOrdinal", + "(Ljava/nio/ByteBuffer;)I", + Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1nameToOrdinal + }, + { + "_ordinalToName", + "(I)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1ordinalToName + }, + { + "_ordinalToIcuName", + "(I)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1ordinalToIcuName + }, + { 0, 0, 0 } + }; + + JNINativeMethod datumMethods[] = + { + { + 
"_addressOf", + "(Ljava/nio/ByteBuffer;)J", + Java_org_postgresql_pljava_pg_DatumUtils__1addressOf + }, + { + "_map", + "(JI)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_DatumUtils__1map + }, + { + "_mapBitmapset", + "(J)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_DatumUtils__1mapBitmapset + }, + { + "_mapCString", + "(J)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_DatumUtils__1mapCString + }, + { + "_mapVarlena", + "(Ljava/nio/ByteBuffer;JJJ)Lorg/postgresql/pljava/adt/spi/Datum$Input;", + Java_org_postgresql_pljava_pg_DatumUtils__1mapVarlena + }, + { 0, 0, 0 } + }; + + JNINativeMethod exprContextMethods[] = + { + { + "_registerCallback", + "(JI)V", + Java_org_postgresql_pljava_pg_ExprContextImpl__1registerCallback + }, + { 0, 0, 0 } + }; + + JNINativeMethod lookupImplMethods[] = + { + { + "_cacheReference", + "(Lorg/postgresql/pljava/pg/LookupImpl;J)V", + Java_org_postgresql_pljava_pg_LookupImpl__1cacheReference + }, + { + "_get_fn_expr_variadic", + "(Ljava/nio/ByteBuffer;)Z", + Java_org_postgresql_pljava_pg_LookupImpl__1get_1fn_1expr_1variadic + }, + { + "_stableInputs", + "(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;)V", + Java_org_postgresql_pljava_pg_LookupImpl__1stableInputs + }, + { + "_notionalCallResultType", + "(Ljava/nio/ByteBuffer;[I)Lorg/postgresql/pljava/model/TupleDescriptor;", + Java_org_postgresql_pljava_pg_LookupImpl__1notionalCallResultType + }, + { + "_resolveArgTypes", + "(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;II)Z", + Java_org_postgresql_pljava_pg_LookupImpl__1resolveArgTypes + }, + { 0, 0, 0 } + }; + + JNINativeMethod memoryContextMethods[] = + { + { + "_registerCallback", + "(J)V", + Java_org_postgresql_pljava_pg_MemoryContextImpl_00024EarlyNatives__1registerCallback + }, + { + "_window", + "(Ljava/lang/Class;)[Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_MemoryContextImpl_00024EarlyNatives__1window + }, + { 0, 0, 0 } + }; + + JNINativeMethod resourceOwnerMethods[] = + { + { + "_window", + "(Ljava/lang/Class;)[Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_ResourceOwnerImpl_00024EarlyNatives__1window + }, + { 0, 0, 0 } + }; + + JNINativeMethod spiMethods[] = + { + { + "_window", + "(Ljava/lang/Class;)[Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_internal_SPI_00024EarlyNatives__1window + }, + { 0, 0, 0 } + }; + + JNINativeMethod tdiMethods[] = + { + { + "_assign_record_type_typmod", + "(Ljava/nio/ByteBuffer;)I", + Java_org_postgresql_pljava_pg_TupleDescImpl__1assign_1record_1type_1typmod + }, + { + "_synthesizeDescriptor", + "(ILjava/nio/ByteBuffer;)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_TupleDescImpl__1synthesizeDescriptor + }, + { 0, 0, 0 } + }; + + JNINativeMethod ttsiMethods[] = + { + { + "_getsomeattrs", + "(Ljava/nio/ByteBuffer;I)V", + Java_org_postgresql_pljava_pg_TupleTableSlotImpl__1getsomeattrs + }, + { + "_mapHeapTuple", + "(J)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_pg_TupleTableSlotImpl__1mapHeapTuple + }, + { + "_store_heaptuple", + "(Ljava/nio/ByteBuffer;JZ)V", + Java_org_postgresql_pljava_pg_TupleTableSlotImpl__1store_1heaptuple + }, + { 0, 0, 0 } + }; + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/CatalogObjectImpl$Addressed"); + PgObject_registerNatives2(cls, catalogObjectAddressedMethods); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/CatalogObjectImpl$Factory"); + s_CatalogObjectImpl_Factory_class = JNI_newGlobalRef(cls); + PgObject_registerNatives2(cls, catalogObjectFactoryMethods); + 
JNI_deleteLocalRef(cls); + s_CatalogObjectImpl_Factory_invalidateRelation = + PgObject_getStaticJavaMethod( + s_CatalogObjectImpl_Factory_class, "invalidateRelation", "(I)V"); + s_CatalogObjectImpl_Factory_syscacheInvalidate = + PgObject_getStaticJavaMethod( + s_CatalogObjectImpl_Factory_class, "syscacheInvalidate", "(III)V"); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/CharsetEncodingImpl$EarlyNatives"); + PgObject_registerNatives2(cls, charsetMethods); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/DatumUtils"); + PgObject_registerNatives2(cls, datumMethods); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/ExprContextImpl"); + s_ExprContextImpl_class = JNI_newGlobalRef(cls); + PgObject_registerNatives2(cls, exprContextMethods); + s_ExprContextImpl_releaseAndDecache = PgObject_getStaticJavaMethod( + cls, "releaseAndDecache", "(I)V"); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/LookupImpl"); + PgObject_registerNatives2(cls, lookupImplMethods); + s_LookupImpl_class = JNI_newGlobalRef(cls); + JNI_deleteLocalRef(cls); + s_LookupImpl_dispatchNew = + PgObject_getStaticJavaMethod(s_LookupImpl_class, "dispatchNew", + "(JJIZZ" + "Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;" + "JJ)" + "V"); + s_LookupImpl_dispatch = + PgObject_getJavaMethod(s_LookupImpl_class, "dispatch", + "(IZZLjava/nio/ByteBuffer;Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;JJ)" + "V"); + s_LookupImpl_dispatchInline = + PgObject_getStaticJavaMethod(s_LookupImpl_class, "dispatchInline", + "(IZLjava/nio/ByteBuffer;)" + "V"); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/MemoryContextImpl$EarlyNatives"); + PgObject_registerNatives2(cls, memoryContextMethods); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/MemoryContextImpl"); + s_MemoryContextImpl_class = JNI_newGlobalRef(cls); + JNI_deleteLocalRef(cls); + s_MemoryContextImpl_callback = PgObject_getStaticJavaMethod( + s_MemoryContextImpl_class, "callback", "(J)V"); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/ResourceOwnerImpl$EarlyNatives"); + PgObject_registerNatives2(cls, resourceOwnerMethods); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/ResourceOwnerImpl"); + s_ResourceOwnerImpl_class = JNI_newGlobalRef(cls); + JNI_deleteLocalRef(cls); + s_ResourceOwnerImpl_callback = PgObject_getStaticJavaMethod( + s_ResourceOwnerImpl_class, "callback", "(J)V"); + + cls = PgObject_getJavaClass("org/postgresql/pljava/internal/SPI$EarlyNatives"); + PgObject_registerNatives2(cls, spiMethods); + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/TupleDescImpl"); + s_TupleDescImpl_class = JNI_newGlobalRef(cls); + PgObject_registerNatives2(cls, tdiMethods); + JNI_deleteLocalRef(cls); + + s_TupleDescImpl_fromByteBuffer = PgObject_getStaticJavaMethod( + s_TupleDescImpl_class, + "fromByteBuffer", + "(Ljava/nio/ByteBuffer;IIII)" + "Lorg/postgresql/pljava/model/TupleDescriptor;"); + + cls = PgObject_getJavaClass("org/postgresql/pljava/pg/TupleTableSlotImpl"); + s_TupleTableSlotImpl_class = JNI_newGlobalRef(cls); + PgObject_registerNatives2(cls, ttsiMethods); + JNI_deleteLocalRef(cls); + + s_TupleTableSlotImpl_newDeformed = PgObject_getStaticJavaMethod( + s_TupleTableSlotImpl_class, + "newDeformed", + "(Ljava/nio/ByteBuffer;Lorg/postgresql/pljava/model/TupleDescriptor;" + "Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;)" + 
"Lorg/postgresql/pljava/pg/TupleTableSlotImpl$Deformed;"); + + RegisterResourceReleaseCallback(resourceReleaseCB, NULL); + + CacheRegisterRelcacheCallback(relCacheCB, 0); + +#define REGISTER_SYSCACHE_CALLBACK(cache) \ + CacheRegisterSyscacheCallback((cache), sysCacheCB, Int32GetDatum(\ + org_postgresql_pljava_pg_CatalogObjectImpl_Factory_##cache##_CB)) + + REGISTER_SYSCACHE_CALLBACK(LANGOID); + REGISTER_SYSCACHE_CALLBACK(PROCOID); + REGISTER_SYSCACHE_CALLBACK(TRFOID); + REGISTER_SYSCACHE_CALLBACK(TYPEOID); + +#undef REGISTER_SYSCACHE_CALLBACK +} + +/* + * Class: org_postgresql_pljava_pg_CatalogObjectImpl_Addressed + * Method: _lookupRowtypeTupdesc + * Signature: (II)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1lookupRowtypeTupdesc(JNIEnv* env, jobject _cls, jint typeid, jint typmod) +{ + TupleDesc td; + jlong length; + jobject result = NULL; + BEGIN_NATIVE_AND_TRY + td = lookup_rowtype_tupdesc_noerror(typeid, typmod, true); + if ( NULL != td ) + { + /* + * Per contract, we return the tuple descriptor with its reference count + * incremented, but not registered with a resource owner for descriptor + * leak warnings. l_r_t_n() will have incremented already, but also + * registered for warnings. The proper dance is a second pure increment + * here, followed by a DecrTupleDescRefCount to undo what l_r_t_n() did. + * And none of that, of course, if the descriptor is not refcounted. + */ + if ( td->tdrefcount >= 0 ) + { + ++ td->tdrefcount; + DecrTupleDescRefCount(td); + } + length = (jlong)TupleDescSize(td); + result = JNI_newDirectByteBuffer((void *)td, length); + } + END_NATIVE_AND_CATCH("_lookupRowtypeTupdesc") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CatalogObjectImpl_Addressed + * Method: _searchSysCacheCopy1 + * Signature: (II)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1searchSysCacheCopy1(JNIEnv *env, jclass cls, jint cacheId, jint key1) +{ + jobject result = NULL; + HeapTuple ht; + BEGIN_NATIVE_AND_TRY + ht = SearchSysCacheCopy1(cacheId, Int32GetDatum(key1)); + if ( HeapTupleIsValid(ht) ) + { + result = JNI_newDirectByteBuffer(ht, HEAPTUPLESIZE + ht->t_len); + } + END_NATIVE_AND_CATCH("_searchSysCacheCopy1") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CatalogObjectImpl_Addressed + * Method: _searchSysCacheCopy2 + * Signature: (III)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1searchSysCacheCopy2(JNIEnv *env, jclass cls, jint cacheId, jint key1, jint key2) +{ + jobject result = NULL; + HeapTuple ht; + BEGIN_NATIVE_AND_TRY + ht = SearchSysCacheCopy2(cacheId, Int32GetDatum(key1), Int32GetDatum(key2)); + if ( HeapTupleIsValid(ht) ) + { + result = JNI_newDirectByteBuffer(ht, HEAPTUPLESIZE + ht->t_len); + } + END_NATIVE_AND_CATCH("_searchSysCacheCopy2") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CatalogObjectImpl_Addressed + * Method: _sysTableGetByOid + * Signature: (IIIIJ)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1sysTableGetByOid(JNIEnv *env, jclass cls, jint relOid, jint objOid, jint oidCol, jint indexOid, jlong tupleDesc) +{ + jobject result = NULL; + HeapTuple ht; + Relation rel; + SysScanDesc scandesc; + ScanKeyData entry[1]; + + BEGIN_NATIVE_AND_TRY + rel = relation_open((Oid)relOid, AccessShareLock); + + ScanKeyInit(&entry[0], 
(AttrNumber)oidCol, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum((Oid)objOid)); + + scandesc = systable_beginscan( + rel, (Oid)indexOid, InvalidOid != indexOid, NULL, 1, entry); + + ht = systable_getnext(scandesc); + + /* + * As in the extension.c code from which this is brazenly copied, we assume + * there can be at most one matching tuple. (Oid ought to be the primary key + * of a catalog table we care about, so it's not a daring assumption.) + */ + if ( HeapTupleIsValid(ht) ) + { + /* + * We wish to return a tuple satisfying the same conditions as if it had + * been obtained from the syscache, including that it has no external + * TOAST pointers. (Inline-compressed values, it could still have.) + */ + if ( HeapTupleHasExternal(ht) ) + ht = toast_flatten_tuple(ht, JLongGet(TupleDesc, tupleDesc)); + else + ht = heap_copytuple(ht); + result = JNI_newDirectByteBuffer(ht, HEAPTUPLESIZE + ht->t_len); + } + + systable_endscan(scandesc); + relation_close(rel, AccessShareLock); + END_NATIVE_AND_CATCH("_sysTableGetByOid") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CatalogObjectImpl_Addressed + * Method: _tupDescBootstrap + * Signature: (I)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1tupDescBootstrap(JNIEnv* env, jobject _cls, jint relid) +{ + Relation rel; + TupleDesc td; + jlong length; + jobject result = NULL; + BEGIN_NATIVE_AND_TRY + rel = relation_open((Oid)relid, AccessShareLock); + td = RelationGetDescr(rel); + /* + * Per contract, we return the tuple descriptor with its reference count + * incremented, without registering it with a resource owner for descriptor + * leak warnings. + */ + ++ td->tdrefcount; + /* + * Can close the relation now that the td reference count is bumped. 
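+ * (The bare increment above, rather than IncrTupleDescRefCount, is the point
+ * of the contract: IncrTupleDescRefCount would also register the pin with
+ * CurrentResourceOwner, and a pin that outlives that owner, as one handed to
+ * Java well may, would then draw a reference-leak warning. Compare:
+ *
+ *     IncrTupleDescRefCount(td);    increments and registers with the
+ *                                   current resource owner
+ *     ++ td->tdrefcount;            increments only; nothing later complains
+ *                                   that the pin was never released
+ *
+ * The same consideration applies in _lookupRowtypeTupdesc above.)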
+ */ + relation_close(rel, AccessShareLock); + length = (jlong)TupleDescSize(td); + result = JNI_newDirectByteBuffer((void *)td, length); + END_NATIVE_AND_CATCH("_tupDescBootstrap") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CatalogObjectImpl_Addressed + * Method: _windowSysCacheInvalArmed + * Signature: ()Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Addressed__1windowSysCacheInvalArmed(JNIEnv *env, jclass cls) +{ + return (*env)->NewDirectByteBuffer( + env, s_sysCacheInvalArmed, sizeof s_sysCacheInvalArmed); +} + +/* + * Class: org_postgresql_pljava_pg_CatalogObjectImpl_Factory + * Method: _currentDatabase + * Signature: ()I + */ +JNIEXPORT jint JNICALL +Java_org_postgresql_pljava_pg_CatalogObjectImpl_00024Factory__1currentDatabase(JNIEnv *env, jclass cls) +{ + return MyDatabaseId; +} + +/* + * Class: org_postgresql_pljava_pg_CharsetEncodingImpl_EarlyNatives + * Method: _serverEncoding + * Signature: ()I + */ +JNIEXPORT jint JNICALL +Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1serverEncoding(JNIEnv *env, jclass cls) +{ + int result = -1; + BEGIN_NATIVE_AND_TRY + result = GetDatabaseEncoding(); + END_NATIVE_AND_CATCH("_serverEncoding") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CharsetEncodingImpl_EarlyNatives + * Method: _clientEncoding + * Signature: ()I + */ +JNIEXPORT jint JNICALL +Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1clientEncoding(JNIEnv *env, jclass cls) +{ + int result = -1; + BEGIN_NATIVE_AND_TRY + result = pg_get_client_encoding(); + END_NATIVE_AND_CATCH("_clientEncoding") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CharsetEncodingImpl_EarlyNatives + * Method: _nameToOrdinal + * Signature: (Ljava/nio/ByteBuffer;)I + */ +JNIEXPORT jint JNICALL +Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1nameToOrdinal(JNIEnv *env, jclass cls, jobject bb) +{ + int result = -1; + char const *name = (*env)->GetDirectBufferAddress(env, bb); + if ( NULL == name ) + return result; + BEGIN_NATIVE_AND_TRY + result = pg_char_to_encoding(name); + END_NATIVE_AND_CATCH("_nameToOrdinal") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CharsetEncodingImpl_EarlyNatives + * Method: _ordinalToName + * Signature: (I)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1ordinalToName(JNIEnv *env, jclass cls, jint ordinal) +{ + jobject result = NULL; + char const *name; + BEGIN_NATIVE_AND_TRY + name = pg_encoding_to_char(ordinal); + if ( '\0' != *name ) + result = JNI_newDirectByteBuffer((void *)name, (jint)strlen(name)); + END_NATIVE_AND_CATCH("_ordinalToName") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_CharsetEncodingImpl_EarlyNatives + * Method: _ordinalToIcuName + * Signature: (I)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_CharsetEncodingImpl_00024EarlyNatives__1ordinalToIcuName(JNIEnv *env, jclass cls, jint ordinal) +{ + jobject result = NULL; + char const *name; + BEGIN_NATIVE_AND_TRY + name = get_encoding_name_for_icu(ordinal); + if ( NULL != name ) + result = JNI_newDirectByteBuffer((void *)name, (jint)strlen(name)); + END_NATIVE_AND_CATCH("_ordinalToIcuName") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_DatumUtils + * Method: _addressOf + * Signature: (Ljava/nio/ByteBuffer;)J + */ +JNIEXPORT jlong JNICALL 
+Java_org_postgresql_pljava_pg_DatumUtils__1addressOf(JNIEnv* env, jobject _cls, jobject bb) +{ + return PointerGetJLong((*env)->GetDirectBufferAddress(env, bb)); +} + +/* + * Class: org_postgresql_pljava_pg_DatumUtils + * Method: _map + * Signature: (JI)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_DatumUtils__1map(JNIEnv* env, jobject _cls, jlong nativeAddress, jint length) +{ + return (*env)->NewDirectByteBuffer( + env, JLongGet(void *, nativeAddress), length); +} + +/* + * Class: org_postgresql_pljava_pg_DatumUtils + * Method: _mapBitmapset + * Signature: (J)Ljava/nio/ByteBuffer; + * The Java caller has already checked that the address is not null. + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_DatumUtils__1mapBitmapset(JNIEnv* env, jobject _cls, jlong nativeAddress) +{ + Bitmapset *bms = JLongGet(Bitmapset *, nativeAddress); + jlong size; + size = offsetof(Bitmapset, words) + bms->nwords * sizeof(bitmapword); + return (*env)->NewDirectByteBuffer(env, (void *)bms, size); +} + +/* + * Class: org_postgresql_pljava_pg_DatumUtils + * Method: _mapCString + * Signature: (J)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_DatumUtils__1mapCString(JNIEnv* env, jobject _cls, jlong nativeAddress) +{ + jlong length; + void *base = JLongGet(void *, nativeAddress); + length = (jlong)strlen(base); + return (*env)->NewDirectByteBuffer(env, base, length); +} + +/* + * Class: org_postgresql_pljava_pg_DatumUtils + * Method: _mapVarlena + * Signature: (Ljava/nio/ByteBuffer;JJJ)Lorg/postgresql/pljava/adt/spi/Datum$Input; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_DatumUtils__1mapVarlena(JNIEnv* env, jobject _cls, jobject bb, jlong offset, jlong resowner, jlong memcontext) +{ + Pointer vl; + jobject result = NULL; + + if ( NULL == bb ) + vl = JLongGet(Pointer, offset); + else + { + void *buf = (*env)->GetDirectBufferAddress(env, bb); + if ( NULL == buf ) + return NULL; + vl = (Pointer)((char *)buf + offset); + } + + BEGIN_NATIVE_AND_TRY + result = pljava_VarlenaWrapper_Input(PointerGetDatum(vl), + JLongGet(MemoryContext, memcontext), JLongGet(ResourceOwner, resowner)); + END_NATIVE_AND_CATCH("_mapVarlena") + return result; +} + + +/* + * Class: org_postgresql_pljava_pg_ExprContextImpl + * Method: _registerCallback + * Signature: (JI)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_pg_ExprContextImpl__1registerCallback(JNIEnv* env, jobject _cls, jlong ecxt, jint key) +{ + BEGIN_NATIVE_AND_TRY + RegisterExprContextCallback( + (ExprContext *)(uintptr_t)ecxt, exprContextCB, Int32GetDatum(key)); + END_NATIVE_AND_CATCH("_mapVarlena") +} + + +/* + * Class: org_postgresql_pljava_pg_LookupImpl + * Method: _cacheReference + * Signature: (Lorg/postgresql/pljava/pg/LookupImpl;J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_pg_LookupImpl__1cacheReference(JNIEnv* env, jobject _cls, jobject lref, jlong extra) +{ + RegProcedureLookup *extraStruct = JLongGet(RegProcedureLookup *, extra); + extraStruct->lookup = (*env)->NewGlobalRef(env, lref); +} + +/* + * Class: org_postgresql_pljava_pg_LookupImpl + * Method: _get_fn_expr_variadic + * Signature: (Ljava/nio/ByteBuffer;)Z + */ +JNIEXPORT jboolean JNICALL +Java_org_postgresql_pljava_pg_LookupImpl__1get_1fn_1expr_1variadic(JNIEnv* env, jobject _cls, jobject fcinfo_b) +{ + bool result = false; + FunctionCallInfo fcinfo = (*env)->GetDirectBufferAddress(env, fcinfo_b); + if ( NULL == fcinfo ) + return JNI_FALSE; /* shouldn't happen; there's 
probably an exception */ + + BEGIN_NATIVE_AND_TRY + result = get_fn_expr_variadic(fcinfo->flinfo); + END_NATIVE_AND_CATCH("_get_fn_expr_variadic") + + return result ? JNI_TRUE : JNI_FALSE; +} + +/* + * Class: org_postgresql_pljava_pg_LookupImpl + * Method: _stableInputs + * Signature: (Ljava/nio/ByteBuffer;)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_pg_LookupImpl__1stableInputs(JNIEnv* env, jobject _cls, jobject fcinfo_b, jobject bits_b) +{ + FunctionCallInfo fcinfo = (*env)->GetDirectBufferAddress(env, fcinfo_b); + Bitmapset *bits = (*env)->GetDirectBufferAddress(env, bits_b); + FmgrInfo *flinfo; + int idx; + + if ( NULL == fcinfo || NULL == bits ) + return; /* shouldn't happen; there's probably an exception */ + + flinfo = fcinfo->flinfo; + + BEGIN_NATIVE_AND_TRY + + /* + * The caller has set one guard bit at the next higher index beyond the + * bits of interest. Find that one, then bms_prev_member loop from there. + */ + idx = bms_prev_member(bits, -1); + if ( -2 != idx ) + { + while ( -2 != (idx = bms_prev_member(bits, idx)) ) + { + if ( ! get_fn_expr_arg_stable(flinfo, idx) ) + bms_del_member(bits, idx); + } + } + + END_NATIVE_AND_CATCH("_stableInputs") +} + +/* + * Class: org_postgresql_pljava_pg_LookupImpl + * Method: _notionalCallResultType + * Signature: (Ljava/nio/ByteBuffer;[I)Lorg/postgresql/pljava/model/TupleDescriptor; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_LookupImpl__1notionalCallResultType(JNIEnv* env, jobject _cls, jobject fcinfo_b, jintArray returnTypeOid) +{ + FunctionCallInfo fcinfo = (*env)->GetDirectBufferAddress(env, fcinfo_b); + Oid typeId; + jint joid; + TupleDesc td = NULL; + jobject result = NULL; + + if ( NULL == fcinfo ) + return NULL; /* shouldn't happen; there's probably an exception */ + + BEGIN_NATIVE_AND_TRY + + get_call_result_type(fcinfo, &typeId, &td); /* simple so far */ + joid = typeId; + JNI_setIntArrayRegion(returnTypeOid, 0, 1, &joid); + + if ( NULL != td ) + result = pljava_TupleDescriptor_create(td, InvalidOid); + + END_NATIVE_AND_CATCH("_notionalCallResultType") + return result; +} + +/* + * Class: org_postgresql_pljava_pg_LookupImpl + * Method: _resolveArgTypes + * Signature: (Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;II)Z + */ +JNIEXPORT jboolean JNICALL +Java_org_postgresql_pljava_pg_LookupImpl__1resolveArgTypes(JNIEnv* env, jobject _cls, jobject fcinfo_b, jobject types_b, jobject unresolved_b, jint tplSz, jint argSz) +{ + FunctionCallInfo fcinfo = (*env)->GetDirectBufferAddress(env, fcinfo_b); + Oid *types = (*env)->GetDirectBufferAddress(env, types_b); + Bitmapset *unresolved = (*env)->GetDirectBufferAddress(env, unresolved_b); + FmgrInfo *flinfo; + int idx; + bool result = false; + + if ( NULL == fcinfo || NULL == types_b || NULL == unresolved_b ) + return JNI_FALSE; /* shouldn't happen; there's probably an exception */ + + flinfo = fcinfo->flinfo; + + BEGIN_NATIVE_AND_TRY + + /* + * If the types array is longer than the template (the spread variadic "any" + * case), grab all the arg types beyond the end of the template. + */ + for ( idx = tplSz ; idx < argSz ; ++ idx ) + types[idx] = get_fn_expr_argtype(flinfo, idx); + + /* + * Check the template's unresolved types for the "any" type and grab + * those types too. resolve_polymorphic_argtypes will only attend to + * the civilized polymorphic types. + * + * The caller has set one guard bit in the Bitmapset beyond the last bit + * of interest. Find that one, then bms_prev_member loop from there. 
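+ *
+ * The guard-bit convention in isolation, with made-up values and a
+ * hypothetical handleEntry(): suppose at most three entries exist and only
+ * entry 1 is of interest. The caller builds
+ *
+ *     Bitmapset *b = NULL;
+ *     b = bms_add_member(b, 3);      (the guard: one past the last entry)
+ *     b = bms_add_member(b, 1);
+ *
+ * and the consumer peels off the guard, then walks downward:
+ *
+ *     int i = bms_prev_member(b, -1);                 (returns 3, the guard)
+ *     while ( -2 != (i = bms_prev_member(b, i)) )
+ *         handleEntry(i);                             (visits only 1)
+ *
+ * so no separate length needs to accompany the set.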
+ */ + idx = bms_prev_member(unresolved, -1); + if ( -2 != idx ) + { + while ( -2 != (idx = bms_prev_member(unresolved, idx)) ) + if ( ANYOID == types[idx] ) + types[idx] = get_fn_expr_argtype(flinfo, idx); + } + + /* + * resolve_polymorphic_argtypes will do the rest of the job. + * It only needs to look at the first tplSz types. + */ + result = resolve_polymorphic_argtypes(tplSz, types, NULL, flinfo->fn_expr); + + END_NATIVE_AND_CATCH("_resolveArgTypes") + return result ? JNI_TRUE : JNI_FALSE; +} + + +/* + * Class: org_postgresql_pljava_pg_MemoryContext_EarlyNatives + * Method: _registerCallback + * Signature: (J)V; + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_pg_MemoryContextImpl_00024EarlyNatives__1registerCallback(JNIEnv* env, jobject _cls, jlong nativeAddress) +{ + MemoryContext cxt = JLongGet(MemoryContext, nativeAddress); + MemoryContextCallback *cb; + + BEGIN_NATIVE_AND_TRY + /* + * Optimization? Use MemoryContextAllocExtended with NO_OOM, and do without + * the AND_TRY/AND_CATCH to catch a PostgreSQL ereport. + */ + cb = MemoryContextAlloc(cxt, sizeof *cb); + cb->func = memoryContextCallback; + cb->arg = cxt; + MemoryContextRegisterResetCallback(cxt, cb); + END_NATIVE_AND_CATCH("_registerCallback") +} + +/* + * Class: org_postgresql_pljava_pg_MemoryContext_EarlyNatives + * Method: _window + * Signature: ()[Ljava/nio/ByteBuffer; + * + * Return an array of ByteBuffers constructed to window the PostgreSQL globals + * holding the well-known memory contexts. The indices into the array are + * assigned arbitrarily in the API class CatalogObject.Factory and inherited + * from it in CatalogObjectImpl.Factory, from which the native .h makes them + * visible here. A peculiar consequence is that the code in MemoryContextImpl + * can be ignorant of them, and just fetch the array element at the index passed + * from the API class. + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_MemoryContextImpl_00024EarlyNatives__1window(JNIEnv* env, jobject _cls, jclass component) +{ + jobject r = (*env)->NewObjectArray(env, (jsize)10, component, NULL); + if ( NULL == r ) + return NULL; + +#define POPULATE(tag) do {\ + jobject b = (*env)->NewDirectByteBuffer(env, \ + &tag##Context, sizeof tag##Context);\ + if ( NULL == b )\ + return NULL;\ + (*env)->SetObjectArrayElement(env, r, \ + (jsize)org_postgresql_pljava_pg_CatalogObjectImpl_Factory_MCX_##tag, \ + b);\ +} while (0) + + POPULATE(CurrentMemory); + POPULATE(TopMemory); + POPULATE(Error); + POPULATE(Postmaster); + POPULATE(CacheMemory); + POPULATE(Message); + POPULATE(TopTransaction); + POPULATE(CurTransaction); + POPULATE(Portal); + POPULATE(JavaMemory); + +#undef POPULATE + + return r; +} + + +/* + * Class: org_postgresql_pljava_pg_ResourceOwnerImpl_EarlyNatives + * Method: _window + * Signature: ()[Ljava/nio/ByteBuffer; + * + * Return an array of ByteBuffers constructed to window the PostgreSQL globals + * holding the well-known resource owners. The indices into the array are + * assigned arbitrarily in the API class CatalogObject.Factory and inherited + * from it in CatalogObjectImpl.Factory, from which the native .h makes them + * visible here. A peculiar consequence is that the code in ResourceOwnerImpl + * can be ignorant of them, and just fetch the array element at the index passed + * from the API class. 
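+ *
+ * For a concrete picture, POPULATE(Current) below expands to (roughly)
+ *
+ *     jobject b = (*env)->NewDirectByteBuffer(env,
+ *         &CurrentResourceOwner, sizeof CurrentResourceOwner);
+ *     (*env)->SetObjectArrayElement(env, r,
+ *         (jsize)org_postgresql_pljava_pg_CatalogObjectImpl_Factory_RSO_Current,
+ *         b);
+ *
+ * where the RSO_Current constant in the generated .h carries whatever index
+ * the API class assigned.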
+ */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_ResourceOwnerImpl_00024EarlyNatives__1window(JNIEnv* env, jobject _cls, jclass component) +{ + jobject r = (*env)->NewObjectArray(env, (jsize)4, component, NULL); + if ( NULL == r ) + return NULL; + +#define POPULATE(tag) do {\ + jobject b = (*env)->NewDirectByteBuffer(env, \ + &tag##ResourceOwner, sizeof tag##ResourceOwner);\ + if ( NULL == b )\ + return NULL;\ + (*env)->SetObjectArrayElement(env, r, \ + (jsize)org_postgresql_pljava_pg_CatalogObjectImpl_Factory_RSO_##tag, \ + b);\ +} while (0) + + POPULATE(Current); + POPULATE(CurTransaction); + POPULATE(TopTransaction); + POPULATE(AuxProcess); + +#undef POPULATE + + return r; +} + + +/* + * Class: org_postgresql_pljava_internal_SPI_EarlyNatives + * Method: _window + * Signature: ()[Ljava/nio/ByteBuffer; + * + * Return an array of ByteBuffers constructed to window the PostgreSQL globals + * SPI_result, SPI_processed, and SPI_tuptable. The indices into the array are + * assigned arbitrarily in the internal class SPI, from which the native .h + * makes them visible here. + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_SPI_00024EarlyNatives__1window(JNIEnv* env, jobject _cls, jclass component) +{ + jobject r = (*env)->NewObjectArray(env, (jsize)3, component, NULL); + if ( NULL == r ) + return NULL; + +#define POPULATE(tag) do {\ + jobject b = (*env)->NewDirectByteBuffer(env, &tag, sizeof tag);\ + if ( NULL == b )\ + return NULL;\ + (*env)->SetObjectArrayElement(env, r, \ + (jsize)org_postgresql_pljava_internal_SPI_##tag, \ + b);\ +} while (0) + + POPULATE(SPI_result); + POPULATE(SPI_processed); + POPULATE(SPI_tuptable); + +#undef POPULATE + + return r; +} + + +/* + * Class: org_postgresql_pljava_pg_TupleDescImpl + * Method: _assign_record_type_typmod + * Signature: (Ljava/nio/ByteBuffer;)I + */ +JNIEXPORT jint JNICALL +Java_org_postgresql_pljava_pg_TupleDescImpl__1assign_1record_1type_1typmod(JNIEnv* env, jobject _cls, jobject td_b) +{ + TupleDesc td = (*env)->GetDirectBufferAddress(env, td_b); + if ( NULL == td ) + return -1; + + BEGIN_NATIVE_AND_TRY + assign_record_type_typmod(td); + END_NATIVE_AND_CATCH("_assign_record_type_typmod") + return td->tdtypmod; +} + +/* + * Class: org_postgresql_pljava_pg_TupleDescImpl + * Method: _synthesizeDescriptor + * Signature: (ILjava/nio/ByteBuffer;)Ljava/nio/ByteBuffer; + * + * When synthesizing a TupleDescriptor from only a list of types and names, it + * is tempting to make an ephemeral descriptor all in Java and avoid any JNI + * call. On the other hand, TupleDescInitEntry is more likely to know what to + * store in fields of the struct we don't care about, or added in new versions. + * + * The Java caller passes n (the number of attributes wanted) and one ByteBuffer + * in which the sequence (int32 typoid, int32 typmod, bool array, encodedname\0) + * occurs n times, INTALIGN'd between. 
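+ *
+ * A sketch of writing one entry (a hypothetical encoder; the real one is on
+ * the Java side), mirroring the decoding loop below:
+ *
+ *     char *p = (char *) INTALIGN((uintptr_t) buf);
+ *     *(int32 *) p = typoid;    p += sizeof (int32);
+ *     *(int32 *) p = typmod;    p += sizeof (int32);
+ *     *p++ = isArray ? 1 : 0;
+ *     strcpy(p, encodedName);   p += strlen(encodedName) + 1;
+ *
+ * with p INTALIGN'd again before the next entry.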
+ */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_TupleDescImpl__1synthesizeDescriptor(JNIEnv* env, jobject _cls, jint n, jobject in_b) +{ + jobject result = NULL; + jlong tupdesc_size; + int i; + Oid typoid; + int32 typmod; + bool isArray; + TupleDesc td; + int32 *in_i; + char *in_c = (*env)->GetDirectBufferAddress(env, in_b); + if ( NULL == in_c ) + return NULL; + + BEGIN_NATIVE_AND_TRY + + td = CreateTemplateTupleDesc(n); + + for ( i = 0 ; i < n ; ++ i ) + { + in_i = (int32 *)INTALIGN((uintptr_t)in_c); + typoid = *(in_i++); + typmod = *(in_i++); + in_c = (char *)(uintptr_t)in_i; + isArray = *(in_c++); + + TupleDescInitEntry(td, 1 + i, in_c, typoid, typmod, isArray ? 1 : 0); + + in_c += strlen(in_c) + 1; + } + + tupdesc_size = (jlong)TupleDescSize(td); + result = JNI_newDirectByteBuffer(td, tupdesc_size); + + END_NATIVE_AND_CATCH("_synthesizeDescriptor") + return result; +} + + +/* + * Class: org_postgresql_pljava_pg_TupleTableSlotImpl + * Method: _getsomeattrs + * Signature: (Ljava/nio/ByteBuffer;I)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_pg_TupleTableSlotImpl__1getsomeattrs(JNIEnv* env, jobject _cls, jobject tts_b, jint attnum) +{ + TupleTableSlot *tts = (*env)->GetDirectBufferAddress(env, tts_b); + if ( NULL == tts ) + return; + + BEGIN_NATIVE_AND_TRY + slot_getsomeattrs_int(tts, attnum); + END_NATIVE_AND_CATCH("_getsomeattrs") +} + +/* + * Class: org_postgresql_pljava_pg_TupleTableSlotImpl + * Method: _mapHeapTuple + * Signature: (J)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_pg_TupleTableSlotImpl__1mapHeapTuple(JNIEnv* env, jobject _cls, jlong nativeAddress) +{ + HeapTuple htp; + jlong size; + + if ( 0 == nativeAddress ) + return NULL; + + htp = JLongGet(HeapTuple, nativeAddress); + + if ( ! HeapTupleIsValid(htp) || htp->t_data == NULL ) + return NULL; + + size = HEAPTUPLESIZE + htp->t_len; + + return (*env)->NewDirectByteBuffer(env, htp, size); +} + +/* + * Class: org_postgresql_pljava_pg_TupleTableSlotImpl + * Method: _store_heaptuple + * Signature: (Ljava/nio/ByteBuffer;JZ)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_pg_TupleTableSlotImpl__1store_1heaptuple(JNIEnv* env, jobject _cls, jobject tts_b, jlong ht, jboolean shouldFree) +{ + HeapTuple htp = JLongGet(HeapTuple, ht); + TupleTableSlot *tts = (*env)->GetDirectBufferAddress(env, tts_b); + if ( NULL == tts ) + return; + + BEGIN_NATIVE_AND_TRY + ExecStoreHeapTuple(htp, tts, JNI_TRUE == shouldFree); + END_NATIVE_AND_CATCH("_store_heaptuple") +} diff --git a/pljava-so/src/main/c/SPI.c b/pljava-so/src/main/c/SPI.c index 3d891b30d..1b4aa4676 100644 --- a/pljava-so/src/main/c/SPI.c +++ b/pljava-so/src/main/c/SPI.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -23,10 +23,17 @@ #include #endif +/* + * Yes, this macro works because the class's simple name happens to be SPI + * and it defines constants named without the SPI_ prefix the PG source uses. 
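+ *
+ * Concretely, if the Java class declares, say, OK_SELECT, the generated
+ * header names that constant org_postgresql_pljava_internal_SPI_OK_SELECT,
+ * so
+ *
+ *     CONFIRMCONST(SPI_OK_SELECT);
+ *
+ * pastes into
+ *
+ *     StaticAssertStmt((SPI_OK_SELECT) ==
+ *         (org_postgresql_pljava_internal_SPI_OK_SELECT),
+ *         "Java/C value mismatch for " "SPI_OK_SELECT");
+ *
+ * lining the PG constant up against the Java-derived one.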
+ */ #define CONFIRMCONST(c) \ StaticAssertStmt((c) == (org_postgresql_pljava_internal_##c), \ "Java/C value mismatch for " #c) +static jclass s_TupleList_SPI_class; +static jmethodID s_TupleList_SPI_init; + extern void SPI_initialize(void); void SPI_initialize(void) { @@ -37,14 +44,9 @@ void SPI_initialize(void) Java_org_postgresql_pljava_internal_SPI__1exec }, { - "_getProcessed", - "()J", - Java_org_postgresql_pljava_internal_SPI__1getProcessed - }, - { - "_getResult", - "()I", - Java_org_postgresql_pljava_internal_SPI__1getResult + "_endXact", + "(Z)V", + Java_org_postgresql_pljava_internal_SPI__1endXact }, { "_getTupTable", @@ -52,14 +54,29 @@ void SPI_initialize(void) Java_org_postgresql_pljava_internal_SPI__1getTupTable }, { + "_mapTupTable", + "(Lorg/postgresql/pljava/pg/TupleTableSlotImpl;JI)Lorg/postgresql/pljava/pg/TupleList;", + Java_org_postgresql_pljava_internal_SPI__1mapTupTable + }, + { "_freeTupTable", "()V", Java_org_postgresql_pljava_internal_SPI__1freeTupTable }, { 0, 0, 0 }}; + /* + * See also ModelUtils.c for newer methods associated with SPI.EarlyNatives. + */ PgObject_registerNatives("org/postgresql/pljava/internal/SPI", methods); + s_TupleList_SPI_class = JNI_newGlobalRef( + PgObject_getJavaClass("org/postgresql/pljava/pg/TupleList$SPI")); + s_TupleList_SPI_init = PgObject_getJavaMethod(s_TupleList_SPI_class, + "", + "(Lorg/postgresql/pljava/pg/TupleTableSlotImpl;JLjava/nio/ByteBuffer;)V" + ); + /* * Statically assert that the Java code has the right values for these. * I would rather have this at the top, but these count as statements and @@ -111,6 +128,7 @@ void SPI_initialize(void) /**************************************** * JNI methods + * See also ModelUtils.c for newer methods associated with SPI.EarlyNatives. ****************************************/ /* * Class: org_postgresql_pljava_internal_SPI @@ -150,24 +168,32 @@ Java_org_postgresql_pljava_internal_SPI__1exec(JNIEnv* env, jclass cls, jstring /* * Class: org_postgresql_pljava_internal_SPI - * Method: _getProcessed - * Signature: ()J + * Method: _endXact + * Signature: (Z)V */ -JNIEXPORT jlong JNICALL -Java_org_postgresql_pljava_internal_SPI__1getProcessed(JNIEnv* env, jclass cls) +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_SPI__1endXact(JNIEnv* env, jclass cls, jboolean rollback) { - return (jlong)SPI_processed; -} + char *where = rollback ? 
"SPI_rollback" : "SPI_commit"; + BEGIN_NATIVE + STACK_BASE_VARS + STACK_BASE_PUSH(env) + PG_TRY(); + { + Invocation_assertConnect(); + if ( rollback ) + SPI_rollback(); + else + SPI_commit(); -/* - * Class: org_postgresql_pljava_internal_SPI - * Method: _getResult - * Signature: ()I - */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_SPI__1getResult(JNIEnv* env, jclass cls) -{ - return (jint)SPI_result; + } + PG_CATCH(); + { + Exception_throw_ERROR(where); + } + PG_END_TRY(); + STACK_BASE_POP() + END_NATIVE } /* @@ -188,6 +214,30 @@ Java_org_postgresql_pljava_internal_SPI__1getTupTable(JNIEnv* env, jclass cls, j return tupleTable; } +/* + * Class: org_postgresql_pljava_internal_SPI + * Method: _mapTupTable + * Signature: (Lorg/postgresql/pljava/pg/TupleTableSlotImpl;JI)Lorg/postgresql/pljava/pg/TupleList; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_SPI__1mapTupTable(JNIEnv* env, jclass cls, jobject ttsi, jlong p, jint sizeToMap) +{ + jobject tupleList = NULL; + SPITupleTable *tuptbl = JLongGet(SPITupleTable *, p); + jobject bb; + if ( p != 0 ) + { + BEGIN_NATIVE_AND_TRY + bb = JNI_newDirectByteBuffer(tuptbl->vals, sizeToMap); + tupleList = JNI_newObjectLocked( + s_TupleList_SPI_class, s_TupleList_SPI_init, ttsi, p, bb); + END_NATIVE_AND_CATCH("_mapTupleTable") + } + if ( 0 != tupleList && SPI_tuptable == tuptbl ) + SPI_tuptable = NULL; /* protect from legacy _freetuptable below */ + return tupleList; +} + /* * Class: org_postgresql_pljava_internal_SPI * Method: _freeTupTable diff --git a/pljava-so/src/main/c/SQLInputFromTuple.c b/pljava-so/src/main/c/SQLInputFromTuple.c index 563887710..a4fd220b1 100644 --- a/pljava-so/src/main/c/SQLInputFromTuple.c +++ b/pljava-so/src/main/c/SQLInputFromTuple.c @@ -25,13 +25,12 @@ static jmethodID s_SQLInputFromTuple_init; jobject pljava_SQLInputFromTuple_create(HeapTupleHeader hth) { jlong heapTup = PointerGetJLong(hth); - jlong lifespan = PointerGetJLong(currentInvocation); jobject result; jobject jtd = pljava_SingleRowReader_getTupleDesc(hth); result = JNI_newObjectLocked(s_SQLInputFromTuple_class, s_SQLInputFromTuple_init, - pljava_DualState_key(), lifespan, heapTup, jtd); + heapTup, jtd); JNI_deleteLocalRef(jtd); return result; @@ -44,7 +43,7 @@ void pljava_SQLInputFromTuple_initialize(void) jclass cls = PgObject_getJavaClass("org/postgresql/pljava/jdbc/SQLInputFromTuple"); s_SQLInputFromTuple_init = PgObject_getJavaMethod(cls, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJLorg/postgresql/pljava/internal/TupleDesc;)V"); + "(JLorg/postgresql/pljava/internal/TupleDesc;)V"); s_SQLInputFromTuple_class = JNI_newGlobalRef(cls); JNI_deleteLocalRef(cls); } diff --git a/pljava-so/src/main/c/VarlenaWrapper.c b/pljava-so/src/main/c/VarlenaWrapper.c index 079852bf0..e97fa440c 100644 --- a/pljava-so/src/main/c/VarlenaWrapper.c +++ b/pljava-so/src/main/c/VarlenaWrapper.c @@ -48,8 +48,11 @@ #define INITIALSIZE 1024 +static jclass s_DatumImpl_class; + +static jmethodID s_DatumImpl_adopt; + static jclass s_VarlenaWrapper_class; -static jmethodID s_VarlenaWrapper_adopt; static jclass s_VarlenaWrapper_Input_class; static jclass s_VarlenaWrapper_Output_class; @@ -189,7 +192,7 @@ jobject pljava_VarlenaWrapper_Input( jdatum = PointerGetJLong(vl); vr = JNI_newObjectLocked(s_VarlenaWrapper_Input_class, - s_VarlenaWrapper_Input_init, pljava_DualState_key(), + s_VarlenaWrapper_Input_init, jro, jcxt, jpin, jdatum, (jlong)parked, (jlong)actual, dbb); @@ -251,7 +254,7 @@ jobject pljava_VarlenaWrapper_Output(MemoryContext parent, 
ResourceOwner ro) dbb = JNI_newDirectByteBuffer(evosh->tail + 1, INITIALSIZE); vos = JNI_newObjectLocked(s_VarlenaWrapper_Output_class, - s_VarlenaWrapper_Output_init, pljava_DualState_key(), + s_VarlenaWrapper_Output_init, jro, jcxt, jdatum, dbb); JNI_deleteLocalRef(dbb); @@ -270,9 +273,7 @@ Datum pljava_VarlenaWrapper_adopt(jobject vlw) { jlong adopted; - adopted = JNI_callLongMethodLocked(vlw, s_VarlenaWrapper_adopt, - pljava_DualState_key()); - + adopted = JNI_callLongMethodLocked(vlw, s_DatumImpl_adopt); return PointerGetDatum(JLongGet(Pointer, adopted)); } @@ -335,6 +336,9 @@ void pljava_VarlenaWrapper_initialize(void) { 0, 0, 0 } }; + s_DatumImpl_class = + (jclass)JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/pg/DatumImpl")); s_VarlenaWrapper_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass( "org/postgresql/pljava/internal/VarlenaWrapper")); @@ -347,17 +351,14 @@ void pljava_VarlenaWrapper_initialize(void) s_VarlenaWrapper_Input_init = PgObject_getJavaMethod( s_VarlenaWrapper_Input_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;" - "JJJJJJLjava/nio/ByteBuffer;)V"); + "(JJJJJJLjava/nio/ByteBuffer;)V"); s_VarlenaWrapper_Output_init = PgObject_getJavaMethod( s_VarlenaWrapper_Output_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;" - "JJJLjava/nio/ByteBuffer;)V"); + "(JJJLjava/nio/ByteBuffer;)V"); - s_VarlenaWrapper_adopt = PgObject_getJavaMethod( - s_VarlenaWrapper_class, "adopt", - "(Lorg/postgresql/pljava/internal/DualState$Key;)J"); + s_DatumImpl_adopt = PgObject_getJavaMethod( + s_DatumImpl_class, "adopt", "()J"); clazz = PgObject_getJavaClass( "org/postgresql/pljava/internal/VarlenaWrapper$Input$State"); diff --git a/pljava-so/src/main/c/type/ErrorData.c b/pljava-so/src/main/c/type/ErrorData.c index a9d86591d..54354ce15 100644 --- a/pljava-so/src/main/c/type/ErrorData.c +++ b/pljava-so/src/main/c/type/ErrorData.c @@ -31,14 +31,8 @@ jobject pljava_ErrorData_getCurrentError(void) ErrorData* errorData = CopyErrorData(); MemoryContextSwitchTo(curr); - /* - * Passing (jlong)0 as the ResourceOwner means this will never be matched by - * a nativeRelease call; that's appropriate (for now) as the ErrorData copy - * is being made into JavaMemoryContext, which never gets reset, so only - * unreachability from the Java side will free it. - */ jed = JNI_newObjectLocked(s_ErrorData_class, s_ErrorData_init, - pljava_DualState_key(), (jlong)0, PointerGetJLong(errorData)); + PointerGetJLong(errorData)); return jed; } @@ -139,7 +133,7 @@ void pljava_ErrorData_initialize(void) s_ErrorData_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/ErrorData")); PgObject_registerNatives2(s_ErrorData_class, methods); s_ErrorData_init = PgObject_getJavaMethod(s_ErrorData_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); + "(J)V"); s_ErrorData_getNativePointer = PgObject_getJavaMethod(s_ErrorData_class, "getNativePointer", "()J"); } diff --git a/pljava-so/src/main/c/type/Oid.c b/pljava-so/src/main/c/type/Oid.c index 37bb89b2e..987654d10 100644 --- a/pljava-so/src/main/c/type/Oid.c +++ b/pljava-so/src/main/c/type/Oid.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -23,7 +23,6 @@ #include "pljava/type/String.h" #include "pljava/Exception.h" #include "pljava/Function.h" -#include "pljava/Invocation.h" static jclass s_Oid_class; static jmethodID s_Oid_init; @@ -31,6 +30,32 @@ static jmethodID s_Oid_registerType; static jfieldID s_Oid_m_native; static jobject s_OidOid; +static jclass s_CatalogObject_class; +static jclass s_CatalogObjectImpl_class; +static jmethodID s_CatalogObjectImpl_of; +static jmethodID s_CatalogObject_oid; + +static bool _CatalogObject_canReplaceType(Type self, Type other) +{ + TypeClass cls = Type_getClass(other); + return Type_getClass(self) == cls || Type_getOid(other) == OIDOID; +} + +static jvalue _CatalogObject_coerceDatum(Type self, Datum arg) +{ + Oid oid = DatumGetObjectId(arg); + jvalue result; + result.l = JNI_callStaticObjectMethodLocked( + s_CatalogObjectImpl_class, s_CatalogObjectImpl_of, (jint)oid); + return result; +} + +static Datum _CatalogObject_coerceObject(Type self, jobject obj) +{ + jint o = JNI_callIntMethod(obj, s_CatalogObject_oid); + return ObjectIdGetDatum((Oid)o); +} + /* * org.postgresql.pljava.type.Oid type. */ @@ -225,6 +250,25 @@ void Oid_initialize(void) "(Ljava/lang/Class;Lorg/postgresql/pljava/internal/Oid;)V"); JNI_callStaticVoidMethod(s_Oid_class, s_Oid_registerType, s_Oid_class, s_OidOid); + + s_CatalogObject_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/model/CatalogObject")); + s_CatalogObjectImpl_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/pg/CatalogObjectImpl")); + s_CatalogObject_oid = PgObject_getJavaMethod(s_CatalogObject_class, + "oid", "()I"); + s_CatalogObjectImpl_of = PgObject_getStaticJavaMethod( + s_CatalogObjectImpl_class, + "of", "(I)Lorg/postgresql/pljava/model/CatalogObject;"); + + cls = TypeClass_alloc("type.CatalogObject"); + cls->JNISignature = "Lorg/postgresql/pljava/model/CatalogObject;"; + cls->javaTypeName = "org.postgresql.pljava.model.CatalogObject"; + cls->canReplaceType = _CatalogObject_canReplaceType; + cls->coerceDatum = _CatalogObject_coerceDatum; + cls->coerceObject = _CatalogObject_coerceObject; + Type_registerType("org.postgresql.pljava.model.CatalogObject", + TypeClass_allocInstance(cls, OIDOID)); } /* @@ -301,7 +345,7 @@ Java_org_postgresql_pljava_internal_Oid__1getJavaClassName(JNIEnv* env, jclass c } else { - Type type = Type_objectTypeFromOid((Oid)oid, Invocation_getTypeMap()); + Type type = Type_objectTypeFromOid((Oid)oid, Function_currentTypeMap()); result = String_createJavaStringFromNTS(Type_getJavaTypeName(type)); } END_NATIVE diff --git a/pljava-so/src/main/c/type/Portal.c b/pljava-so/src/main/c/type/Portal.c index 4a42a8e82..44d48fe03 100644 --- a/pljava-so/src/main/c/type/Portal.c +++ b/pljava-so/src/main/c/type/Portal.c @@ -22,6 +22,7 @@ #include "pljava/Exception.h" #include "pljava/Invocation.h" #include "pljava/HashMap.h" +#include "pljava/ModelUtils.h" #include "pljava/type/Type_priv.h" #include "pljava/type/TupleDesc.h" #include "pljava/type/Portal.h" @@ -31,6 +32,10 @@ #include #endif +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == (org_postgresql_pljava_internal_Portal_##c), \ + "Java/C value mismatch for " #c) + static jclass s_Portal_class; static jmethodID s_Portal_init; @@ -44,8 +49,8 @@ jobject pljava_Portal_create(Portal portal, jobject jplan) return NULL; jportal = JNI_newObjectLocked(s_Portal_class, s_Portal_init, - pljava_DualState_key(), - 
PointerGetJLong(portal->resowner), PointerGetJLong(portal), jplan); + PointerGetJLong(portal->resowner), + PointerGetJLong(portal->portalContext), PointerGetJLong(portal), jplan); return jportal; } @@ -56,6 +61,16 @@ void pljava_Portal_initialize(void) { JNINativeMethod methods[] = { + { + "_getTupleDescriptor", + "(J)Lorg/postgresql/pljava/model/TupleDescriptor;", + Java_org_postgresql_pljava_internal_Portal__1getTupleDescriptor + }, + { + "_makeTupleTableSlot", + "(JLorg/postgresql/pljava/model/TupleDescriptor;)Lorg/postgresql/pljava/pg/TupleTableSlotImpl;", + Java_org_postgresql_pljava_internal_Portal__1makeTupleTableSlot + }, { "_getName", "(J)Ljava/lang/String;", @@ -67,6 +82,11 @@ void pljava_Portal_initialize(void) Java_org_postgresql_pljava_internal_Portal__1getPortalPos }, { + "_getTupleDescriptor", + "(J)Lorg/postgresql/pljava/model/TupleDescriptor;", + Java_org_postgresql_pljava_internal_Portal__1getTupleDescriptor + }, + { "_getTupleDesc", "(J)Lorg/postgresql/pljava/internal/TupleDesc;", Java_org_postgresql_pljava_internal_Portal__1getTupleDesc @@ -97,13 +117,70 @@ void pljava_Portal_initialize(void) s_Portal_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/Portal")); PgObject_registerNatives2(s_Portal_class, methods); s_Portal_init = PgObject_getJavaMethod(s_Portal_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJLorg/postgresql/pljava/internal/ExecutionPlan;)V"); + "(JJJLorg/postgresql/pljava/internal/ExecutionPlan;)V"); + + /* + * Statically assert that the Java code has the right values for these. + * I would rather have this at the top, but these count as statements and + * would trigger a declaration-after-statment warning. + */ + CONFIRMCONST(FETCH_FORWARD); + CONFIRMCONST(FETCH_BACKWARD); + CONFIRMCONST(FETCH_ABSOLUTE); + CONFIRMCONST(FETCH_RELATIVE); + + /* + * Many SPI functions are declared with 'long' parameters and while + * FETCH_ALL is declared as LONG_MAX everywhere, it's not the same value + * everywhere (Windows has 32-bit longs), so this can't just be a fixed Java + * constant with CONFIRMCONST here. May as well check that the assumption + * FETCH_ALL == LONG_MAX still holds, though. 
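+ *
+ * If the Java side ever needed the platform value itself (rather than just a
+ * "fetch everything" marker), the portable route would be a tiny native
+ * accessor along these (hypothetical) lines, since jlong is 64 bits
+ * everywhere while C long is not:
+ *
+ *     JNIEXPORT jlong JNICALL
+ *     Java_org_postgresql_pljava_internal_Portal__1fetchAll(
+ *         JNIEnv *env, jclass cls)
+ *     {
+ *         return (jlong)FETCH_ALL;
+ *     }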
+ */ + StaticAssertStmt((FETCH_ALL) == (LONG_MAX), "Unexpected FETCH_ALL value"); } /**************************************** * JNI methods ****************************************/ +/* + * Class: org_postgresql_pljava_internal_Portal + * Method: _getTupleDescriptor + * Signature: (J)Lorg/postgresql/pljava/model/TupleDescriptor; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_Portal__1getTupleDescriptor(JNIEnv* env, jclass clazz, jlong _this) +{ + jobject result = 0; + if(_this != 0) + { + BEGIN_NATIVE + result = pljava_TupleDescriptor_create( + JLongGet(Portal, _this)->tupDesc, InvalidOid); + END_NATIVE + } + return result; +} + +/* + * Class: org_postgresql_pljava_internal_Portal + * Method: _makeTupleTableSlot + * Signature: (JLorg/postgresql/pljava/model/TupleDescriptor;)Lorg/postgresql/pljava/pg/TupleTableSlotImpl; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_Portal__1makeTupleTableSlot(JNIEnv* env, jclass clazz, jlong _this, jobject jtd) +{ + jobject result = 0; + if(_this != 0) + { + BEGIN_NATIVE + result = pljava_TupleTableSlot_create(JLongGet(Portal, _this)->tupDesc, + jtd, &TTSOpsHeapTuple, InvalidOid); + END_NATIVE + } + return result; +} + /* * Class: org_postgresql_pljava_internal_Portal * Method: _getPortalPos diff --git a/pljava-so/src/main/c/type/Relation.c b/pljava-so/src/main/c/type/Relation.c index 7b5bec912..10e3b372c 100644 --- a/pljava-so/src/main/c/type/Relation.c +++ b/pljava-so/src/main/c/type/Relation.c @@ -18,7 +18,7 @@ #include "org_postgresql_pljava_internal_Relation.h" #include "pljava/DualState.h" #include "pljava/Exception.h" -#include "pljava/Invocation.h" +#include "pljava/Function.h" #include "pljava/SPI.h" #include "pljava/type/Type_priv.h" #include "pljava/type/String.h" @@ -40,8 +40,6 @@ jobject pljava_Relation_create(Relation r) return JNI_newObjectLocked( s_Relation_class, s_Relation_init, - pljava_DualState_key(), - PointerGetJLong(currentInvocation), PointerGetJLong(r)); } @@ -75,7 +73,7 @@ void pljava_Relation_initialize(void) s_Relation_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/Relation")); PgObject_registerNatives2(s_Relation_class, methods); s_Relation_init = PgObject_getJavaMethod(s_Relation_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); + "(J)V"); } /**************************************** @@ -190,7 +188,7 @@ Java_org_postgresql_pljava_internal_Relation__1modifyTuple(JNIEnv* env, jclass c { jint idx; TupleDesc tupleDesc = self->rd_att; - jobject typeMap = Invocation_getTypeMap(); + jobject typeMap = Function_currentTypeMap(); jint count = JNI_getArrayLength(_indexes); Datum* values = (Datum*)palloc(count * sizeof(Datum)); diff --git a/pljava-so/src/main/c/type/SQLXMLImpl.c b/pljava-so/src/main/c/type/SQLXMLImpl.c index 157f4251b..a65e2ca7b 100644 --- a/pljava-so/src/main/c/type/SQLXMLImpl.c +++ b/pljava-so/src/main/c/type/SQLXMLImpl.c @@ -193,19 +193,19 @@ void pljava_SQLXMLImpl_initialize(void) s_SQLXML_class = JNI_newGlobalRef(PgObject_getJavaClass( "org/postgresql/pljava/jdbc/SQLXMLImpl")); s_SQLXML_adopt = PgObject_getStaticJavaMethod(s_SQLXML_class, "adopt", - "(Ljava/sql/SQLXML;I)Lorg/postgresql/pljava/internal/VarlenaWrapper;"); + "(Ljava/sql/SQLXML;I)Lorg/postgresql/pljava/adt/spi/Datum;"); s_SQLXML_Readable_PgXML_class = JNI_newGlobalRef(PgObject_getJavaClass( "org/postgresql/pljava/jdbc/SQLXMLImpl$Readable$PgXML")); s_SQLXML_Readable_PgXML_init = PgObject_getJavaMethod( s_SQLXML_Readable_PgXML_class, - "", 
"(Lorg/postgresql/pljava/internal/VarlenaWrapper$Input;I)V"); + "", "(Lorg/postgresql/pljava/adt/spi/Datum$Input;I)V"); s_SQLXML_Readable_Synthetic_class = JNI_newGlobalRef(PgObject_getJavaClass( "org/postgresql/pljava/jdbc/SQLXMLImpl$Readable$Synthetic")); s_SQLXML_Readable_Synthetic_init = PgObject_getJavaMethod( s_SQLXML_Readable_Synthetic_class, - "", "(Lorg/postgresql/pljava/internal/VarlenaWrapper$Input;I)V"); + "", "(Lorg/postgresql/pljava/adt/spi/Datum$Input;I)V"); s_SQLXML_Writable_class = JNI_newGlobalRef(PgObject_getJavaClass( "org/postgresql/pljava/jdbc/SQLXMLImpl$Writable")); diff --git a/pljava-so/src/main/c/type/SingleRowReader.c b/pljava-so/src/main/c/type/SingleRowReader.c index 50736ebb7..86b70125b 100644 --- a/pljava-so/src/main/c/type/SingleRowReader.c +++ b/pljava-so/src/main/c/type/SingleRowReader.c @@ -52,7 +52,6 @@ jobject pljava_SingleRowReader_create(HeapTupleHeader ht) result = JNI_newObjectLocked(s_SingleRowReader_class, s_SingleRowReader_init, - pljava_DualState_key(), PointerGetJLong(currentInvocation), PointerGetJLong(ht), jtd); JNI_deleteLocalRef(jtd); @@ -76,7 +75,7 @@ void pljava_SingleRowReader_initialize(void) PgObject_getJavaClass("org/postgresql/pljava/jdbc/SingleRowReader"); PgObject_registerNatives2(cls, methods); s_SingleRowReader_init = PgObject_getJavaMethod(cls, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJLorg/postgresql/pljava/internal/TupleDesc;)V"); + "(JLorg/postgresql/pljava/internal/TupleDesc;)V"); s_SingleRowReader_class = JNI_newGlobalRef(cls); JNI_deleteLocalRef(cls); } diff --git a/pljava-so/src/main/c/type/TriggerData.c b/pljava-so/src/main/c/type/TriggerData.c index 2c5b23c6d..fb29061e0 100644 --- a/pljava-so/src/main/c/type/TriggerData.c +++ b/pljava-so/src/main/c/type/TriggerData.c @@ -37,8 +37,6 @@ jobject pljava_TriggerData_create(TriggerData* triggerData) return JNI_newObjectLocked( s_TriggerData_class, s_TriggerData_init, - pljava_DualState_key(), - PointerGetJLong(currentInvocation), PointerGetJLong(triggerData)); } @@ -127,8 +125,7 @@ void pljava_TriggerData_initialize(void) jcls = PgObject_getJavaClass("org/postgresql/pljava/internal/TriggerData"); PgObject_registerNatives2(jcls, methods); - s_TriggerData_init = PgObject_getJavaMethod(jcls, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); + s_TriggerData_init = PgObject_getJavaMethod(jcls, "", "(J)V"); s_TriggerData_getTriggerReturnTuple = PgObject_getJavaMethod( jcls, "getTriggerReturnTuple", "()J"); s_TriggerData_class = JNI_newGlobalRef(jcls); diff --git a/pljava-so/src/main/c/type/Tuple.c b/pljava-so/src/main/c/type/Tuple.c index bda3e81ed..2cc563827 100644 --- a/pljava-so/src/main/c/type/Tuple.c +++ b/pljava-so/src/main/c/type/Tuple.c @@ -62,14 +62,10 @@ jobject pljava_Tuple_internalCreate(HeapTuple ht, bool mustCopy) ht = heap_copytuple(ht); /* - * Passing (jlong)0 as the ResourceOwner means this will never be matched by a - * nativeRelease call; that's appropriate (for now) as the Tuple copy is - * being made into JavaMemoryContext, which never gets reset, so only - * unreachability from the Java side will free it. * XXX? this seems like a lot of tuple copying. 
*/ jht = JNI_newObjectLocked(s_Tuple_class, s_Tuple_init, - pljava_DualState_key(), (jlong)0, PointerGetJLong(ht)); + PointerGetJLong(ht)); return jht; } @@ -97,7 +93,7 @@ void pljava_Tuple_initialize(void) s_Tuple_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/Tuple")); PgObject_registerNatives2(s_Tuple_class, methods); s_Tuple_init = PgObject_getJavaMethod(s_Tuple_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); + "(J)V"); cls = TypeClass_alloc("type.Tuple"); cls->JNISignature = "Lorg/postgresql/pljava/internal/Tuple;"; diff --git a/pljava-so/src/main/c/type/TupleDesc.c b/pljava-so/src/main/c/type/TupleDesc.c index c862a9a42..45d1bcc87 100644 --- a/pljava-so/src/main/c/type/TupleDesc.c +++ b/pljava-so/src/main/c/type/TupleDesc.c @@ -18,7 +18,7 @@ #include "pljava/Backend.h" #include "pljava/DualState.h" #include "pljava/Exception.h" -#include "pljava/Invocation.h" +#include "pljava/Function.h" #include "pljava/type/Type_priv.h" #include "pljava/type/String.h" #include "pljava/type/Tuple.h" @@ -51,15 +51,8 @@ jobject pljava_TupleDesc_internalCreate(TupleDesc td) jobject jtd; td = CreateTupleDescCopyConstr(td); - /* - * Passing (jlong)0 as the ResourceOwner means this will never be matched by a - * nativeRelease call; that's appropriate (for now) as the TupleDesc copy is - * being made into JavaMemoryContext, which never gets reset, so only - * unreachability from the Java side will free it. - * XXX what about invalidating if DDL alters the column layout? - */ jtd = JNI_newObjectLocked(s_TupleDesc_class, s_TupleDesc_init, - pljava_DualState_key(), (jlong)0, PointerGetJLong(td), (jint)td->natts); + PointerGetJLong(td), (jint)td->natts); return jtd; } @@ -79,7 +72,7 @@ Type pljava_TupleDesc_getColumnType(TupleDesc tupleDesc, int index) type = 0; } else /* Type_objectTypeFromOid returns boxed types, when that matters */ - type = Type_objectTypeFromOid(typeId, Invocation_getTypeMap()); + type = Type_objectTypeFromOid(typeId, Function_currentTypeMap()); return type; } @@ -122,7 +115,7 @@ void pljava_TupleDesc_initialize(void) s_TupleDesc_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/TupleDesc")); PgObject_registerNatives2(s_TupleDesc_class, methods); s_TupleDesc_init = PgObject_getJavaMethod(s_TupleDesc_class, "", - "(Lorg/postgresql/pljava/internal/DualState$Key;JJI)V"); + "(JI)V"); cls = TypeClass_alloc("type.TupleDesc"); cls->JNISignature = "Lorg/postgresql/pljava/internal/TupleDesc;"; @@ -224,7 +217,7 @@ Java_org_postgresql_pljava_internal_TupleDesc__1formTuple(JNIEnv* env, jclass cl int count = self->natts; Datum* values = (Datum*)palloc(count * sizeof(Datum)); bool* nulls = palloc(count * sizeof(bool)); - jobject typeMap = Invocation_getTypeMap(); /* a global ref */ + jobject typeMap = Function_currentTypeMap(); /* a global ref */ memset(values, 0, count * sizeof(Datum)); memset(nulls, true, count * sizeof(bool));/*all values null initially*/ diff --git a/pljava-so/src/main/c/type/Type.c b/pljava-so/src/main/c/type/Type.c index d3dcc50a5..b0f4f22f2 100644 --- a/pljava-so/src/main/c/type/Type.c +++ b/pljava-so/src/main/c/type/Type.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -79,50 +79,8 @@ typedef struct Function fn; jobject rowProducer; jobject rowCollector; - /* - * Invocation instance, if any, the Java counterpart to currentInvocation - * the C struct. There isn't one unless it gets asked for, then if it is, - * it's saved here, so even though the C currentInvocation really is new on - * each entry from PG, Java will see one Invocation instance throughout the - * sequence of calls. - */ - jobject invocation; - /* - * Two pieces of state from Invocation.c's management of SPI connection, - * effectively keeping one such connection alive through the sequence of - * calls. I could easily be led to question the advisability of even doing - * that, but it has a long history in PL/Java, so changing it might call for - * some careful analysis. - */ - MemoryContext spiContext; - bool hasConnected; } CallContextData; -/* - * Called during evaluation of a set-returning function, at various points after - * calls into Java code could have instantiated an Invocation, or connected SPI. - * Does not stash elemType, rowProducer, or rowCollector; those are all - * unconditionally set in the first-call initialization, and spiContext to zero. - */ -static void stashCallContext(CallContextData *ctxData) -{ - bool wasConnected = ctxData->hasConnected; - - ctxData->hasConnected = currentInvocation->hasConnected; - - ctxData->invocation = currentInvocation->invocation; - - if ( wasConnected ) - return; - - /* - * If SPI has been connected for the first time, capture the memory context - * it imposed. Curiously, this is not used again except in _closeIteration. - */ - if(ctxData->hasConnected) - ctxData->spiContext = CurrentMemoryContext; -} - /* * Called either at normal completion of a set-returning function, or by the * _endOfSetCB if PostgreSQL doesn't want all the results. @@ -130,8 +88,6 @@ static void stashCallContext(CallContextData *ctxData) static void _closeIteration(CallContextData* ctxData) { jobject dummy; - currentInvocation->hasConnected = ctxData->hasConnected; - currentInvocation->invocation = ctxData->invocation; /* * Why pass 1 as the call_cntr? We won't always have the actual call_cntr @@ -147,24 +103,6 @@ static void _closeIteration(CallContextData* ctxData) JNI_deleteGlobalRef(ctxData->rowProducer); if(ctxData->rowCollector != 0) JNI_deleteGlobalRef(ctxData->rowCollector); - - if(ctxData->hasConnected && ctxData->spiContext != 0) - { - /* - * SPI was connected. We will (1) switch back to the memory context that - * was imposed by SPI_connect, then (2) disconnect. SPI_finish will have - * switched back to whatever memory context was current when SPI_connect - * was called, and that context had better still be valid. It might be - * the executor's multi_call_memory_ctx, if the SPI_connect happened - * during initialization of the rowProducer or rowCollector, or the - * executor's per-row context, if it happened later. Both of those are - * still valid at this point. The final step (3) is to switch back to - * the context we had before (1) and (2) happened. 
- */ - MemoryContext currCtx = MemoryContextSwitchTo(ctxData->spiContext); - Invocation_assertDisconnect(); - MemoryContextSwitchTo(currCtx); - } } /* @@ -494,6 +432,39 @@ Datum Type_invokeSRF(Type self, Function fn, PG_FUNCTION_ARGS) SRF_RETURN_DONE(context); } + /* + * If the set-up function called above did not connect SPI, we are + * (unless the function changed it in some other arbitrary way) still + * in the multi_call_memory_ctx. We will return to currCtx (the executor + * per-row context) at the end of this set-up block, in preparation for + * producing the first row, if any. + * + * If the set-up function did connect SPI, we are now in the SPI Proc + * memory context (which will go away in SPI_finish when this call + * returns). That's not very much different from currCtx, the one the + * executor supplied us, which will be reset by the executor after the + * return of this call and before the next invocation. Here, we will + * switch back to the multi_call_memory_ctx for the remainder of this + * set-up block. As always, this block will end with a switch to currCtx + * and be ready to produce the first row. + * + * Two choices are possible here: 1) leave currCtx unchanged, so we + * end up in the executor's per-row context; 2) assign the SPI Proc + * context to it, so we end up in that. Because the contexts have very + * similar lifecycles, the choice does not seem critical. Of note, + * though, is that any SPI function that operates in the SPI Exec + * context will unconditionally leave the SPI Proc context as + * the current context when it returns; it will not save and restore + * its context on entry. Given that behavior, the choice here of (2) + * reassigning currCtx to mean the SPI Proc context would seem to create + * the situation with the least potential for surprises. + */ + if ( currentInvocation->hasConnected ) + currCtx = MemoryContextSwitchTo(context->multi_call_memory_ctx); + + /* + * This palloc depends on being made in the multi_call_memory_ctx. + */ ctxData = (CallContextData*)palloc0(sizeof(CallContextData)); context->user_fctx = ctxData; @@ -512,8 +483,6 @@ Datum Type_invokeSRF(Type self, Function fn, PG_FUNCTION_ARGS) JNI_deleteLocalRef(tmp); } - stashCallContext(ctxData); - /* Register callback to be called when the function ends */ RegisterExprContextCallback( @@ -531,15 +500,14 @@ Datum Type_invokeSRF(Type self, Function fn, PG_FUNCTION_ARGS) /* * Invariant: whether this is the first call and the SRF_IS_FIRSTCALL block * above just completed, or this is a subsequent call, at this point, the - * memory context is the per-row one supplied by the executor (which gets - * reset between calls). + * memory context is one that gets reset between calls: either the per-row + * context supplied by the executor, or (if this is the first call and the + * setup code used SPI) the "SPI Proc" context. 
*/ context = SRF_PERCALL_SETUP(); ctxData = (CallContextData*)context->user_fctx; - currCtx = CurrentMemoryContext; /* save executor's per-row context */ - currentInvocation->hasConnected = ctxData->hasConnected; - currentInvocation->invocation = ctxData->invocation; + currCtx = CurrentMemoryContext; /* save the supplied per-row context */ if(JNI_TRUE == pljava_Function_vpcInvoke(ctxData->fn, ctxData->rowProducer, ctxData->rowCollector, (jlong)context->call_cntr, @@ -547,18 +515,9 @@ Datum Type_invokeSRF(Type self, Function fn, PG_FUNCTION_ARGS) { Datum result = Type_datumFromSRF(self, row, ctxData->rowCollector); JNI_deleteLocalRef(row); - stashCallContext(ctxData); - currentInvocation->hasConnected = false; - currentInvocation->invocation = 0; - MemoryContextSwitchTo(currCtx); SRF_RETURN_NEXT(context, result); } - stashCallContext(ctxData); - currentInvocation->hasConnected = false; - currentInvocation->invocation = 0; - MemoryContextSwitchTo(currCtx); - /* Unregister this callback and call it manually. We do this because * otherwise it will be called when the backend is in progress of * cleaning up Portals. If we close cursors (i.e. drop portals) in @@ -732,7 +691,12 @@ Type Type_fromOid(Oid typeId, jobject typeMap) /* For some reason, the anyarray is *not* an array with anyelement as the * element type. We'd like to see it that way though. - * XXX would we, or does that mistake something intended in PostgreSQL? + * XXX this is a longstanding PL/Java misconception about the polymorphic + * types in PostgreSQL. When a function is declared with types like + * ANYARRAY and ANYELEMENT, there is supposed to be a step involving + * funcapi.c routines like get_fn_expr_argtype to resolve them to specific + * types for the current call site. Another thing to be sure to handle + * correctly in the API revamp. */ if(typeId == ANYARRAYOID) { @@ -810,6 +774,14 @@ bool _Type_canReplaceType(Type self, Type other) return self->typeClass == other->typeClass; } +/* + * The Type_invoke implementation that is 'inherited' by all type classes + * except Coerce, Composite, and those corresponding to Java primitives. + * This implementation unconditionally switches to the "upper memory context" + * recorded in the Invocation before coercing the Java result to a Datum, + * in case SPI has been connected (which would have switched to a context that + * is reset too soon for the caller to use the result). + */ Datum _Type_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { MemoryContext currCtx; @@ -841,9 +813,24 @@ static jobject _Type_getSRFCollector(Type self, PG_FUNCTION_ARGS) return 0; } +/* + * The Type_datumFromSRF implementation that is 'inherited' by all type classes + * except Composite. This implementation makes no use of the rowCollector + * parameter, and unconditionally switches to the "upper memory context" + * recorded in the Invocation before coercing the Java result to a Datum, in + * case SPI has been connected (which would have switched to a context that is + * reset too soon for the caller to use the result). 
+ */ static Datum _Type_datumFromSRF(Type self, jobject row, jobject rowCollector) { - return Type_coerceObject(self, row); + MemoryContext currCtx; + Datum ret; + + currCtx = Invocation_switchToUpperContext(); + ret = Type_coerceObject(self, row); + MemoryContextSwitchTo(currCtx); + + return ret; } jobject Type_getSRFCollector(Type self, PG_FUNCTION_ARGS) @@ -929,6 +916,9 @@ static void initializeTypeBridges() #endif ); + addTypeBridge(cls, ofInterface, + "org.postgresql.pljava.model.CatalogObject", OIDOID); + JNI_deleteLocalRef(cls); cls = PgObject_getJavaClass("org/postgresql/pljava/jdbc/TypeBridge$Holder"); diff --git a/pljava-so/src/main/include/pljava/DualState.h b/pljava-so/src/main/include/pljava/DualState.h index 64e111c76..a736b8a24 100644 --- a/pljava-so/src/main/include/pljava/DualState.h +++ b/pljava-so/src/main/include/pljava/DualState.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -13,7 +13,6 @@ #define __pljava_DualState_h #include -#include #include "pljava/pljava.h" @@ -21,16 +20,10 @@ extern "C" { #endif -extern jobject pljava_DualState_key(void); - extern void pljava_DualState_cleanEnqueuedInstances(void); extern void pljava_DualState_initialize(void); -extern void pljava_DualState_unregister(void); - -extern void pljava_DualState_nativeRelease(void *); - #ifdef __cplusplus } #endif diff --git a/pljava-so/src/main/include/pljava/Function.h b/pljava-so/src/main/include/pljava/Function.h index 005d0e5fc..700220d09 100644 --- a/pljava-so/src/main/include/pljava/Function.h +++ b/pljava-so/src/main/include/pljava/Function.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -51,8 +51,8 @@ extern Type Function_checkTypeBaseUDT(Oid typeId, Form_pg_type typeStruct); /* * First translate a function Oid to a Function (looking it up according to the - * trusted, forTrigger, forValidator, and checkBody parameters), and then - * (unless forValidator is true) invoke it: i.e. coerce the parameters, call the + * forTrigger, forValidator, and checkBody parameters), and then (unless + * forValidator is true) invoke it: i.e. coerce the parameters, call the * java method, and coerce the return value back to a Datum. The return-value * coercion is handled by a convention where this call will delegate to the Type * representing the SQL return type. That will call back on one of the flavors @@ -64,7 +64,7 @@ extern Type Function_checkTypeBaseUDT(Oid typeId, Form_pg_type typeStruct); */ extern Datum Function_invoke( Oid funcoid, - bool trusted, bool forTrigger, bool forValidator, bool checkBody, + bool forTrigger, bool forValidator, bool checkBody, PG_FUNCTION_ARGS); /* @@ -136,26 +136,32 @@ extern jobject pljava_Function_udtReadHandle( jclass clazz, char *langName, bool trusted); /* - * Returns the type map that is held by the function's schema loader (the - * initiating loader that was used when the function was resolved). It is a map - * from Java Oid objects to Class objects, as resolved by that loader. 
+ * Returns a JNI global reference to the initiating (schema) class loader used + * to load the currently-executing function. */ -extern jobject Function_getTypeMap(Function self); +extern jobject Function_currentLoader(void); /* - * Returns true if the currently executing function is non volatile, i.e. stable - * or immutable. Such functions are not allowed to have side effects. + * Returns the type map held by the innermost executing PL/Java function's + * schema loader (the initiating loader that was used to resolve the function). + * The type map is a map from Java Oid objects to Class, + * as resolved by that loader. This is effectively Function_currentLoader() + * followed by JNI-invoking getTypeMap on the loader, but cached to avoid JNI. */ -extern bool Function_isCurrentReadOnly(void); +extern jobject Function_currentTypeMap(void); /* - * Return a global reference to the initiating (schema) class loader used - * to load the currently-executing function. - * - * Invocation_getTypeMap is equivalent to calling this and then JNI-invoking - * getTypeMap on the returned loader (cast to PL/Java's loader subclass). + * Returns true if the currently executing function is non volatile, i.e. stable + * or immutable: the function author has declared it will not have visible + * side effects in the database. The normal behavior of JDBC methods that + * call SPI functions having a "read-only" parameter will be to pass true + * for that parameter, if this function returns true. Passing true to an SPI + * "read-only" parameter means both less and more than you might think: it may + * not necessarily preclude all visible effects, and it also constrains the + * function to use an existing snapshot in which the results of recent + * preceding operations cannot be seen. */ -extern jobject Function_currentLoader(void); +extern bool Function_isCurrentReadOnly(void); /* * A nameless Function singleton with the property ! isCurrentReadOnly() diff --git a/pljava-so/src/main/include/pljava/InstallHelper.h b/pljava-so/src/main/include/pljava/InstallHelper.h index 65acdc614..cc47e2acc 100644 --- a/pljava-so/src/main/include/pljava/InstallHelper.h +++ b/pljava-so/src/main/include/pljava/InstallHelper.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -59,10 +59,10 @@ extern bool pljavaLoadingAsExtension; /* * Another way of getting the library path: if invoked by the fmgr before - * initialization is complete, save the last function Oid seen (trusted or - * untrusted) ... can be used later to get the library path if needed. - * isPLJavaFunction can use the stashed information to determine whether an - * arbitrary function Oid is a function built on PL/Java, without relying on + * initialization is complete, save the last function Oid seen ... + * can be used later to get the library path if needed. isPLJavaFunction + * can use the stashed information to determine whether an arbitrary + * function Oid is a function built on PL/Java, without relying on * assumptions about the language name, etc. 
* * It can return the language name and/or trusted flag if non-null pointers @@ -70,7 +70,13 @@ extern bool pljavaLoadingAsExtension; */ extern char *pljavaFnOidToLibPath(Oid fn, char **langName, bool *trusted); -extern Oid pljavaTrustedOid, pljavaUntrustedOid; +/* + * Some oid that can be taken to refer to PL/Java (because PostgreSQL passed it + * as a target to one of PL/Java's entry points, and it wasn't rejected by the + * validator), and so can be used for possible chores like ascertaining the + * pg_language or the shared-object name by which we are known. + */ +extern Oid pljavaOid; extern bool InstallHelper_isPLJavaFunction( Oid fn, char **langName, bool *trusted); diff --git a/pljava-so/src/main/include/pljava/Invocation.h b/pljava-so/src/main/include/pljava/Invocation.h index 28bebe41c..cd1738ae7 100644 --- a/pljava-so/src/main/include/pljava/Invocation.h +++ b/pljava-so/src/main/include/pljava/Invocation.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -28,48 +28,56 @@ extern "C" { struct Invocation_ { /** - * A Java object representing the current invocation. This - * field will be NULL if no such object has been requested. + * The level of nested call into PL/Java represented by this Invocation. + * Including it in this struct is slightly redundant (it can be "saved" and + * "restored" just by increment/decrement), but allows it to be read with no + * additional fuss by the Java code through a single ByteBuffer window over + * the currentInvocation struct. */ - jobject invocation; + int32 nestLevel; + + /** + * Set if the Java Invocation instance corresponding to this invocation + * has been requested and assigned. If so, its onExit method will be called + * when this invocation is popped. + */ + bool hasDual; /** - * The context to use when allocating values that are to be - * returned from the call. + * Set to true if an elog with a severity >= ERROR + * has occured. All calls from Java to the backend will + * be prevented until this flag is reset (by a rollback + * of a savepoint or function exit). */ - MemoryContext upperContext; + bool errorOccurred; /** * Set when an SPI_connect is issued. Ensures that SPI_finish * is called when the function exits. */ - bool hasConnected; + bool hasConnected:1, /** * Set to true if the call originates from an ExprContextCallback. When - * it does, we should not close any cursors. + * it does, we should not close any cursors. Such a callback is registered + * in the setup of a value-per-call set-returning function, and used to + * detect when no further values of the set will be wanted. */ - bool inExprContextCB; + inExprContextCB:1, /** - * The saved limits reserved in Function.c's static parameter frame, as a - * count of reference and primitive parameters combined in a short. - * FRAME_LIMITS_PUSHED is an otherwise invalid value used to record that the - * more heavyweight saving of the frame as a Java ParameterFrame instance - * has occurred. Otherwise, this value (and the primitive slot 0 value - * below) are simply restored when this Invocation is exited normally or - * exceptionally. + * Set if transaction-control operations are to be allowed in SPI. 
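Purely to illustrate the single-ByteBuffer-window idea mentioned for nestLevel above, here is a hedged Java-side sketch; it is not code from this patch, and the field offset and the way the window is obtained are invented stand-ins.

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    class InvocationWindowSketch
    {
        /* hypothetical offset of the int32 nestLevel field within the struct */
        private static final int OFFSET_NESTLEVEL = 0;

        /* a direct buffer the native side would map over currentInvocation */
        private final ByteBuffer window;

        InvocationWindowSketch(ByteBuffer windowOverStruct)
        {
            window = windowOverStruct.order(ByteOrder.nativeOrder());
        }

        int nestLevel()
        {
            return window.getInt(OFFSET_NESTLEVEL);
        }

        boolean insidePLJavaCall()
        {
            return 0 < nestLevel();   /* same test as the C HAS_INVOCATION macro */
        }
    }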
*/ - jshort frameLimits; -#define FRAME_LIMITS_PUSHED ((jshort)-1) + nonAtomic:1; /** - * The saved value of the first primitive slot in Function's static - * parameter frame. Unless frameLimits above is FRAME_LIMITS_PUSHED, this - * value is simply restored when this Invocation is exited normally or - * exceptionally. + * The context to use when allocating values that are to be + * returned from the call. Copied from CurrentMemoryContext on invocation + * entry. If SPI_connect is later called (which changes the context to + * a local one), this is the same as what SPI calls the "upper executor + * context" and uses in functions like SPI_palloc. */ - jvalue primSlot0; + MemoryContext upperContext; /** * The saved thread context classloader from before this invocation @@ -79,15 +87,7 @@ struct Invocation_ /** * The currently executing Function. */ - Function function; - - /** - * Set to true if an elog with a severity >= ERROR - * has occured. All calls from Java to the backend will - * be prevented until this flag is reset (by a rollback - * of a savepoint or function exit). - */ - bool errorOccurred; + Function function; #if PG_VERSION_NUM >= 100000 /** @@ -95,18 +95,39 @@ struct Invocation_ * so it can be passed to SPI_register_trigger_data if the function connects * to SPI. */ - TriggerData* triggerData; + TriggerData* triggerData; #endif /** * The previous call context when nested function calls * are made or 0 if this call is at the top level. */ - Invocation* previous; + Invocation* previous; + + /** + * The saved value of the first primitive slot in Function's static + * parameter frame. Unless frameLimits above is FRAME_LIMITS_PUSHED, this + * value is simply restored when this Invocation is exited normally or + * exceptionally. + */ + jvalue primSlot0; + + /** + * The saved limits reserved in Function.c's static parameter frame, as a + * count of reference and primitive parameters combined in a short. + * FRAME_LIMITS_PUSHED is an otherwise invalid value used to record that the + * more heavyweight saving of the frame as a Java ParameterFrame instance + * has occurred. Otherwise, this value (and the primitive slot 0 value + * below) are simply restored when this Invocation is exited normally or + * exceptionally. + */ + jshort frameLimits; +#define FRAME_LIMITS_PUSHED ((jshort)-1) }; -extern Invocation* currentInvocation; +extern Invocation currentInvocation[]; +#define HAS_INVOCATION (0 < currentInvocation->nestLevel) extern void Invocation_assertConnect(void); @@ -121,19 +142,10 @@ extern void Invocation_pushInvocation(Invocation* ctx); extern void Invocation_popInvocation(bool wasException); /* - * Return the type map held by the innermost executing PL/Java function's - * schema loader (the initiating loader that was used to resolve the function). - * The type map is a map from Java Oid objects to Class class objects, - * as resolved by that loader. This is effectively Function_currentLoader() - * followed by JNI-invoking getTypeMap on the loader, but cached to avoid JNI). - */ -extern jobject Invocation_getTypeMap(void); - -/* - * Switch memory context to a context that is durable between calls to + * Switches memory context to a context that is durable between calls to * the call manager but not durable between queries. The old context is * returned. This method can be used when creating values that will be - * returned from the Pl/Java routines. Once the values have been created + * returned from the PL/Java routines. 
Once the values have been created * a call to MemoryContextSwitchTo(oldContext) must follow where oldContext * is the context returned from this call. */ diff --git a/pljava-so/src/main/include/pljava/JNICalls.h b/pljava-so/src/main/include/pljava/JNICalls.h index fe95b8c44..c3ed39844 100644 --- a/pljava-so/src/main/include/pljava/JNICalls.h +++ b/pljava-so/src/main/include/pljava/JNICalls.h @@ -25,9 +25,13 @@ extern "C" { extern jint (JNICALL *pljava_createvm)(JavaVM **, void **, void *); #define BEGIN_NATIVE_NO_ERRCHECK if(beginNativeNoErrCheck(env)) { -#define BEGIN_NATIVE if(beginNative(env)) { +#define BEGIN_NATIVE if(!beginNative(env)) ; else { #define END_NATIVE JNI_setEnv(0); } +#define BEGIN_NATIVE_AND_TRY BEGIN_NATIVE PG_TRY(); { +#define END_NATIVE_AND_CATCH(shortfunc) } PG_CATCH(); { \ + Exception_throw_ERROR(shortfunc); } PG_END_TRY(); END_NATIVE + /*********************************************************************** * All calls to and from the JVM uses this header. The calls are implemented * using a fence mechanism that prevents multiple threads to access diff --git a/pljava-so/src/main/include/pljava/ModelConstants.h b/pljava-so/src/main/include/pljava/ModelConstants.h new file mode 100644 index 000000000..60593094f --- /dev/null +++ b/pljava-so/src/main/include/pljava/ModelConstants.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +#ifndef __pljava_ModelConstants_h +#define __pljava_ModelConstants_h + +#include "pljava/pljava.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern void pljava_ModelConstants_initialize(void); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/pljava-so/src/main/include/pljava/ModelUtils.h b/pljava-so/src/main/include/pljava/ModelUtils.h new file mode 100644 index 000000000..8a88c17fc --- /dev/null +++ b/pljava-so/src/main/include/pljava/ModelUtils.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +#ifndef __pljava_ModelUtils_h +#define __pljava_ModelUtils_h + +#include +#include +#include + +#include "pljava/pljava.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern void pljava_ModelUtils_initialize(void); + +extern void pljava_ModelUtils_inlineDispatch(PG_FUNCTION_ARGS); + +extern Datum pljava_ModelUtils_callDispatch(PG_FUNCTION_ARGS, bool validating); + +extern void pljava_ResourceOwner_unregister(void); + +/* + * Return a Java TupleDescriptor based on a PostgreSQL one. + * + * If the descriptor's tdtypeid is not RECORDOID (meaning the descriptor is + * for a named composite type), passing the relation oid here, if handy, will + * save a lookup in the Java code. In other cases, or if it simply is not + * handily available, InvalidOid can be passed, and the relation will be looked + * up if needed. 
+ * + * If there is already a cached Java representation, the existing one + * is returned, and the supplied one's reference count (if it is counted) is + * untouched. If the supplied one is used to create a cached Java version, its + * reference count is incremented (without registering it for descriptor leak + * warnings), and it will be released upon removal from PL/Java's cache for + * invalidation or unreachability. If the descriptor is non-reference-counted, + * the returned Java object will not depend on it, and it is expendable + * after this function returns. + */ +extern jobject pljava_TupleDescriptor_create(TupleDesc tupdesc, Oid reloid); + +/* + * Create a PostgreSQL TupleTableSlot (of the specific type specified by + * tts_ops) and return a Java TupleTableSlot wrapping it. + * + * reloid is simply passed along to pljava_TupleDescriptor_create, so may be + * passed as InvalidOid with the same effects described there. + * + * If jtd is not NULL, it must be a JNI local reference to an existing Java + * TupleDescriptor that corresponds to the native tupdesc, and will be used + * instead of calling pljava_TupleDescriptor_create. On return, the local + * reference will have been deleted. + */ +extern jobject pljava_TupleTableSlot_create( + TupleDesc tupdesc, jobject jtd, + const TupleTableSlotOps *tts_ops, Oid reloid); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/pljava/pom.xml b/pljava/pom.xml index d7170e137..692b7b1fb 100644 --- a/pljava/pom.xml +++ b/pljava/pom.xml @@ -140,6 +140,8 @@ function executeReport(report, locale) "-quiet", "--show-module-contents", "all", "--show-packages", "all", + "--show-types", "package", + "--show-members", "package", /* * Options that are passed to the doclet. */ diff --git a/pljava/src/main/java/module-info.java b/pljava/src/main/java/module-info.java index 68923bbe4..856d6b319 100644 --- a/pljava/src/main/java/module-info.java +++ b/pljava/src/main/java/module-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2020-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -19,6 +19,8 @@ requires java.management; requires org.postgresql.pljava; + exports org.postgresql.pljava.pg.adt to org.postgresql.pljava; + exports org.postgresql.pljava.mbeans; // bothers me, but only interfaces exports org.postgresql.pljava.elog to java.logging; @@ -33,6 +35,12 @@ provides java.sql.Driver with org.postgresql.pljava.jdbc.SPIDriver; + provides org.postgresql.pljava.Adapter.Service + with org.postgresql.pljava.pg.adt.Service; + provides org.postgresql.pljava.Session with org.postgresql.pljava.internal.Session; + + provides org.postgresql.pljava.model.CatalogObject.Factory + with org.postgresql.pljava.pg.CatalogObjectImpl.Factory; } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/AbstractNoSplitList.java b/pljava/src/main/java/org/postgresql/pljava/internal/AbstractNoSplitList.java new file mode 100644 index 000000000..aaa82f7d6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/AbstractNoSplitList.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.util.AbstractList; +import java.util.Iterator; +import java.util.List; +import java.util.Spliterator; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterator.SIZED; +import java.util.Spliterators.AbstractSpliterator; + +import java.util.function.Consumer; + +import java.util.stream.Stream; + +/** + * An {@link AbstractList} whose {@link #parallelStream parallelStream} method + * returns a sequential stream (a behavior the spec does allow), and whose + * {@link #spliterator spliterator} method returns a {@link Spliterator} that + * never splits. + *
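As one illustration of where such a non-splitting spliterator is handy, the sketch below wraps an arbitrary iterator so downstream stream operations cannot be divided across threads; it is not part of the patch, and it assumes the erased generics above are IteratorNonSpliterator<T> over an Iterator<T>.

    import java.util.Iterator;
    import java.util.List;
    import java.util.Spliterator;
    import static java.util.Spliterator.ORDERED;
    import static java.util.Spliterator.SIZED;
    import java.util.stream.Stream;
    import java.util.stream.StreamSupport;
    import org.postgresql.pljava.internal.AbstractNoSplitList;

    class NoSplitSketch
    {
        /*
         * Even if a caller later asks for a parallel stream, the work cannot
         * be divided: the wrapped spliterator's trySplit() always returns null.
         */
        static Stream<String> oneThreadOnly(List<String> source)
        {
            Spliterator<String> sp =
                new AbstractNoSplitList.IteratorNonSpliterator<>(
                    source.iterator(), source.size(), ORDERED | SIZED);
            return StreamSupport.stream(sp, false);
        }
    }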
+ * In interfacing with the single-threaded PostgreSQL backend, there are many + * uses for a class with the behavior of {@link List} but that does not invite + * unintended parallelism through the stream API. + */ +public abstract class AbstractNoSplitList extends AbstractList +{ + /** + * "It is allowable" (and, in this case, inevitable) for this method to + * return a sequential stream. + */ + @Override + public Stream parallelStream() + { + return stream(); + } + + /** + * Returns a {@code Spliterator} that never splits. + */ + @Override + public Spliterator spliterator() + { + return new IteratorNonSpliterator<>(iterator(), size(), ORDERED|SIZED); + } + + /** + * As promised, an iterator whose spliterator won't split. + */ + public static class IteratorNonSpliterator extends AbstractSpliterator + { + private Iterator it; + + public IteratorNonSpliterator( + Iterator it, long est, int characteristics) + { + super(est, characteristics); + this.it = it; + } + + @Override + public boolean tryAdvance(Consumer action) + { + if ( ! it.hasNext() ) + return false; + action.accept(it.next()); + return true; + } + + @Override + public Spliterator trySplit() + { + return null; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java b/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java index a07c477da..4209c3778 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java @@ -15,6 +15,12 @@ import java.io.InputStream; import java.io.IOException; +import java.lang.annotation.Native; + +import java.nio.ByteBuffer; + +import java.security.Permission; + import java.sql.SQLException; import java.sql.SQLDataException; import java.util.ArrayList; @@ -23,6 +29,9 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; + import org.postgresql.pljava.elog.ELogHandler; // for javadoc import org.postgresql.pljava.sqlgen.Lexicals.Identifier; @@ -50,9 +59,19 @@ public class Backend public static final boolean WITHOUT_ENFORCEMENT = "disallow".equals(System.getProperty("java.security.manager")); + public static final Consumer CHECKER; + @SuppressWarnings("deprecation") // Java >= 10: .feature() static final int JAVA_MAJOR = Runtime.version().major(); + /* + * Indices (index, singular, for now) into array of ByteBuffer returned by + * EarlyNatives._window + */ + @Native private static final int check_function_bodies = 0; + + public static final BooleanSupplier validateBodies; + static { IAMPGTHREAD.set(Boolean.TRUE); @@ -75,6 +94,13 @@ public class Backend { throw new ExceptionInInitializerError(e); } + + ByteBuffer[] bs = EarlyNatives._window(ByteBuffer.class); + ByteBuffer cfb = bs[check_function_bodies]; + validateBodies = () -> 0 != doInPG(() -> cfb.get(0)); + + CHECKER = + WITHOUT_ENFORCEMENT ? p -> {} : EntryPoints.permissionChecker(); } private static final Pattern s_gucList = Pattern.compile(String.format( @@ -177,7 +203,7 @@ public static int doInPG(Checked.IntSupplier op) /** * Specialization of {@link #doInPG(Supplier) doInPG} for operations that * return a long result. This method need not be present: without it, the - * Java compiler will happily match int lambdas or method references to + * Java compiler will happily match long lambdas or method references to * the generic method, at the small cost of some boxing/unboxing; providing * this method simply allows that to be avoided. 
*/ @@ -193,6 +219,84 @@ public static long doInPG(Checked.LongSupplier op) return op.getAsLong(); } + /** + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return a float result. This method need not be present: without it, the + * Java compiler will happily match float lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. + */ + public static float doInPG( + Checked.FloatSupplier op) + throws E + { + if ( null != THREADLOCK ) + synchronized(THREADLOCK) + { + return op.getAsFloat(); + } + assertThreadMayEnterPG(); + return op.getAsFloat(); + } + + /** + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return a short result. This method need not be present: without it, the + * Java compiler will happily match short lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. + */ + public static short doInPG( + Checked.ShortSupplier op) + throws E + { + if ( null != THREADLOCK ) + synchronized(THREADLOCK) + { + return op.getAsShort(); + } + assertThreadMayEnterPG(); + return op.getAsShort(); + } + + /** + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return a char result. This method need not be present: without it, the + * Java compiler will happily match char lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. + */ + public static char doInPG(Checked.CharSupplier op) + throws E + { + if ( null != THREADLOCK ) + synchronized(THREADLOCK) + { + return op.getAsChar(); + } + assertThreadMayEnterPG(); + return op.getAsChar(); + } + + /** + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return a byte result. This method need not be present: without it, the + * Java compiler will happily match int lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. + */ + public static byte doInPG(Checked.ByteSupplier op) + throws E + { + if ( null != THREADLOCK ) + synchronized(THREADLOCK) + { + return op.getAsByte(); + } + assertThreadMayEnterPG(); + return op.getAsByte(); + } + /** * Return true if the current thread may JNI-call into Postgres. *
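To show what the primitive-returning specializations above buy, here is a hedged sketch, not code from the patch; the accessor is a stand-in for a real native method. With the short overload present, an exact method reference resolves to it and the result is never boxed; without it, only the generic doInPG(Supplier) would apply.

    import org.postgresql.pljava.internal.Backend;

    class DoInPGSketch
    {
        /* stand-in for a native accessor that yields a 16-bit value */
        private static short _nativeFlags() { return 0; }

        static short flags()
        {
            /*
             * The exact method reference matches Checked.ShortSupplier, so the
             * new doInPG(Checked.ShortSupplier) overload is chosen and no
             * Short boxing occurs on the way back.
             */
            return Backend.doInPG(DoInPGSketch::_nativeFlags);
        }
    }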
@@ -355,5 +459,7 @@ private static class EarlyNatives private static native boolean _forbidOtherThreads(); private static native Class _defineClass( String name, ClassLoader loader, byte[] buf); + private static native ByteBuffer[] _window( + Class component); } } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/CacheMap.java b/pljava/src/main/java/org/postgresql/pljava/internal/CacheMap.java new file mode 100644 index 000000000..1b67bbe4f --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/CacheMap.java @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.lang.ref.Reference; +import static java.lang.ref.Reference.reachabilityFence; +import java.lang.ref.ReferenceQueue; +import java.lang.ref.SoftReference; +import java.lang.ref.WeakReference; + +import java.nio.ByteBuffer; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import java.util.concurrent.ConcurrentHashMap; + +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static java.util.stream.Collectors.joining; + +/** + * Utility class for constructing caches that map from keys that consist of + * one or more primitive values, whose entries can be retained strongly, softly, + * or weakly. + */ +public class CacheMap +{ + private final Map> m_map; + private final ThreadLocal> m_holder; + private final ThreadLocal> m_holderWithBuffer; + private final ReferenceQueue m_queue = new ReferenceQueue<>(); + + private CacheMap( + Map> map, + Supplier keyBufferSupplier) + { + m_map = map; + m_holder = ThreadLocal.withInitial(() -> new KVHolder()); + m_holderWithBuffer = ThreadLocal.withInitial(() -> + { + KVHolder h = m_holder.get(); + h.key = keyBufferSupplier.get(); + return h; + }); + } + + /** + * Construct a {@code CacheMap} based on a concurrent map. + */ + public static CacheMap newConcurrent( + Supplier keyBufferSupplier) + { + return new CacheMap<>( + new ConcurrentHashMap>(), + keyBufferSupplier); + } + + /** + * Construct a {@code CacheMap} based on a non-thread-safe map, for cases + * where concurrent access from multiple threads can be ruled out. + */ + public static CacheMap newThreadConfined( + Supplier keyBufferSupplier) + { + return new CacheMap<>( + new HashMap>(), + keyBufferSupplier); + } + + private void poll() + { + for ( KeyedEntry e; null != (e = (KeyedEntry)m_queue.poll()); ) + m_map.remove(e.key(), e); + /* + * Reference objects (of which e is one) do not override equals() from + * Object, which is good, because Map's remove(k,v) actually uses + * v.equals(...) and could therefore remove a different object than + * intended, if the object had other than == semantics for equals(). 
+ */ + } + + public T softlyCache( + Checked.Consumer keyer, + Checked.Function cacher) + throws E + { + BiFunction> wrapper = + (k,v) -> new SoftEntry<>(k, v, m_queue); + return cache(keyer, cacher, wrapper); + } + + public T weaklyCache( + Checked.Consumer keyer, + Checked.Function cacher) + throws E + { + BiFunction> wrapper = + (k,v) -> new WeakEntry<>(k, v, m_queue); + return cache(keyer, cacher, wrapper); + } + + public T stronglyCache( + Checked.Consumer keyer, + Checked.Function cacher) + throws E + { + BiFunction> wrapper = + (k,v) -> new StrongEntry<>(k, v, m_map); + return cache(keyer, cacher, wrapper); + } + + @Override + public String toString() + { + return m_map.values().stream() + .map(Entry::get) + .filter(Objects::nonNull) + .map(Object::toString) + .collect(joining(", ", "{", "}")); + } + + private T cache( + Checked.Consumer keyer, + Checked.Function cacher, + BiFunction> wrapper) + throws E + { + poll(); + KVHolder h = m_holderWithBuffer.get(); + ByteBuffer b = h.key; + b.clear(); + keyer.accept(b); + b.flip(); + KeyedEntry w; + for ( ;; ) + { + w = cacher.inReturning(Checked.Function.use( + (c) -> m_map.computeIfAbsent(b, + (k) -> + { + m_holderWithBuffer.remove(); + T v = c.apply(k); + h.value = v; // keep it live while returning through ref + return null == v ? null : wrapper.apply(k,v); + } + ) + )); + + if ( null == w ) + return null; + T v = w.get(); + reachabilityFence(h.value); + h.value = null; // no longer needed now that v is a strong reference + if ( null != v ) + return v; + m_map.remove(w.key(), w); + } + } + + /** + * Simple lookup, with no way to cache a new entry; returns null if no such + * entry is present. + *
+ * Returns an {@link Entry Entry} if found, which provides a method to + * remove the entry if appropriate. + */ + public Entry find( + Checked.Consumer keyer) + throws E + { + poll(); + KVHolder h = m_holderWithBuffer.get(); + ByteBuffer b = h.key; + b.clear(); + keyer.accept(b); + b.flip(); + return m_map.get(b); + } + + public void forEachValue(Consumer action) + { + if ( m_map instanceof ConcurrentHashMap ) + { + ConcurrentHashMap> m = + (ConcurrentHashMap>)m_map; + m.forEachValue(Long.MAX_VALUE, Entry::get, action); + return; + } + m_map.values().stream().map(Entry::get).filter(Objects::nonNull) + .forEach(action); + } + + /** + * An entry in a {@link CacheMap CacheMap}. + */ + public interface Entry + { + T get(); + void remove(); + } + + /** + * An {@link Entry Entry} that keeps a reference to its key. + */ + interface KeyedEntry extends Entry + { + ByteBuffer key(); + } + + /** + * A {@link KeyedEntry KeyedEntry} that holds + * a {@link SoftReference SoftReference} to its value. + */ + static class SoftEntry extends SoftReference implements KeyedEntry + { + final ByteBuffer m_key; + + SoftEntry(ByteBuffer k, T v, ReferenceQueue q) + { + super(v, q); + m_key = k; + } + + @Override + public ByteBuffer key() + { + return m_key; + } + + @Override + public void remove() + { + clear(); + enqueue(); + } + } + + /** + * A {@link KeyedEntry KeyedEntry} that holds + * a {@link WeakReference WeakReference} to its value. + */ + static class WeakEntry extends WeakReference implements KeyedEntry + { + final ByteBuffer m_key; + + WeakEntry(ByteBuffer k, T v, ReferenceQueue q) + { + super(v, q); + m_key = k; + } + + @Override + public ByteBuffer key() + { + return m_key; + } + + @Override + public void remove() + { + clear(); + enqueue(); + } + } + + /** + * A {@link KeyedEntry KeyedEntry} that holds + * a strong reference to its value. + */ + static class StrongEntry implements KeyedEntry + { + final ByteBuffer m_key; + T m_value; + final Map> m_map; + + StrongEntry(ByteBuffer k, T v, Map> map) + { + m_key = k; + m_value = v; + m_map = map; + } + + @Override + public ByteBuffer key() + { + return m_key; + } + + @Override + public T get() + { + return m_value; + } + + @Override + public void remove() + { + m_value = null; + m_map.remove(m_key, this); + } + } + + /* + * Hold a ByteBuffer for key use, and any new value briefly between + * construction and return (to avoid any chance of its being found + * weakly reachable before its return). + */ + /** + * An effectively private class used during {@link CacheMap CacheMap} + * operations. + *
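A rough usage sketch of the API above, not part of the patch: it assumes the erased signatures take a Supplier of key ByteBuffers, a keyer that fills the key, and a cacher that constructs the value on a miss; the oid-to-label mapping is invented for illustration. weaklyCache is shown; softlyCache and stronglyCache take arguments of the same shape.

    import java.nio.ByteBuffer;
    import org.postgresql.pljava.internal.CacheMap;

    class CacheMapSketch
    {
        /* keys here are a single int, so 4-byte key buffers suffice */
        static final CacheMap<String> BY_OID =
            CacheMap.newConcurrent(() -> ByteBuffer.allocate(4));

        static String labelFor(int oid)
        {
            return BY_OID.weaklyCache(
                b -> b.putInt(oid),             /* keyer: fill the key buffer  */
                k -> "catalog object #" + oid   /* cacher: runs only on a miss */
            );
        }
    }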
+ * Until PL/Java's support horizon for Java moves to Java >= 11 where + * classes have nestmates, there can be overhead in making nested classes + * private, so some in the internal module have been left at package access + * for now. + */ + static class KVHolder + { + ByteBuffer key; + T value; + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/DualState.java b/pljava/src/main/java/org/postgresql/pljava/internal/DualState.java index 438bc4a9d..b57d592f6 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/DualState.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/DualState.java @@ -17,6 +17,8 @@ import java.lang.ref.ReferenceQueue; import java.lang.ref.WeakReference; +import java.nio.ByteBuffer; + import java.sql.SQLException; import java.util.ArrayDeque; @@ -27,6 +29,7 @@ import java.util.List; import java.util.Map; import java.util.NoSuchElementException; +import static java.util.Objects.requireNonNull; import java.util.Queue; import java.util.concurrent.CancellationException; @@ -41,8 +44,15 @@ import javax.management.ObjectName; import javax.management.JMException; +import org.postgresql.pljava.Lifespan; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.LifespanImpl.Addressed; + import org.postgresql.pljava.mbeans.DualStateStatistics; +import org.postgresql.pljava.model.MemoryContext; + /** * Base class for object state with corresponding Java and native components. *
@@ -74,25 +84,18 @@ *
* A subclass calls {@link #releaseFromJava releaseFromJava} to signal an event * of the first kind. Events of the second kind are, naturally, detected by the - * Java garbage collector. To detect events of the third kind, a resource owner + * Java garbage collector. To detect events of the third kind, a lifespan * must be associated with the instance. *
- * A parameter to the {@code DualState} constructor is a {@code ResourceOwner}, - * a PostgreSQL implementation concept introduced in PG 8.0. A + * A parameter to the {@code DualState} constructor is a {@code Lifespan}. A * {@code nativeStateReleased} event occurs when the corresponding - * {@code ResourceOwner} is released in PostgreSQL. - *
- * However, this class does not require the {@code resourceOwner} parameter to - * be, in all cases, a pointer to a PostgreSQL {@code ResourceOwner}. It is - * treated simply as an opaque {@code long} value, to be compared to a value - * passed at release time (as if in a {@code ResourceOwner} callback). Other - * values (such as pointers to other allocated structures, which of course - * cannot match any PG {@code ResourceOwner} existing at the same time) can also - * be used. In PostgreSQL 9.5 and later, a {@code MemoryContext} could be used, - * with its address passed to a {@code MemoryContextCallback} for release. For - * state that is scoped to a single invocation of a PL/Java function, the - * address of the {@code Invocation} can be used. Such references can be - * considered "generalized" resource owners. + * {@code Lifespan} is released in PostgreSQL. PostgreSQL {@code ResourceOwner} + * and {@code MemoryContext} are two types of object that can serve + * as lifespans. A PL/Java {@code Invocation} object may also be used, to mark + * the lifespans of function arguments and other data expected to live at least + * for the duration of a function call. The lifespan argument can be null, + * for an object allocated in an immortal context and managed only by its + * {@code javaStateReleased} or {@code javaStateUnreachable} methods. *
* Java code may execute in multiple threads, but PostgreSQL is not * multi-threaded; at any given time, there is no more than one thread that may @@ -161,7 +164,7 @@ * native state until the pin is released. *
* If either the native state or the Java state has been released already (by - * the resource owner callback or an explicit call to {@code releaseFromJava}, + * the lifespan callback or an explicit call to {@code releaseFromJava}, * respectively), {@code pin()} will detect that and throw the appropriate * exception. Otherwise, the state is safe to make use of until {@code unpin}. * A subclass can customize the messages or {@code SQLSTATE} codes for the @@ -189,8 +192,8 @@ * The exclusive counterparts to {@code pin} and {@code unpin} are * {@link #lock lock} and {@link #unlock(int,boolean) unlock}, which are not * expected to be used as widely. The chief use of {@code lock}/{@code unlock} - * is around the call to {@code nativeStateReleased} when handling a resource - * owner callback from PostgreSQL. They can be used in subclasses to surround + * is around the call to {@code nativeStateReleased} when handling a lifespan + * callback from PostgreSQL. They can be used in subclasses to surround * modifications to the state, as needed. A {@code lock} will block until all * earlier-acquired pins are released; subsequent pins block until the lock is * released. Only the PG thread may use {@code lock}/{@code unlock}. An @@ -240,17 +243,14 @@ *
  • Instance construction *
  • Reference queue processing (instances found unreachable by Java's * garbage collector, or enqueued following {@code releaseFromJava}) - *
  • Exit of a resource owner's scope + *
  • Exit of a lifespan's scope * *
  • There is only one PG thread, or only one at a time. *
  • Construction of any {@code DualState} instance is to take place only on - * the PG thread. The requirement to pass any - * constructor a {@code DualState.Key} instance, obtainable by native code, is - * intended to reinforce that convention. It is not abuse-proof, or intended as - * a security mechanism, but only a guard against programming mistakes. + * the PG thread. *
  • Reference queue processing takes place only at chosen points where a * thread enters or exits native code, on the PG thread. - *
  • Resource-owner callbacks originate in native code, on the PG thread. + *
  • Lifespan callbacks originate in native code, on the PG thread. * */ public abstract class DualState extends WeakReference @@ -294,23 +294,10 @@ public abstract class DualState extends WeakReference private static final IdentityHashMap s_unscopedInstances = new IdentityHashMap<>(); - /** - * All native-scoped instances are added to this structure upon creation. - *
    - * The hash map takes a resource owner to the doubly-linked list of - * instances it owns. The list is implemented directly with the two list - * fields here (rather than by a Collections class), so that an instance can - * be unlinked with no searching in the case of {@code javaStateUnreachable} - * or {@code javaStateReleased}, where the instance to be unlinked is - * already at hand. The list head is of a dummy {@code DualState} subclass. - */ - private static final Map s_scopedInstances = - new HashMap<>(); - - /** Backward link in per-resource-owner list. */ + /** Backward link in per-lifespan list. */ private DualState m_prev; - /** Forward link in per-resource-owner list. */ + /** Forward link in per-lifespan list. */ private DualState m_next; /** @@ -541,27 +528,42 @@ private boolean hasPin(DualState s) /** Thread local record of when the PG thread is invoking callbacks. */ private static final CleanupTracker s_inCleanup = new CleanupTracker(); + /** Flag for cleanup entry/exit from the Java side */ + private static final int CLEAN_JAVA = 1; + + /** Flag for cleanup entry/exit from the native side */ + private static final int CLEAN_NATIVE = 2; + /** Thread local boolean with pairing enter/exit operations. */ - static final class CleanupTracker extends ThreadLocal + static final class CleanupTracker extends ThreadLocal { - boolean enter() + boolean enter(int how) { assert Backend.threadMayEnterPG() : m("inCleanup.enter thread"); - assert ! inCleanup() : m("inCleanup.enter re-entered"); - set(Boolean.TRUE); + int got = get(); + assert got < Integer.MAX_VALUE >>> 2 : + m("inCleanup.enter too many entries"); + set((got << 2) | how); return true; } - boolean exit() + boolean exit(int how) { - assert inCleanup() : m("inCleanup.exit mispaired"); - set(Boolean.FALSE); + int got = get(); + assert (got&3) == how : m("inCleanup.exit mispaired"); + set(got >>> 2); return true; } boolean inCleanup() { - return Boolean.TRUE == get(); + return 0 != get(); + } + + @Override + protected Integer initialValue() + { + return 0; } } @@ -605,23 +607,6 @@ boolean inCleanup() catch ( JMException e ) { /* XXX */ } } - /** - * Pointer value of the {@code ResourceOwner} this instance belongs to, - * if any. - */ - protected final long m_resourceOwner; - - /** - * Check that a cookie is valid, throwing an unchecked exception otherwise. - */ - protected static void checkCookie(Key cookie) - { - assert Backend.threadMayEnterPG(); - if ( ! Key.class.isInstance(cookie) ) - throw new UnsupportedOperationException( - "Operation on DualState instance without cookie"); - } - /** Flag held in lock state showing the native state has been released. */ private static final int NATIVE_RELEASED = 0x80000000; /** Flag held in lock state showing the Java state has been released. */ @@ -653,7 +638,7 @@ protected static void checkCookie(Key cookie) /** * Return the argument; convenient breakpoint target for failed assertions. */ - static T m(T detail) + public static T m(T detail) { return detail; } @@ -667,30 +652,24 @@ static T m(T detail) * some confidence that constructor parameters representing native values * are for real, and also that the construction is taking place on a thread * holding the native lock, keeping the concurrency story simple. - * @param cookie Capability held by native code to invoke {@code DualState} - * constructors. * @param referent The Java object whose state this instance represents. 
- * @param resourceOwner Pointer value of the native {@code ResourceOwner} + * @param lifespan {@link Lifespan Lifespan} * whose release callback will indicate that this object's native state is - * no longer valid. If zero (a NULL pointer in C), it indicates that the + * no longer valid. If null, it indicates that the * state is held in long-lived native memory (such as JavaMemoryContext), * and can only be released via {@code javaStateUnreachable} or * {@code javaStateReleased}. */ - protected DualState(Key cookie, T referent, long resourceOwner) + protected DualState(T referent, Lifespan lifespan) { super(referent, s_releasedInstances); - checkCookie(cookie); - long scoped = 0L; - m_resourceOwner = resourceOwner; - assert Backend.threadMayEnterPG() : m("DualState construction"); /* * The following stanza publishes 'this' into one of the static data - * structures, for resource-owner-scoped or non-native-scoped instances, + * structures, for lifespan-scoped or non-native-scoped instances, * respectively. That may look like escape of 'this' from an unfinished * constructor, but the structures are private, and only manipulated * during construction and release, always on the thread cleared to @@ -700,15 +679,10 @@ protected DualState(Key cookie, T referent, long resourceOwner) * That will happen after this constructor returns, so the reference is * safely published. */ - if ( 0 != resourceOwner ) + if ( null != lifespan ) { scoped = 1L; - DualState.ListHead head = s_scopedInstances.get(resourceOwner); - if ( null == head ) - { - head = new DualState.ListHead(resourceOwner); - s_scopedInstances.put(resourceOwner, head); - } + ListHead head = (ListHead)lifespan; m_prev = head; m_next = ((DualState)head).m_next; m_prev.m_next = m_next.m_prev = this; @@ -721,25 +695,24 @@ protected DualState(Key cookie, T referent, long resourceOwner) /** * Private constructor only for dummy instances to use as the list heads - * for per-resource-owner lists. + * for per-lifespan lists. */ - private DualState(T referent, long resourceOwner) + private DualState(T referent) { super(referent); // as a WeakReference subclass, must have a referent super.clear(); // but nobody ever said for how long. - m_resourceOwner = resourceOwner; m_prev = m_next = this; m_waiters = null; } /** - * Method that will be called when the associated {@code ResourceOwner} + * Method that will be called when the associated {@code Lifespan} * is released, indicating that the native portion of the state * is no longer valid. The implementing class should clean up * whatever is appropriate to that event. *
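The constructor change above removes the Key cookie and the raw ResourceOwner pointer in favor of a Lifespan reference (or null for unscoped state). A minimal sketch of what that looks like in a subclass follows; the wrapper class and names are hypothetical, and the type parameter, which the surrounding text has lost, is restored on the assumption that SinglePfree is generic in its referent type:

    // Hypothetical state class for some Java wrapper of a palloc'd native struct.
    private static class State extends DualState.SinglePfree<MyWrapper>
    {
        private State(MyWrapper referent, Lifespan span, long pointer)
        {
            // a null span would mean: unscoped, released only via the Java-side paths
            super(referent, span, pointer);
        }
    }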

    * This object's exclusive {@code lock()} will always be held when this - * method is called during resource owner release. The class whose state + * method is called during lifespan release. The class whose state * this is must use {@link #pin() pin()}, followed by * {@link #unpin() unpin()} in a {@code finally} block, around every * (ideally short) block of code that could refer to the native state. @@ -768,7 +741,7 @@ protected void nativeStateReleased(boolean javaStateLive) * live-instances data structures; that will have been done just before * this method is called. * @param nativeStateLive true is passed if the instance's "native state" is - * still considered live, that is, no resource-owner callback has been + * still considered live, that is, no lifespan callback has been * invoked to stamp it invalid (nor has it been "adopted"). */ protected void javaStateUnreachable(boolean nativeStateLive) @@ -795,7 +768,7 @@ protected void javaStateUnreachable(boolean nativeStateLive) * This default implementation calls {@code javaStateUnreachable}, which, in * typical cases, will have the same cleanup to do. * @param nativeStateLive true is passed if the instance's "native state" is - * still considered live, that is, no resource-owner callback has been + * still considered live, that is, no lifespan callback has been * invoked to stamp it invalid (nor has it been "adopted"). */ protected void javaStateReleased(boolean nativeStateLive) @@ -982,6 +955,49 @@ public final void pin() throws SQLException throw new SQLException(releasedMessage(), releasedSqlState()); } + /** + * Obtains a pin on this state, returning an + * {@link AutoCloseable AutoCloseable} instance that can be used in a + * {@code try}-with resources statement to ensure it is unpinned. + * @throws SQLException if the native state or the Java state has been + * released. + */ + public final Pinned pinned() throws SQLException + { + pin(); + return this::unpin; + } + + /** + * Obtains a pin on this state, returning an + * {@link AutoCloseable AutoCloseable} instance that can be used in a + * {@code try}-with resources statement to ensure it is unpinned. + * @throws IllegalStateException for use where a checked exception is not + * wanted, any resulting SQLException will be wrapped in + * IllegalStateException + */ + public final Pinned pinnedNoChecked() + { + try + { + return pinned(); + } + catch ( SQLException e ) + { + throw new IllegalStateException(e.getMessage(), e); + } + } + + /** + * A subinterface of {@link AutoCloseable AutoCloseable} whose {@code close} + * method throws no checked exceptions. + */ + @FunctionalInterface + public interface Pinned extends AutoCloseable + { + public void close(); + } + /** * Obtain a pin on this state, if it is still valid, blocking if necessary * until release of a lock. @@ -1002,6 +1018,27 @@ public final boolean pinUnlessReleased() return !z(_pin()); } + /** + * Runs r with this state pinned, unless the state has already + * been released, completing normally without running r in that + * case. + */ + public final void unlessReleased( + Checked.Runnable r) + throws E + { + if ( pinUnlessReleased() ) + return; + try + { + r.run(); + } + finally + { + unpin(); + } + } + /** * Workhorse for {@code pin()} and {@code pinUnlessReleased()}. * @return zero if the pin was obtained, otherwise {@code NATIVE_RELEASED}, @@ -1054,8 +1091,9 @@ private final int _pin() * null for most DualState instances, and be 'inflated' by having a * queue installed when first needed. 
That requires a null check here. */ - if ( null != m_waiters ) - m_waiters.add(thr); + Queue queue = m_waiters; + if ( null != queue ) + queue.add(thr); else { /* @@ -1527,12 +1565,10 @@ protected final void unlock(int s, boolean isNativeRelease) * nor {@code JAVA_RELEASED} flag may be set. This method is non-blocking * and will simply throw an exception if these preconditions are not * satisfied. - * @param cookie Capability held by native code to invoke special - * {@code DualState} methods. */ - protected final void adoptionLock(Key cookie) throws SQLException + protected final void adoptionLock() throws SQLException { - checkCookie(cookie); + assert threadMayEnterPG() : m("adoptionLock thread"); s_mutatorThread = Thread.currentThread(); assert pinnedByCurrentThread() : m("adoptionLock without pin"); int s = 1; // must be: quiescent (our pin only), unreleased @@ -1556,12 +1592,10 @@ protected final void adoptionLock(Key cookie) throws SQLException * and {@code JAVA_RELEASED} flags set. When the calling code releases the * prior pin it was expected to hold, the {@code javaStateReleased} callback * will execute. A value of false will be passed to both callbacks. - * @param cookie Capability held by native code to invoke special - * {@code DualState} methods. */ - protected final void adoptionUnlock(Key cookie) throws SQLException + protected final void adoptionUnlock() throws SQLException { - checkCookie(cookie); + assert threadMayEnterPG() : m("adoptionUnlock thread"); int s = NATIVE_RELEASED | JAVA_RELEASED | MUTATOR_HOLDS | 1 << WAITERS_SHIFT; int t = NATIVE_RELEASED | JAVA_RELEASED | 1; @@ -1647,7 +1681,7 @@ protected String releasedSqlState() /** * Produce a string describing this state object in a way useful for - * debugging, with such information as the associated {@code ResourceOwner} + * debugging, with such information as the associated {@code Lifespan} * and whether the state is fresh or stale. *
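From a client's point of view, the pin contract described above, together with the new pinned() helper, looks roughly like this (the state variable and the work inside the blocks are placeholders; SQLException propagates as declared above):

    // Explicit form: pin, use the native state briefly, always unpin.
    state.pin();
    try
    {
        // ... short block that touches the guarded native state ...
    }
    finally
    {
        state.unpin();
    }

    // try-with-resources form: DualState.Pinned.close() declares no checked
    // exception, so no extra handling is needed for the unpin.
    try ( DualState.Pinned p = state.pinned() )
    {
        // ... short block that touches the guarded native state ...
    }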

    * This method calls {@link #toString(Object)} passing {@code this}. @@ -1684,71 +1718,101 @@ public String toString(Object o) Class c = (null == o ? this : o).getClass(); String cn = c.getCanonicalName(); int pnl = c.getPackageName().length(); - return String.format("%s owner:%x %s", - cn.substring(1 + pnl), m_resourceOwner, + return String.format("%s lifespan:%s %s", + cn.substring(1 + pnl), lifespan(), z((int)s_stateVH.getVolatile(this) & NATIVE_RELEASED) ? "fresh" : "stale"); } /** - * Called only from native code by the {@code ResourceOwner} callback when a - * resource owner is being released. Must identify the live instances that - * have been registered to that owner, if any, and call their + * Return the {@code Lifespan} with which this instance is associated. + *

    + * As it is only needed for infrequent operations like {@code toString}, + * this is implemented simply by walking the circular list of owned objects + * back to the list head. + * @return the owning Lifespan, or null for an unscoped instance + */ + private Lifespan lifespan() + { + if ( this instanceof ListHead ) + return (Lifespan)this; + if ( null == m_prev ) + return null; + for ( DualState t = m_prev; t != this; t = t.m_prev ) + if ( t instanceof ListHead ) + return (Lifespan)t; + throw new AssertionError(m("degenerate owned-object list")); + } + + /** + * Called only on the PG thread when a + * lifespan is being released. Must identify the live instances that + * have been registered to that lifespan, if any, and call their * {@link #nativeStateReleased nativeStateReleased} methods. - * @param resourceOwner Pointer value identifying the resource owner being - * released. Calls can be received for resource owners to which no instances - * here have been registered. + * @param lifespan The lifespan being released, whose implementation must + * extend {@link ListHead ListHead}. Calls can be received for lifespans + * to which no instances here have been registered. *

    * Some state subclasses may have their nativeStateReleased methods called * from Java code, when it is clear the native state is no longer needed in - * Java. That doesn't remove the state instance from s_scopedInstances, + * Java. That doesn't unlink the state instance from its lifespan (if any) * though, so it will still eventually be seen by this loop and efficiently * removed by the iterator. Hence the {@code NATIVE_RELEASED} test, to avoid * invoking nativeStateReleased more than once. */ - private static void resourceOwnerRelease(long resourceOwner) + private static void lifespanRelease(Lifespan lifespan) { long total = 0L, release = 0L; - assert Backend.threadMayEnterPG() : m("resourceOwnerRelease thread"); + assert Backend.threadMayEnterPG() : m("lifespanRelease thread"); - DualState head = s_scopedInstances.remove(resourceOwner); + DualState head = (ListHead)lifespan; if ( null == head ) return; DualState t = head.m_next; - head.m_prev = head.m_next = null; - for ( DualState s = t ; s != head ; s = t ) + head.m_prev = head.m_next = head; + + assert s_inCleanup.enter(CLEAN_NATIVE); //no-op when assertions disabled + try { - t = s.m_next; - s.m_prev = s.m_next = null; - ++ total; - /* - * This lock() is part of DualState's contract with clients. - * They are responsible for pinning the state instance - * whenever they need the wrapped native state (which is verified - * to still be valid at that time) and for the duration of whatever - * operation needs access to that state. Taking this lock here - * ensures the native state is blocked from vanishing while it is - * actively in use. - */ - int state = s.lock(false); - try + for ( DualState s = t ; s != head ; s = t ) { - if ( z(NATIVE_RELEASED & state) ) + t = s.m_next; + s.m_prev = s.m_next = null; + ++ total; + /* + * This lock() is part of DualState's contract with clients. + * They are responsible for pinning the state instance whenever + * they need the wrapped native state (which is verified to + * still be valid at that time) and for the duration of whatever + * operation needs access to that state. Taking this lock here + * ensures the native state is blocked from vanishing while it + * is actively in use. + */ + int state = s.lock(false); + try + { + if ( z(NATIVE_RELEASED & state) ) + { + ++ release; + s.nativeStateReleased( + z(JAVA_RELEASED & state) && null != s.referent()); + } + } + catch ( Throwable x ) { } /* JDK 9 Cleaner ignores exceptions */ + finally { - ++ release; - s.nativeStateReleased( - z(JAVA_RELEASED & state) && null != s.referent()); + s.unlock(state, true);//true->ensure NATIVE_RELEASED is set. } } - finally - { - s.unlock(state, true); // true -> ensure NATIVE_RELEASED is set. 
- } + } + finally + { + assert s_inCleanup.exit(CLEAN_NATIVE); } - s_stats.resourceOwnerPoll(release, total); + s_stats.lifespanPoll(release, total); } /** @@ -1768,7 +1832,7 @@ private static void cleanEnqueuedInstances() int nDeferred = s_deferredReleased.size(); boolean isDeferred; - assert s_inCleanup.enter(); // no-op when assertions disabled + assert s_inCleanup.enter(CLEAN_JAVA); // no-op when assertions disabled try { for ( ;; ) @@ -1809,7 +1873,7 @@ else if ( z(NATIVE_RELEASED & state) ) } finally { - assert s_inCleanup.exit(); + assert s_inCleanup.exit(CLEAN_JAVA); } s_stats.referenceQueueDrain(total - release, release, total, reDefer); @@ -1817,22 +1881,21 @@ else if ( z(NATIVE_RELEASED & state) ) /** * Remove this instance from the data structure holding it, for scoped - * instances if it has a non-zero resource owner, otherwise for unscoped + * instances if it is linked on a scoped list, otherwise for unscoped * instances. */ private void delist() { assert Backend.threadMayEnterPG() : m("DualState delist thread"); - if ( 0 == m_resourceOwner ) + if ( null == m_next ) { if ( null != s_unscopedInstances.remove(this) ) s_stats.delistUnscoped(); return; } - if ( null == m_prev || null == m_next ) - return; + // m_next is non-null, so m_prev had better be also. if ( this == m_prev.m_next ) m_prev.m_next = m_next; if ( this == m_next.m_prev ) @@ -1842,47 +1905,34 @@ private void delist() } /** - * Magic cookie needed as a constructor parameter to confirm that - * {@code DualState} subclass instances are being constructed from - * native code. - */ - public static final class Key - { - private static boolean constructed = false; - private Key() - { - synchronized ( Key.class ) - { - if ( constructed ) - throw new IllegalStateException("Duplicate DualState.Key"); - constructed = true; - } - } - } - - /** - * Dummy DualState concrete class whose instances only serve as list - * headers in per-resource-owner lists of instances. + * An otherwise nonfunctional DualState subclass whose instances only serve + * as list headers in per-lifespan lists of instances. + *

    + * Implementations of {@link Lifespan Lifespan} extend this. */ - private static class ListHead extends DualState // because why not? + public static abstract class ListHead + extends DualState // because why not? { /** - * Construct a {@code ListHead} instance. As a subclass of - * {@code DualState}, it can't help having a resource owner field, so - * may as well use it to store the resource owner that the list is for, - * in case it's of interest in debugging. - * @param owner The resource owner + * Construct a {@code ListHead} instance. + *
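A sketch of the pattern being described: a Lifespan implementation extends this class and triggers the sweep with the protected lifespanRelease() when the native object it models goes away. The class here is hypothetical (the real ones, such as LifespanImpl, appear later in this patch), and it assumes Lifespan itself adds no abstract methods beyond what ListHead supplies:

    class ExampleLifespan extends DualState.ListHead implements Lifespan
    {
        // Called on the PG thread when the modeled native lifespan ends; every
        // DualState instance registered to this lifespan then gets its
        // nativeStateReleased callback.
        void invalidate()
        {
            lifespanRelease();
        }
    }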

    + * The instance must be a concrete subtype of {@code Lifespan}. */ - private ListHead(long owner) + protected ListHead() { - super("", owner); // An instance needs an object to be its referent + super(""); // An instance needs some object to be its referent + assert this instanceof Lifespan : m( + getClass() + " does not implement Lifespan and may not " + + "extend DualState.ListHead"); } - @Override - public String toString(Object o) + /** + * Walk the chain of objects owned by this lifespan, signaling + * their release from native code. + */ + protected void lifespanRelease() { - return String.format( - "DualState.ListHead for resource owner %x", m_resourceOwner); + DualState.lifespanRelease((Lifespan)this); } } @@ -1902,9 +1952,9 @@ public static abstract class SingleGuardedLong extends DualState private final long m_guardedLong; protected SingleGuardedLong( - Key cookie, T referent, long resourceOwner, long guardedLong) + T referent, Lifespan span, long guardedLong) { - super(cookie, referent, resourceOwner); + super(referent, span); m_guardedLong = guardedLong; } @@ -1933,6 +1983,55 @@ protected final long guardedLong() } } + /** + * A {@code DualState} subclass serving only to guard access to a single + * nonnull {@code ByteBuffer} value. + *

    + * Nothing in particular is done to the native resource at the time of + * {@code javaStateReleased} or {@code javaStateUnreachable}; if it is + * subject to reclamation, this class assumes it will be shortly, in the + * normal operation of the native code. This can be appropriate for native + * state that was set up by a native caller for a short lifetime, such as a + * single function invocation. + */ + public static abstract class SingleGuardedBB extends DualState + { + private final ByteBuffer m_guardedBuffer; + + protected SingleGuardedBB( + T referent, Lifespan span, ByteBuffer guardedBuffer) + { + super(referent, span); + m_guardedBuffer = requireNonNull(guardedBuffer); + assert guardedBuffer.isDirect() : "GuardedBB is not direct"; + } + + @Override + public String toString(Object o) + { + return + String.format( + formatString(), super.toString(o), m_guardedBuffer); + } + + /** + * Return a {@code printf} format string resembling + * {@code "%s something(%s)"} where the second {@code %s} will be + * the value being guarded; the "something" should indicate what the + * value represents, or what will be done with it when released by Java. + */ + protected String formatString() + { + return "%s GuardedBB(%s)"; + } + + protected final ByteBuffer guardedBuffer() + { + assert pinnedByCurrentThread() : m("guardedBuffer() without pin"); + return m_guardedBuffer; + } + } + /** * A {@code DualState} subclass whose only native resource releasing action * needed is {@code pfree} of a single pointer. @@ -1940,9 +2039,9 @@ protected final long guardedLong() public static abstract class SinglePfree extends SingleGuardedLong { protected SinglePfree( - Key cookie, T referent, long resourceOwner, long pfreeTarget) + T referent, Lifespan span, long pfreeTarget) { - super(cookie, referent, resourceOwner, pfreeTarget); + super(referent, span, pfreeTarget); } @Override @@ -1978,18 +2077,33 @@ protected void javaStateUnreachable(boolean nativeStateLive) * native code is responsible for whatever happens to it next. 
*/ public static abstract class SingleMemContextDelete - extends SingleGuardedLong + extends DualState { + private final MemoryContext m_context; + protected SingleMemContextDelete( - Key cookie, T referent, long resourceOwner, long memoryContext) + T referent, Lifespan span, MemoryContext cxt) { - super(cookie, referent, resourceOwner, memoryContext); + super(referent, span); + m_context = cxt; } @Override + public String toString(Object o) + { + return + String.format(formatString(), super.toString(o), m_context); + } + public String formatString() { - return "%s MemoryContextDelete(%x)"; + return "%s MemoryContextDelete(%s)"; + } + + protected final MemoryContext memoryContext() + { + assert pinnedByCurrentThread() : m("memoryContext() without pin"); + return m_context; } /** @@ -2003,7 +2117,7 @@ protected void javaStateUnreachable(boolean nativeStateLive) { assert Backend.threadMayEnterPG(); if ( nativeStateLive ) - _memContextDelete(guardedLong()); + _memContextDelete(((Addressed)memoryContext()).address()); } private native void _memContextDelete(long pointer); @@ -2017,9 +2131,9 @@ public static abstract class SingleFreeTupleDesc extends SingleGuardedLong { protected SingleFreeTupleDesc( - Key cookie, T referent, long resourceOwner, long ftdTarget) + T referent, Lifespan span, long ftdTarget) { - super(cookie, referent, resourceOwner, ftdTarget); + super(referent, span, ftdTarget); } @Override @@ -2053,9 +2167,9 @@ public static abstract class SingleHeapFreeTuple extends SingleGuardedLong { protected SingleHeapFreeTuple( - Key cookie, T referent, long resourceOwner, long hftTarget) + T referent, Lifespan span, long hftTarget) { - super(cookie, referent, resourceOwner, hftTarget); + super(referent, span, hftTarget); } @Override @@ -2089,9 +2203,9 @@ public static abstract class SingleFreeErrorData extends SingleGuardedLong { protected SingleFreeErrorData( - Key cookie, T referent, long resourceOwner, long fedTarget) + T referent, Lifespan span, long fedTarget) { - super(cookie, referent, resourceOwner, fedTarget); + super(referent, span, fedTarget); } @Override @@ -2117,6 +2231,56 @@ protected void javaStateUnreachable(boolean nativeStateLive) private native void _freeErrorData(long pointer); } + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code SPI_freetuptable} of a single pointer. + *

    + * Note: for now, the tuptable is freed only on {@code javaStateReleased}. + * It turns out that {@code SPI_freetuptable}, since PG 9.4 (and backpatched + * releases of 9.3) has contained code to raise a warning if the tuple table + * being released does not belong to the current SPI connection. + * When this class earlier did the free in {@code javaStateUnreachable}, + * that warning could be triggered on rare and irksome occasions if Java's + * GC happened to find, during a nested invocation of some Java function, + * that a tuple table from an outer invocation had become unreachable. + *

    + * It would be conceivable to have {@code javaStateUnreachable} try to + * determine if the current nest level matches that of the tuple table's + * creation, and free it if so at least, otherwise leaking it to the + * exit of the outer call. But for now it's also conceivable to just have it + * do nothing, and let the context reset at invocation exit mop it up. + */ + public static abstract class SingleSPIfreetuptable + extends SingleGuardedLong + { + protected SingleSPIfreetuptable( + T referent, Lifespan span, long fttTarget) + { + super(referent, span, fttTarget); + } + + @Override + public String formatString() + { + return "%s SPI_freetuptable(%x)"; + } + + /** + * When the Java state is released, an {@code SPI_freetuptable} + * call is made so the native memory is released without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateReleased(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _spiFreeTupTable(guardedLong()); + } + + private native void _spiFreeTupTable(long pointer); + } + /** * A {@code DualState} subclass whose only native resource releasing action * needed is {@code SPI_freeplan} of a single pointer. @@ -2125,9 +2289,9 @@ public static abstract class SingleSPIfreeplan extends SingleGuardedLong { protected SingleSPIfreeplan( - Key cookie, T referent, long resourceOwner, long fpTarget) + T referent, Lifespan span, long fpTarget) { - super(cookie, referent, resourceOwner, fpTarget); + super(referent, span, fpTarget); } @Override @@ -2161,9 +2325,9 @@ public static abstract class SingleSPIcursorClose extends SingleGuardedLong { protected SingleSPIcursorClose( - Key cookie, T referent, long resourceOwner, long ccTarget) + T referent, Lifespan span, long ccTarget) { - super(cookie, referent, resourceOwner, ccTarget); + super(referent, span, ccTarget); } @Override @@ -2202,6 +2366,91 @@ protected void javaStateUnreachable(boolean nativeStateLive) private native void _spiCursorClose(long pointer); } + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code heap_freetuple} of the address of a direct byte buffer. + */ + public static abstract class BBHeapFreeTuple + extends SingleGuardedBB + { + protected BBHeapFreeTuple( + T referent, Lifespan span, ByteBuffer hftTarget) + { + super(referent, span, hftTarget); + } + + @Override + public String formatString() + { + return"%s heap_freetuple(%s)"; + } + + /** + * When the Java state is released or unreachable, a + * {@code heap_freetuple} + * call is made so the native memory is released without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _heapFreeTuple(guardedBuffer()); + } + + private native void _heapFreeTuple(ByteBuffer tuple); + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is a JNI {@code DeleteGlobalRef} of a single pointer. 
+ */ + public static abstract class SingleDeleteGlobalRefP + extends SingleGuardedLong + { + protected SingleDeleteGlobalRefP( + T referent, Lifespan span, long dgrTarget) + { + super(referent, span, dgrTarget); + } + + @Override + public String formatString() + { + return "%s DeleteGlobalRef(%x)"; + } + + /** + * When the Java state is released (it won't normally go unreachable, + * because of the global ref), a JNI {@code DeleteGlobalRef} call + * is made so the instance can be collected without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateReleased(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _deleteGlobalRefP(guardedLong()); + } + + /** + * When the native state is released, a JNI {@code DeleteGlobalRef} call + * is made so the global ref stored in that to-be-released memory isn't + * leaked (left permanently live). + */ + @Override + protected void nativeStateReleased(boolean javaStateLive) + { + assert Backend.threadMayEnterPG(); + if ( javaStateLive ) + _deleteGlobalRefP(guardedLong()); + } + + private native void _deleteGlobalRefP(long pointer); + } + /** * Bean exposing some {@code DualState} allocation and lifecycle statistics * for viewing in a JMX management client. @@ -2248,9 +2497,9 @@ public long getNativeReleased() return nativeReleased.sum(); } - public long getResourceOwnerPasses() + public long getLifespanPasses() { - return resourceOwnerPasses.sum(); + return lifespanPasses.sum(); } public long getReferenceQueuePasses() @@ -2297,7 +2546,7 @@ public long getReleaseReleaseRaces() private LongAdder javaUnreachable = new LongAdder(); private LongAdder javaReleased = new LongAdder(); private LongAdder nativeReleased = new LongAdder(); - private LongAdder resourceOwnerPasses = new LongAdder(); + private LongAdder lifespanPasses = new LongAdder(); private LongAdder referenceQueuePasses = new LongAdder(); private LongAdder referenceQueueItems = new LongAdder(); private LongAdder contendedLocks = new LongAdder(); @@ -2313,9 +2562,9 @@ final void construct(long scoped) enlistedUnscoped.add(1L - scoped); } - final void resourceOwnerPoll(long released, long total) + final void lifespanPoll(long released, long total) { - resourceOwnerPasses.increment(); + lifespanPasses.increment(); nativeReleased.add(released); delistedScoped.add(total); } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/EntryPoints.java b/pljava/src/main/java/org/postgresql/pljava/internal/EntryPoints.java index 82f042795..21c56a964 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/EntryPoints.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/EntryPoints.java @@ -16,7 +16,9 @@ import static java.lang.invoke.MethodType.methodType; import java.security.AccessControlContext; +import java.security.AccessController; import static java.security.AccessController.doPrivileged; +import java.security.Permission; import java.security.PrivilegedAction; import java.sql.SQLData; @@ -28,6 +30,8 @@ import static java.util.Objects.requireNonNull; +import java.util.function.Consumer; + import org.postgresql.pljava.internal.UncheckedException; import static org.postgresql.pljava.internal.UncheckedException.unchecked; @@ -374,6 +378,22 @@ static Class loadAndInitWithACC( return doPrivilegedAndUnwrap(action, acc); } + /** + * Returns a permission checker for use when enforcing. + *
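An illustrative use of the returned checker (the calling code here is not part of the patch); the reason for the indirection is explained in the next paragraph:

    // Obtain the abstraction once, then apply it at enforcement points, so the
    // same code path can be handed a different Consumer on a JEP 486 runtime.
    Consumer<Permission> check = permissionChecker();
    check.accept(new RuntimePermission("accessDeclaredMembers")); // throws if denied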

    + * With JEP 486, Java essentially becomes everything-is-allowed, except + * where user code makes its own permission checks. JEP 486 changed + * {@link AccessController#checkPermission(Permission) checkPermission} + * to unconditionally throw an exception. So checking has to be abstracted, + * using the {@code Consumer} returned by this method when the Java runtime + * supports it, otherwise a different {@code Consumer} that does something + * else. + */ + static Consumer permissionChecker() + { + return AccessController::checkPermission; + } + /** * A class carrying a payload of some kind and an access control context * to impose when it is invoked. diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java b/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java index e667d23f8..e0cc20e29 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -27,18 +27,24 @@ public class ErrorData { private final State m_state; - ErrorData(DualState.Key cookie, long resourceOwner, long pointer) + ErrorData(long pointer) { - m_state = new State(cookie, this, resourceOwner, pointer); + m_state = new State(this, pointer); } private static class State extends DualState.SingleFreeErrorData { - private State( - DualState.Key cookie, ErrorData ed, long ro, long ht) + private State(ErrorData ed, long ht) { - super(cookie, ed, ro, ht); + /* + * Passing null as the Lifespan means this will never be + * matched by a lifespanRelease call; that's appropriate (for now) as + * the ErrorData copy is being made into JavaMemoryContext, which + * never gets reset, so only unreachability from the Java side + * will free it. + */ + super(ed, null, ht); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java b/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java index 86ac4c418..0b120e283 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -79,10 +79,10 @@ public class ExecutionPlan private static class State extends DualState.SingleSPIfreeplan { - private State( - DualState.Key cookie, ExecutionPlan jep, long ro, long ep) + private State(ExecutionPlan jep, long ep) { - super(cookie, jep, ro, ep); + /* null as Lifespan: the saved plan isn't transient */ + super(jep, null, ep); } /** @@ -145,6 +145,10 @@ protected boolean removeEldestEntry( } }; + /** + * The actual (but treated as opaque {@code Object} key used in the plan + * cache. 
+ */ static final class PlanKey { private final int m_hashCode; @@ -200,11 +204,10 @@ public int hashCode() : cacheSize)); } - private ExecutionPlan(DualState.Key cookie, long resourceOwner, - Object planKey, long spiPlan) + private ExecutionPlan(Object planKey, long spiPlan) { m_key = planKey; - m_state = new State(cookie, this, resourceOwner, spiPlan); + m_state = new State(this, spiPlan); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Function.java b/pljava/src/main/java/org/postgresql/pljava/internal/Function.java index 83ba3ba05..2ae06a539 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Function.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Function.java @@ -37,11 +37,6 @@ import java.lang.invoke.WrongMethodTypeException; import java.lang.reflect.Array; -import java.lang.reflect.Method; -import java.lang.reflect.GenericDeclaration; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.lang.reflect.TypeVariable; import java.nio.ByteBuffer; import java.nio.ByteOrder; @@ -1923,146 +1918,6 @@ private static String getAS(ResultSet procTup) throws SQLException "(" + javaTypeName + ")(" + arrayDims + ")?+" ); - /** - * Test whether the type {@code t0} is, directly or indirectly, - * a specialization of generic type {@code c0}. - * @param t0 a type to be checked - * @param c0 known generic type to check for - * @return null if {@code t0} does not extend {@code c0}, otherwise the - * array of type arguments with which it specializes {@code c0} - */ - private static Type[] specialization(Type t0, Class c0) - { - Type t = t0; - Class c; - ParameterizedType pt = null; - TypeBindings latestBindings = null; - Type[] actualArgs = null; - - if ( t instanceof Class ) - { - c = (Class)t; - if ( ! c0.isAssignableFrom(c) ) - return null; - if ( c0 == c ) - return new Type[0]; - } - else if ( t instanceof ParameterizedType ) - { - pt = (ParameterizedType)t; - c = (Class)pt.getRawType(); - if ( ! c0.isAssignableFrom(c) ) - return null; - if ( c0 == c ) - actualArgs = pt.getActualTypeArguments(); - else - latestBindings = new TypeBindings(null, pt); - } - else - throw new AssertionError( - "expected Class or ParameterizedType, got: " + t); - - if ( null == actualArgs ) - { - List pending = new LinkedList<>(); - pending.add(c.getGenericSuperclass()); - addAll(pending, c.getGenericInterfaces()); - - while ( ! pending.isEmpty() ) - { - t = pending.remove(0); - if ( null == t ) - continue; - if ( t instanceof Class ) - { - c = (Class)t; - if ( c0 == c ) - return new Type[0]; - } - else if ( t instanceof ParameterizedType ) - { - pt = (ParameterizedType)t; - c = (Class)pt.getRawType(); - if ( c0 == c ) - { - actualArgs = pt.getActualTypeArguments(); - break; - } - if ( c0.isAssignableFrom(c) ) - pending.add(new TypeBindings(latestBindings, pt)); - } - else if ( t instanceof TypeBindings ) - { - latestBindings = (TypeBindings)t; - continue; - } - else - throw new AssertionError( - "expected Class or ParameterizedType, got: " + t); - if ( ! 
c0.isAssignableFrom(c) ) - continue; - pending.add(c.getGenericSuperclass()); - addAll(pending, c.getGenericInterfaces()); - } - } - if ( null == actualArgs ) - throw new AssertionError( - "failed checking whether " + t0 + " specializes " + c0); - - for ( int i = 0; i < actualArgs.length; ++ i ) - if ( actualArgs[i] instanceof TypeVariable ) - actualArgs[i] = - latestBindings.resolve((TypeVariable)actualArgs[i]); - - return actualArgs; - } - - /** - * A class recording the bindings made in a ParameterizedType to the type - * parameters in a {@code GenericDeclaration}. Implements - * {@code Type} so it can be added to the {@code pending} queue in - * {@code specialization}. - *
    - * In {@code specialization}, the tree of superclasses/superinterfaces will - * be searched breadth-first, with all of a node's immediate supers enqueued - * before any from the next level. By recording a node's type variable to - * type argument bindings in an object of this class, and enqueueing it - * before any of the node's supers, any type variables encountered as actual - * type arguments to any of those supers should be resolvable in the object - * of this class most recently dequeued. - */ - static class TypeBindings implements Type - { - private final TypeVariable[] formalTypeParams; - private final Type[] actualTypeArgs; - - TypeBindings(TypeBindings prior, ParameterizedType pt) - { - actualTypeArgs = pt.getActualTypeArguments(); - formalTypeParams = - ((GenericDeclaration)pt.getRawType()).getTypeParameters(); - assert actualTypeArgs.length == formalTypeParams.length; - - if ( null == prior ) - return; - - for ( int i = 0; i < actualTypeArgs.length; ++ i ) - { - Type t = actualTypeArgs[i]; - if ( actualTypeArgs[i] instanceof TypeVariable ) - actualTypeArgs[i] = prior.resolve((TypeVariable)t); - } - } - - Type resolve(TypeVariable v) - { - for ( int i = 0; i < formalTypeParams.length; ++ i ) - if ( formalTypeParams[i].equals(v) ) - return actualTypeArgs[i]; - throw new AssertionError("type binding not found for " + v); - } - } - /** * Wrap the native method to store the values computed in Java, for a * non-UDT function, into the C {@code Function} structure. Returns an array diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java b/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java index a235d864a..9ee90f885 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java @@ -46,6 +46,7 @@ import static org.postgresql.pljava.annotation.processing.DDRWriter.eQuote; import static org.postgresql.pljava.elog.ELogHandler.LOG_WARNING; import static org.postgresql.pljava.internal.Backend.WITHOUT_ENFORCEMENT; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; import static org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; /** @@ -130,27 +131,22 @@ public static String hello( */ setPropertyIfNull( "sqlj.defaultconnection", "jdbc:default:connection"); - String encodingKey = "org.postgresql.server.encoding"; - String encName = System.getProperty(encodingKey); - if ( null == encName ) - encName = Backend.getConfigOption( "server_encoding"); - try - { - Charset cs = Charset.forName(encName); - org.postgresql.pljava.internal.Session.s_serverCharset = cs; // poke - System.setProperty(encodingKey, cs.name()); - } - catch ( IllegalArgumentException iae ) - { - System.clearProperty(encodingKey); - } + SERVER_ENCODING.charset(); // this must be set before beginEnforcing() + + /* so they can be granted permissions in the pljava policy */ + System.setProperty( "org.postgresql.pljava.codesource", + InstallHelper.class.getProtectionDomain().getCodeSource() + .getLocation().toString()); - /* so it can be granted permissions in the pljava policy */ if ( ! 
WITHOUT_ENFORCEMENT ) { + /* so they can be granted permissions in the pljava policy */ System.setProperty( "org.postgresql.pljava.codesource", InstallHelper.class.getProtectionDomain().getCodeSource() .getLocation().toString()); + System.setProperty( "org.postgresql.pljava.codesource.api", + Simple.class.getProtectionDomain().getCodeSource() + .getLocation().toString()); setPolicyURLs(); } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/Invocation.java b/pljava/src/main/java/org/postgresql/pljava/internal/Invocation.java similarity index 69% rename from pljava/src/main/java/org/postgresql/pljava/jdbc/Invocation.java rename to pljava/src/main/java/org/postgresql/pljava/internal/Invocation.java index 88301d0be..b746cc33f 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/Invocation.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Invocation.java @@ -10,18 +10,25 @@ * Tada AB * Chapman Flack */ -package org.postgresql.pljava.jdbc; +package org.postgresql.pljava.internal; + +import java.lang.annotation.Native; + +import static java.lang.Integer.highestOneBit; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; import java.sql.PreparedStatement; import java.sql.SQLException; -import java.util.ArrayList; import java.util.logging.Logger; -import org.postgresql.pljava.internal.Backend; import static org.postgresql.pljava.internal.Backend.doInPG; -import org.postgresql.pljava.internal.PgSavepoint; -import org.postgresql.pljava.internal.ServerException; // for javadoc -import org.postgresql.pljava.internal.UnhandledPGException; // for javadoc + +import org.postgresql.pljava.model.MemoryContext; + +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; +import org.postgresql.pljava.pg.MemoryContextImpl; /** * One invocation, from PostgreSQL, of functionality implemented using PL/Java. @@ -31,17 +38,18 @@ * from PG to PL/Java, no instance of this class is created unless requested * (with {@link #current current()}; once requested, a reference to it is saved * in the C struct for the duration of the invocation. - *

    - * One further piece of magic applies to set-returning functions. Under the - * value-per-call protocol, there is technically a new entry into PL/Java, and - * a new C {@code Invocation_} struct, for every row to be returned, but that - * low-level complication is hidden at this level: a single instance of this - * class, if once requested, will be remembered throughout the value-per-call - * sequence of calls. * @author Thomas Hallgren */ -public class Invocation +public class Invocation extends LifespanImpl { + @Native private static final int OFFSET_nestLevel = 0; + @Native private static final int OFFSET_hasDual = 4; + @Native private static final int OFFSET_errorOccurred = 5; + @Native private static final int OFFSET_upperContext = 8; + + private static final ByteBuffer s_window = + EarlyNatives._window().order(nativeOrder()); + /** * The current "stack" of invocations. */ @@ -97,10 +105,10 @@ public class Invocation * this exception can be obtained from {@code popInvocation} by bumping the * level to {@code DEBUG2}. *
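The @Native offsets and the s_window buffer above replace per-field JNI calls with direct reads and writes of the C Invocation struct through a byte-buffer window. A condensed sketch of the access pattern (all of these forms appear in the methods below, and all run on the PG thread, inside doInPG):

    int     level   = s_window.getInt(OFFSET_nestLevel);
    boolean hasDual = 0 != s_window.get(OFFSET_hasDual);
    boolean errored = 0 != s_window.get(OFFSET_errorOccurred);
    s_window.put(OFFSET_errorOccurred, (byte)0); // writes are equally direct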

    - * Public access so factory methods of {@code ServerException} and - * {@code UnhandledPGException}, in another package, can access it. + * Package access so factory methods of {@code ServerException} and + * {@code UnhandledPGException} can access it. */ - public static SQLException s_unhandled; + static SQLException s_unhandled; /** * Nesting level for this invocation @@ -128,7 +136,7 @@ public int getNestingLevel() /** * @return Returns the savePoint. */ - final PgSavepoint getSavepoint() + public final PgSavepoint getSavepoint() { return m_savepoint; } @@ -136,16 +144,16 @@ final PgSavepoint getSavepoint() /** * @param savepoint The savepoint to set. */ - final void setSavepoint(PgSavepoint savepoint) + public final void setSavepoint(PgSavepoint savepoint) { m_savepoint = savepoint; } /** - * Called from the backend when the invokation exits. Should - * not be invoked any other way. + * Called only from the static {@code onExit} below when the invocation + * is popped; should not be invoked any other way. */ - public void onExit(boolean withError) + private void onExit(boolean withError) throws SQLException { try @@ -155,10 +163,22 @@ public void onExit(boolean withError) } finally { - s_levels[m_nestingLevel] = null; + m_savepoint = null; + lifespanRelease(); } } + /** + * The actual entry point from JNI, which passes a valid nestLevel. + *

    + * Forwards to the instance method at the corresponding level. + */ + private static void onExit(int nestLevel, boolean withError) + throws SQLException + { + s_levels[nestLevel].onExit(withError); + } + /** * @return The current invocation */ @@ -166,63 +186,50 @@ public static Invocation current() { return doInPG(() -> { - Invocation curr = _getCurrent(); - if(curr != null) - return curr; - - int level = _getNestingLevel(); + Invocation curr; + int level = s_window.getInt(OFFSET_nestLevel); int top = s_levels.length; - if(level < top) - { - curr = s_levels[level]; - if(curr != null) - { - curr._register(); - return curr; - } - } - else + + if(level >= top) { - int newSize = top; - do { newSize <<= 2; } while(newSize <= level); + int newSize = highestOneBit(level) << 1; Invocation[] levels = new Invocation[newSize]; System.arraycopy(s_levels, 0, levels, 0, top); s_levels = levels; } - curr = new Invocation(level); - s_levels[level] = curr; - curr._register(); + + curr = s_levels[level]; + if ( null == curr ) + s_levels[level] = curr = new Invocation(level); + + s_window.put(OFFSET_hasDual, (byte)1); return curr; }); } - static void clearErrorCondition() + /** + * The "upper executor" memory context (that is, the context on entry, prior + * to any {@code SPI_connect}) associated with the current (innermost) + * invocation. + */ + public static MemoryContext upperExecutorContext() + { + return + doInPG(() -> MemoryContextImpl.fromAddress( + fetchPointer(s_window, OFFSET_upperContext))); + } + + public static void clearErrorCondition() { doInPG(() -> { s_unhandled = null; - _clearErrorCondition(); + s_window.put(OFFSET_errorOccurred, (byte)0); }); } - /** - * Register this Invocation so that it receives the onExit callback - */ - private native void _register(); - - /** - * Returns the current invocation or null if no invocation has been - * registered yet. - */ - private native static Invocation _getCurrent(); - - /** - * Returns the current nesting level - */ - private native static int _getNestingLevel(); - - /** - * Clears the error condition set by elog(ERROR) - */ - private native static void _clearErrorCondition(); + private static class EarlyNatives + { + private static native ByteBuffer _window(); + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/LifespanImpl.java b/pljava/src/main/java/org/postgresql/pljava/internal/LifespanImpl.java new file mode 100644 index 000000000..ee3061445 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/LifespanImpl.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.lang.ref.Reference; // for javadoc + +import org.postgresql.pljava.Lifespan; + +import org.postgresql.pljava.internal.DualState; + +/** + * Implements PL/Java's generalized notion of lifespans. + *
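A hedged sketch tying upperExecutorContext() to MemoryContextImpl.allocatingIn, which Portal.slot() further below uses the same way; the type parameter on Checked.AutoCloseable is elided here as it is elsewhere in this text, and the block is assumed to run on the PG thread:

    // Allocate in the invocation's upper executor context so the result
    // survives SPI cleanup; the prior CurrentMemoryContext is restored when
    // the try block exits.
    try ( Checked.AutoCloseable ac =
              allocatingIn(Invocation.upperExecutorContext()) )
    {
        // ... native allocations made here land in the upper context ...
    }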

    + * Subclasses are likely to maintain cache mappings from addresses of PostgreSQL + * native objects to instances. Such mappings must hold strong references to the + * instances, because any {@code LifespanImpl} instance can serve as the + * head of a list of {@code DualState} objects, which are + * {@link Reference Reference} instances, and the Java runtime will cease + * tracking those if they themselves are not kept strongly reachable. This + * requirement is acceptable, because all instances represent bounded lifespans + * that end with explicit invalidation and decaching; that's what they're for, + * after all. + */ +public class LifespanImpl extends DualState.ListHead implements Lifespan +{ + /** + * Interface to be additionally implemented by + * a {@link LifespanImpl LifespanImpl} with an associated native address + * that may be needed during release handling. + */ + public interface Addressed + { + long address(); + } + + /** + * Overrides the version provided by {@code DualState} to simply call + * the niladic {@code toString}, as a resource owner isn't directly + * associated with another object the way a {@code DualState} instance + * generally is. + */ + @Override + public String toString(Object o) + { + assert null == o || this == o; + return toString(); + } + + @Override + public String toString() + { + Class c = getClass(); + String cn = c.getCanonicalName(); + int pnl = c.getPackageName().length(); + return cn.substring(1 + pnl); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java b/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java index cbe5e8e6c..5cca9d889 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -13,6 +13,10 @@ package org.postgresql.pljava.internal; import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.LifespanImpl; + +import org.postgresql.pljava.pg.MemoryContextImpl; +import org.postgresql.pljava.pg.ResourceOwnerImpl; import java.sql.Connection; import java.sql.SQLException; @@ -71,6 +75,28 @@ private static void forgetNestLevelsGE(int nestLevel) */ private final String m_name; + /* + * Not especially well documented upstream, but following the example of + * plpgsql/perl/python, the current memory context must be saved before + * calling BeginInternalSubTransaction, and then reimposed afterward, and + * reimposed again later after release or rollback-and-release. During the + * subtransaction, its associated context will of course be available as + * CurTransactionMemoryContext, but we will avoid surprising the caller + * with changes to CurrentMemoryContext. + */ + private final long m_priorMemoryContext; + + /* + * Not especially well documented upstream, but following the example of + * plpgsql/perl/python, the current resource owner must be saved before + * calling BeginInternalSubTransaction, and then reimposed later, after + * release or rollback-and-release. 
Unlike the memory context, the owner is + * not reimposed immediately after entering the subtransaction, so the newly + * established owner is the CurrentResourceOwner during the subtransaction, + * and the former one is reimposed only at its (normal or abnormal) end. + */ + private final long m_priorResourceOwner; + /* * The transaction ID assigned during BeginInternalSubTransaction is really * the identifier that matters. An instance will briefly have the default @@ -119,9 +145,12 @@ private static void forgetNestLevelsGE(int nestLevel) */ private static PgSavepoint s_nursery; - private PgSavepoint(String name) + private PgSavepoint( + String name, long priorMemoryContext, long priorResourceOwner) { m_name = name; + m_priorMemoryContext = priorMemoryContext; + m_priorResourceOwner = priorResourceOwner; } /** @@ -135,7 +164,9 @@ public static PgSavepoint set(String name) { return doInPG(() -> { - PgSavepoint sp = new PgSavepoint(name); + long mc = MemoryContextImpl.getCurrentRaw(); + long ro = ResourceOwnerImpl.getCurrentRaw(); + PgSavepoint sp = new PgSavepoint(name, mc, ro); s_nursery = sp; try { @@ -148,6 +179,7 @@ public static PgSavepoint set(String name) finally { s_nursery = null; + MemoryContextImpl.setCurrentRaw(mc); } s_knownSavepoints.put(sp, Boolean.TRUE); return sp; @@ -165,6 +197,7 @@ static PgSavepoint forId(int savepointId) PgSavepoint sp = s_nursery; sp.m_xactId = savepointId; s_nursery = null; + MemoryContextImpl.setCurrentRaw(sp.m_priorMemoryContext); return sp; } for ( PgSavepoint sp : s_knownSavepoints.keySet() ) @@ -204,6 +237,8 @@ public void release() " that is no longer valid", "3B001"); _release(m_xactId, m_nestLevel); + MemoryContextImpl.setCurrentRaw(m_priorMemoryContext); + ResourceOwnerImpl.setCurrentRaw(m_priorResourceOwner); forgetNestLevelsGE(m_nestLevel); }); } @@ -230,6 +265,8 @@ public void rollback() " that is no longer valid", "3B001"); _rollback(m_xactId, m_nestLevel); + MemoryContextImpl.setCurrentRaw(m_priorMemoryContext); + ResourceOwnerImpl.setCurrentRaw(m_priorResourceOwner); /* Forget only more-deeply-nested savepoints, NOT this one */ forgetNestLevelsGE(1 + m_nestLevel); @@ -248,6 +285,7 @@ public void rollback() finally { s_nursery = null; + MemoryContextImpl.setCurrentRaw(m_priorMemoryContext); } }); } @@ -286,6 +324,8 @@ public void onInvocationExit(boolean withError) */ _release(m_xactId, m_nestLevel); forgetNestLevelsGE(m_nestLevel); + MemoryContextImpl.setCurrentRaw(m_priorMemoryContext); + ResourceOwnerImpl.setCurrentRaw(m_priorResourceOwner); } else { @@ -299,6 +339,8 @@ public void onInvocationExit(boolean withError) */ _rollback(m_xactId, m_nestLevel); forgetNestLevelsGE(m_nestLevel); + MemoryContextImpl.setCurrentRaw(m_priorMemoryContext); + ResourceOwnerImpl.setCurrentRaw(m_priorResourceOwner); } } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java b/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java index 363f2bb1b..476906826 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2024 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -12,24 +12,60 @@ */ package org.postgresql.pljava.internal; -import org.postgresql.pljava.internal.SPI; // for javadoc import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.SPI; + +import org.postgresql.pljava.Lifespan; + +import org.postgresql.pljava.model.MemoryContext; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import org.postgresql.pljava.pg.MemoryContextImpl; +import static org.postgresql.pljava.pg.MemoryContextImpl.allocatingIn; +import org.postgresql.pljava.pg.ResourceOwnerImpl; +import org.postgresql.pljava.pg.TupleTableSlotImpl; import java.sql.SQLException; +import java.util.List; + /** * The Portal correspons to the internal PostgreSQL * Portal type. * * @author Thomas Hallgren */ -public class Portal +public class Portal implements org.postgresql.pljava.model.Portal { + private TupleDescriptor m_tupdesc; + + private TupleTableSlotImpl m_slot; + private final State m_state; - Portal(DualState.Key cookie, long ro, long pointer, ExecutionPlan plan) + private final MemoryContext m_context; + + private static final int FETCH_FORWARD = 0; + private static final int FETCH_BACKWARD = 1; + private static final int FETCH_ABSOLUTE = 2; + private static final int FETCH_RELATIVE = 3; + private static final long FETCH_ALL = ALL; + + static + { + assert FETCH_FORWARD == Direction.FORWARD .ordinal(); + assert FETCH_BACKWARD == Direction.BACKWARD.ordinal(); + assert FETCH_ABSOLUTE == Direction.ABSOLUTE.ordinal(); + assert FETCH_RELATIVE == Direction.RELATIVE.ordinal(); + } + + Portal(long ro, long cxt, long pointer, ExecutionPlan plan) { - m_state = new State(cookie, this, ro, pointer, plan); + m_state = + new State(this, ResourceOwnerImpl.fromAddress(ro), pointer, plan); + m_context = MemoryContextImpl.fromAddress(cxt); } private static class State @@ -43,10 +79,9 @@ private static class State private ExecutionPlan m_plan; private State( - DualState.Key cookie, Portal referent, long ro, long portal, - ExecutionPlan plan) + Portal referent, Lifespan span, long portal, ExecutionPlan plan) { - super(cookie, referent, ro, portal); + super(referent, span, portal); m_plan = plan; } @@ -93,6 +128,78 @@ protected void javaStateReleased(boolean nativeStateLive) public void close() { m_state.releaseFromJava(); + m_tupdesc = null; + m_slot = null; + } + + /** + * Returns the {@link TupleDescriptor} that describes the row tuples for + * this {@code Portal}. + * @throws SQLException if the handle to the native structure is stale. 
+ */ + @Override + public TupleDescriptor tupleDescriptor() + throws SQLException + { + return doInPG(() -> + { + if ( null == m_tupdesc ) + m_tupdesc = _getTupleDescriptor(m_state.getPortalPtr()); + return m_tupdesc; + }); + } + + private TupleTableSlotImpl slot() throws SQLException + { + assert threadMayEnterPG(); // only call slot() on PG thread + if ( null == m_slot ) + { + try ( Checked.AutoCloseable ac = + allocatingIn(m_context) ) + { + m_slot = _makeTupleTableSlot( + m_state.getPortalPtr(), tupleDescriptor()); + } + } + return m_slot; + } + + @Override + public List fetch(Direction dir, long count) + throws SQLException + { + boolean forward; + switch ( dir ) + { + case FORWARD : forward = true ; break; + case BACKWARD: forward = false; break; + default: + throw new UnsupportedOperationException( + dir + " Portal mode not yet supported"); + } + + return doInPG(() -> + { + fetch(forward, count); // for now; it's already implemented + return SPI.getTuples(slot()); + }); + } + + @Override + public long move(Direction dir, long count) + throws SQLException + { + boolean forward; + switch ( dir ) + { + case FORWARD : forward = true ; break; + case BACKWARD: forward = false; break; + default: + throw new UnsupportedOperationException( + dir + " Portal mode not yet supported"); + } + + return move(forward, count); // for now; it's already implemented } /** @@ -193,6 +300,13 @@ public long move(boolean forward, long count) return moved; } + private static native TupleDescriptor _getTupleDescriptor(long pointer) + throws SQLException; + + private static native TupleTableSlotImpl + _makeTupleTableSlot(long pointer, TupleDescriptor td) + throws SQLException; + private static native String _getName(long pointer) throws SQLException; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java b/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java index f405ed92c..4860d88d4 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -13,6 +13,7 @@ package org.postgresql.pljava.internal; import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.Invocation; import java.sql.SQLException; @@ -27,18 +28,17 @@ public class Relation private TupleDesc m_tupleDesc; private final State m_state; - Relation(DualState.Key cookie, long resourceOwner, long pointer) + Relation(long pointer) { - m_state = new State(cookie, this, resourceOwner, pointer); + m_state = new State(this, pointer); } private static class State extends DualState.SingleGuardedLong { - private State( - DualState.Key cookie, Relation r, long ro, long hth) + private State(Relation r, long hth) { - super(cookie, r, ro, hth); + super(r, Invocation.current(), hth); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java b/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java index 98ac19932..bc601a33f 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. 
+ * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -12,8 +12,24 @@ */ package org.postgresql.pljava.internal; +import static java.lang.Math.multiplyExact; +import static java.lang.Math.toIntExact; + +import java.nio.ByteBuffer; + +import java.util.List; + import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; +import org.postgresql.pljava.pg.TupleList; +import org.postgresql.pljava.pg.TupleTableSlotImpl; + +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; + /** * The SPI class provides access to some global * variables used by SPI. @@ -57,6 +73,23 @@ public class SPI public static final int OPT_NONATOMIC = 1 << 0; + /* + * Indices into window array. + */ + private static final int SPI_result = 0; + private static final int SPI_processed = 1; + private static final int SPI_tuptable = 2; + + private static final ByteBuffer[] s_windows; + + static + { + ByteBuffer[] bs = EarlyNatives._window(ByteBuffer.class); + for ( int i = 0; i < bs.length; ++ i ) + bs[i] = asReadOnlyNativeOrder(bs[i]); + s_windows = bs; + } + /** * Execute a command using the internal SPI_exec function. * @param command The command to execute. @@ -67,11 +100,28 @@ public class SPI * @deprecated This seems never to have been used in git history of project. */ @Deprecated - private static int exec(String command, int rowCount) + public static int exec(String command, int rowCount) { return doInPG(() -> _exec(command, rowCount)); } + public static void commit() + { + doInPG(() -> _endXact(false)); + } + + public static void rollback() + { + doInPG(() -> _endXact(true)); + } + + /** + * Frees a tuple table returned by SPI. + *

    + * This legacy method has no parameter, and frees whatever tuple table the + * {@code SPI_tuptable} global points to at the moment; beware if SPI has + * returned any newer result since the one you might think you are freeing! + */ public static void freeTupTable() { doInPG(SPI::_freeTupTable); @@ -82,7 +132,12 @@ public static void freeTupTable() */ public static long getProcessed() { - long count = doInPG(SPI::_getProcessed); + long count = doInPG(() -> + { + assert 8 == s_windows[SPI_processed].capacity() : + "SPI_processed width change"; + return s_windows[SPI_processed].getLong(0); + }); if ( count < 0 ) throw new ArithmeticException( "too many rows processed to count in a Java signed long"); @@ -94,11 +149,46 @@ public static long getProcessed() */ public static int getResult() { - return doInPG(SPI::_getResult); + return doInPG(() -> + { + assert 4 == s_windows[SPI_result].capacity() : + "SPI_result width change"; + return s_windows[SPI_result].getInt(0); + }); } /** - * Returns the value of the global variable SPI_tuptable. + * Returns a List of the supplied TupleTableSlot covering the tuples pointed + * to from the pointer array that the global {@code SPI_tuptable} points to. + *

    + * This is an internal, not an API, method, and it does nothing to check + * that the supplied ttsi fits the tuples SPI has returned. The caller is to + * ensure that. + * @return null if the global SPI_tuptable is null + */ + public static TupleList getTuples(TupleTableSlotImpl ttsi) + { + return doInPG(() -> + { + long p = fetchPointer(s_windows[SPI_tuptable], 0); + if ( 0 == p ) + return null; + + long count = getProcessed(); + if ( 0 == count ) + return TupleList.EMPTY; + + // An assertion in the C code checks SIZEOF_DATUM == SIZEOF_VOID_P + // XXX catch ArithmeticException, report a "program limit exceeded" + int sizeToMap = toIntExact(multiplyExact(count, SIZEOF_DATUM)); + + return _mapTupTable(ttsi, p, sizeToMap); + }); + } + + /** + * Returns the tuples located by the global variable {@code SPI_tuptable} + * as an instance of the legacy {@code TupleTable} class. */ public static TupleTable getTupTable(TupleDesc known) { @@ -152,11 +242,25 @@ public static String getResultText(int resultCode) } } + private static class EarlyNatives + { + /** + * Returns an array of ByteBuffer, one covering SPI_result, one for + * SPI_processed, and one for the SPI_tuptable pointer. + *

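The mechanism set up above is worth a self-contained illustration: native code maps one small direct ByteBuffer over each C global (SPI_result, SPI_processed, SPI_tuptable), Java wraps it read-only in native byte order once, and later reads are plain buffer accesses on the PG thread with no JNI round trip per access. In this sketch an ordinary direct buffer stands in for the native memory, the written value is invented, and the local helper only mirrors what DatumUtils.asReadOnlyNativeOrder is assumed to do.

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class WindowSketch
    {
        public static void main(String[] args)
        {
            // In PL/Java the buffer comes from EarlyNatives._window and maps
            // the C global; here ordinary direct memory stands in for it.
            ByteBuffer raw =
                ByteBuffer.allocateDirect(8).order(ByteOrder.nativeOrder());
            raw.putLong(0, 1234L);        // "native code" updates the global

            ByteBuffer processedWindow = asReadOnlyNativeOrder(raw);

            assert 8 == processedWindow.capacity() : "SPI_processed width change";
            long processed = processedWindow.getLong(0); // read, no JNI call
            if ( processed < 0 )
                throw new ArithmeticException(
                    "too many rows processed to count in a Java signed long");
            System.out.println(processed); // prints 1234
        }

        static ByteBuffer asReadOnlyNativeOrder(ByteBuffer b)
        {
            // wrap read-only and switch to the platform's byte order
            return b.asReadOnlyBuffer().order(ByteOrder.nativeOrder());
        }
    }

The pointer-sized SPI_tuptable window is read the same way, just at SIZEOF_DATUM width via fetchPointer, which is why getTuples can hand the raw address straight to _mapTupTable.
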
    + * Takes a {@code Class} argument, to save the native + * code a lookup. + */ + private static native ByteBuffer[] _window( + Class component); + } + @Deprecated - private native static int _exec(String command, int rowCount); + private static native int _exec(String command, int rowCount); - private native static long _getProcessed(); - private native static int _getResult(); - private native static void _freeTupTable(); - private native static TupleTable _getTupTable(TupleDesc known); + private static native void _endXact(boolean rollback); + private static native void _freeTupTable(); + private static native TupleTable _getTupTable(TupleDesc known); + private static native TupleList _mapTupTable( + TupleTableSlotImpl ttsi, long p, int sizeToMap); } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SQL_ASCII.java b/pljava/src/main/java/org/postgresql/pljava/internal/SQL_ASCII.java index a9e5454ae..7e519c66e 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/SQL_ASCII.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SQL_ASCII.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -54,6 +54,9 @@ */ class SQL_ASCII extends Charset { + /** + * Holder class for list of provided {@link Charset Charset}s. + */ static class Holder { static final List s_list = @@ -61,6 +64,9 @@ static class Holder } + /** + * Provider for {@link Charset Charset} discovery. + */ public static class Provider extends CharsetProvider { static final String s_canonName = "X-PGSQL_ASCII"; @@ -109,6 +115,9 @@ public CharsetEncoder newEncoder() } + /** + * Decoder for {@link SQL_ASCII SQL_ASCII}. + */ static class Decoder extends CharsetDecoder { Decoder() @@ -156,6 +165,9 @@ protected CoderResult decodeLoop(ByteBuffer in, CharBuffer out) } } + /** + * Encoder for {@link SQL_ASCII SQL_ASCII}. + */ static class Encoder extends CharsetEncoder { Encoder() diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java b/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java index 5c81053ae..b70a701a8 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java @@ -16,10 +16,9 @@ import static java.util.Arrays.copyOfRange; +import static org.postgresql.pljava.internal.Invocation.s_unhandled; import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; -import static org.postgresql.pljava.jdbc.Invocation.s_unhandled; - /** * A Java exception constructed over a PostgreSQL error report. * @author Thomas Hallgren diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Session.java b/pljava/src/main/java/org/postgresql/pljava/internal/Session.java index ca55332e3..cb7655e19 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Session.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Session.java @@ -74,37 +74,12 @@ private static class Holder */ static Properties s_properties; - /** - * The Java charset corresponding to the server encoding, or null if none - * such was found. Put here by InstallHelper via package access at startup. 
- */ - static Charset s_serverCharset; - @Override public Properties frozenSystemProperties() { return m_properties; } - /** - * A static method (not part of the API-exposed Session interface) by which - * pljava implementation classes can get hold of the server charset without - * the indirection of getting a Session instance. If there turns out to be - * demand for client code to obtain it through the API, an interface method - * {@code serverCharset} can easily be added later. - * @return The Java Charset corresponding to the server's encoding, or null - * if no matching Java charset was found. That can happen if a corresponding - * Java charset really does exist but is not successfully found using the - * name reported by PostgreSQL. That can be worked around by giving the - * right name explicitly as the system property - * {@code org.postgresql.server.encoding} in {@code pljava.vmoptions} for - * the affected database (or cluster-wide, if the same encoding is used). - */ - public static Charset implServerCharset() - { - return s_serverCharset; - } - @SuppressWarnings("removal") private final TransactionalMap m_attributes = new TransactionalMap(new HashMap()); diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SwitchPointCache.java b/pljava/src/main/java/org/postgresql/pljava/internal/SwitchPointCache.java new file mode 100644 index 000000000..683b369cf --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SwitchPointCache.java @@ -0,0 +1,938 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles.Lookup; +import java.lang.invoke.MethodHandles; +import static java.lang.invoke.MethodHandles.collectArguments; +import static java.lang.invoke.MethodHandles.constant; +import static java.lang.invoke.MethodHandles.dropArguments; +import static java.lang.invoke.MethodHandles.empty; +import static java.lang.invoke.MethodHandles.filterArguments; +import static java.lang.invoke.MethodHandles.insertArguments; +import static java.lang.invoke.MethodHandles.lookup; +import static java.lang.invoke.MethodHandles.permuteArguments; +import java.lang.invoke.MethodType; +import static java.lang.invoke.MethodType.methodType; +import java.lang.invoke.VarHandle; +import static java.lang.invoke.VarHandle.AccessMode.GET; +import static java.lang.invoke.VarHandle.AccessMode.SET; + +import java.lang.reflect.Method; +import static java.lang.reflect.Modifier.isStatic; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import static java.util.Objects.requireNonNull; + +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import static java.util.function.Function.identity; + +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toMap; +import java.util.stream.Stream; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.DualState; // for JavaDoc +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Tool for implementing objects or families 
of objects with methods that lazily + * compute various values and then return the same values until invalidated, + * after which new values will be lazily computed when next requested. + *

    <h2>Synchronization</h2>
    + *

    + * Items that have been cached are returned directly, until invalidated by + * the action of {@link SwitchPoint SwitchPoint}. + *

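The JDK primitive doing the work here can be shown in miniature: a handle produced by guardWithTest keeps hitting its constant target until the SwitchPoint is invalidated, after which calls fall through to the fallback. This sketch uses plain java.lang.invoke.SwitchPoint and invented names; the class below differs in that its fallback recomputes the value and installs a fresh guarded constant.

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.SwitchPoint;
    import static java.lang.invoke.MethodHandles.constant;
    import static java.lang.invoke.MethodType.methodType;

    public class GuardSketch
    {
        public static void main(String[] args) throws Throwable
        {
            SwitchPoint sp = new SwitchPoint();
            MethodHandle fallback = MethodHandles.lookup().findStatic(
                GuardSketch.class, "recompute", methodType(int.class));
            MethodHandle h =
                sp.guardWithTest(constant(int.class, 42), fallback);

            System.out.println((int)h.invokeExact()); // 42, the cached constant
            SwitchPoint.invalidateAll(new SwitchPoint[] { sp });
            System.out.println((int)h.invokeExact()); // 7, from the fallback
        }

        static int recompute()
        {
            return 7;
        }
    }
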
    + * When an item has not been cached or the cached value has been invalidated, + * its lazy recomputation at next use takes place within {@code doInPG}, + * that is, "on the PG thread". (An extended discussion of what that really + * means can be found at {@link DualState DualState}.) The PG thread must be the + * only thread where the {@code SwitchPoint}s will be invalidated, and an old + * {@code SwitchPoint} must be replaced in its field by a newly-constructed one + * before the old one is invalidated. + */ +public class SwitchPointCache +{ + /** + * A subclass of Java's {@link java.lang.invoke.SwitchPoint SwitchPoint} + * that tracks whether it has been used yet + * ({@link #guardWithTest guardWithTest} has been called). + *

    + * The unused flag is accessed with no special concurrency measures: it is + * expected that all calls of {@code guardWithTest} and all calls of + * {@link #unused unused()} will be made "on the PG thread". + *

    + * A subclass may override {@link #onFirstUse() onFirstUse()} to have + * something happen (on the PG thread) the first time {@code guardWithTest} + * is called. + */ + public static class SwitchPoint extends java.lang.invoke.SwitchPoint + { + private boolean unused = true; + + /** + * Returns a method handle which always delegates either to the target + * or the fallback. + *

    + * This method clears the {@link #unused() unused} flag and + * calls the {@link #onFirstUse() onFirstUse} method the first time + * it is called after construction of this {@code SwitchPoint}. + *

    + * This implementation is only to be called on the PG thread. + */ + @Override + public MethodHandle guardWithTest(MethodHandle tgt, MethodHandle fbk) + { + if ( unused ) + { + unused = false; + onFirstUse(); + } + return super.guardWithTest(tgt, fbk); + } + + /** + * Returns (reliably if all calls of this method and of + * {@link #guardWithTest guardWithTest} are made "on the PG thread") + * whether this {@code SwitchPoint} remains unused (no call of + * {@code guardWithTest} has been made since its allocation). + */ + public boolean unused() + { + return unused; + } + + /** + * May be overridden to take some action ("on the PG thread") on + * the first call of {@link #guardWithTest guardWithTest} since + * allocation of this {@code SwitchPoint}. + */ + protected void onFirstUse() + { + } + } + + private SwitchPointCache() // not to be instantiated + { + } + + /** + * Whether to cache the value returned by a computation method; true unless + * the method has called {@code doNotCache}. + *

    + * Because all computation methods are constrained to run on the PG thread, + * a simple static suffices. + */ + private static boolean cache = true; + + /** + * Called from a computation method to prevent caching of the value being + * returned. + *

    + * This can be useful in cases where a not-yet-determined value should not + * 'stick'. Whatever the computation method returns will be returned to the + * caller, but the computation method will be reinvoked the next time + * a caller wants the value. + *

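A compute method that has only a placeholder available can hand it back without letting it stick, along these lines (hypothetical class and field; the method has the usual static, one-parameter shape expected by the Builder below):

    import org.postgresql.pljava.internal.SwitchPointCache;

    class LabelHolder
    {
        String m_label; // may be filled in later by other code

        static String label(LabelHolder o)
        {
            String s = o.m_label;
            if ( null == s )
            {
                // placeholder: return it, but have it recomputed next time
                SwitchPointCache.doNotCache();
                return "(undetermined)";
            }
            return s;
        }
    }
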
    + * This state is reset on entry and after return of any computation method. + * Therefore, if there are actions within a computation method that could + * involve calling other {@code SwitchPointCache}-based methods, this method + * must be called after all of those to have reliable effect. By convention, + * it should be called immediately before returning. + */ + public static void doNotCache() + { + cache = false; + } + + /** + * Transform a {@code MethodHandle} into one with a reference to itself. + * @param m MethodHandle with methodType(r,MethodHandle,p0,...,pk) where the + * expected first parameter is a MethodHandle h of methodType(r,p0,...,pk) + * that invokes m with inserted first argument h. + * @return h + */ + public static MethodHandle fix(MethodHandle m) + { + MethodHandle[] a = new MethodHandle[1]; + a[0] = m.asSpreader(0, MethodHandle[].class, 1).bindTo(a); + return a[0]; + } + + /** + * Replace {@code slots[index]} with a constant returning {@code v} forever, + * immune to invalidation. + *

    + * The slot must already be populated, as by the initializer created by + * a {@link Builder Builder}; this method adapts the supplied constant to + * the slot's existing {@code methodType}. + */ + public static void setConstant(MethodHandle[] slots, int index, Object v) + { + MethodHandle h = slots[index]; + MethodType t = h.type(); + MethodHandle c = constant(t.returnType(), v); + c = dropArguments(c, 0, t.parameterArray()); + slots[index] = c; + } + + /** + * Builder for use during the static initialization of a class that uses + * {@code SwitchPointCache}. + *

    + * The builder's constructor is passed information about the class, and + * about a {@link SwitchPoint SwitchPoint} that will be used when the + * dependent values need to be invalidated. To accommodate invalidation + * schemes with different granularity, the {@code SwitchPoint} used may be + * kept in an instance field of the class, or in a static field and + * governing all instances of the class, or even somewhere else entirely + * and used for widespread or global invalidation. + *

    + * The builder's {@link #withDependent withDependent} method is then used to + * declare each value that can be computed and cached in an instance of the + * class, by giving the name of a static method that computes the + * value (given one argument, an instance of the class) and functions to get + * and set a {@code MethodHandle}-typed per-instance slot where the + * computation result will be cached. + *

    + * Finally, the builder's {@link #build build} method returns + * a {@code Function} that can be saved in + * a static final field and invoked in the object's constructor on a new + * {@code MethodHandle} array with the right number of slots; it will + * initialize all of the slots that were declared in {@code withDependent} + * calls to their initial, uncomputed states. + */ + /* + * It could be tempting to replace Function + * with the less-unwieldy UnaryOperator. But they are + * frequently composed here with .andThen(...), which UnaryOperator + * inherits from Function without refining its return type. Using + * andThen(...)::apply is a notationally compact workaround for that, but + * bloats the generated code and data. + */ + public static class Builder + { + private final Class m_class; + private Function m_describer; + private Function m_initializer; + private Lookup m_lookup; + private Map m_candidates; + private Function m_spGetter; + private Function m_slotGetter; + private Class m_receiver; + private Class m_return; + + /** + * Create a builder that will be used to declare dependent values + * controlled by a single {@code SwitchPoint} and to create an + * initializer for the per-instance slots that will hold their states. + * @param c the class being configured by this Builder + */ + public Builder(Class c) + { + m_receiver = m_class = requireNonNull(c); + m_describer = Object::toString; + m_initializer = identity(); + } + + /** + * @param describer function, with a signature like that of + * {@code Object.toString}, that will produce a useful description of + * the object if needed in an exception message. The default if this + * method is not called is {@code Object::toString}; a different + * describer can be supplied if the output of {@code toString} isn't + * well suited for an exception message. If null, any exception will + * have its bare constant message with nothing added about the specific + * receiver object. + */ + public Builder withDescriber(Function describer) + { + if ( null == describer ) + m_describer = o -> ""; + else + m_describer = o -> ": " + describer.apply(o); + return this; + } + + /** + * Supply the {@code Lookup} object to be used in resolving dependent + * methods. + * @param l a {@link Lookup Lookup} object obtained by the caller and + * able to introspect in the class + */ + public Builder withLookup(Lookup l) + { + m_lookup = requireNonNull(l); + return this; + } + + /** + * Supply the candidate methods to be available to subsequent + * {@link #withDependent withDependent} calls. + * @param ms array of methods such as the caller may obtain with + * {@link Class#getDeclaredMethods getDeclaredMethods}, avoiding the + * access complications of having this class do it. The methods will be + * filtered to only those that are static with a non-void return type + * and exactly one parameter, assignable from the target class, and + * uniquely named within that set. Only such methods can be named in + * later {@link #withDependent withDependent} calls. No reference to + * the array will be retained. + */ + public Builder withCandidates(Method[] ms) + { + m_candidates = candidatesAmong(Arrays.stream(ms)); + return this; + } + + /** + * Supply a function mapping a receiver object instance to the + * {@code SwitchPoint} to be associated with subsequently declared + * slots. + * @param spGetter a function usable to fetch the SwitchPoint + * that controls this cache. 
It is passed an instance of T but need not + * use it (in the case, for example, of a single controlling SwitchPoint + * held in a static). + */ + public Builder withSwitchPoint(Function spGetter) + { + m_spGetter = requireNonNull(spGetter); + return this; + } + + /** + * Supply a function mapping a receiver object instance to the + * per-instance {@code MethodHandle} array whose elements will be + * the slots. + * @param slotGetter a function usable to fetch the slot array + * for an instance. + */ + public Builder withSlots(Function slotGetter) + { + m_slotGetter = requireNonNull(slotGetter); + return this; + } + + /** + * Adjust the static return type of subsequently declared dependents + * that return references. + *

    + * This can be a more compact notation if compute methods or API methods + * from a superclass or subclass will be reused and the return type + * needs to be adjusted to match the static type in the API method + * (possibly erased from a generic type). + * @param t Class to serve as the following dependents' static return + * type. Pass null to discontinue adjusting return types for following + * dependents. + * @throws IllegalArgumentException if t represents a primitive type. + */ + public Builder withReturnType(Class t) + { + if ( null != t && t.isPrimitive() ) + throw new IllegalArgumentException( + "return type adjustment cannot accept primitive type " + t); + m_return = t; + return this; + } + + /** + * Adjust the static receiver type of subsequently declared dependents. + *

    + * This can be a more compact notation if compute methods or API methods + * from a superclass or subclass will be reused and the receiver type + * needs to be adjusted to match the static type in the API method + * (possibly erased from a generic type). + * @param t Class to serve as the following dependents' static receiver + * type. Pass null to discontinue adjusting receiver types for following + * dependents. + * @throws IllegalArgumentException if t is neither a widening nor a + * narrowing of the receiver type specified for this builder. + */ + public Builder withReceiverType(Class t) + { + if ( null != t + && ! t.isAssignableFrom(m_class) + && ! m_class.isAssignableFrom(t) ) + throw new IllegalArgumentException( + "receiver type " + m_class + " cannot be adjusted to " + t); + m_receiver = null == t ? m_class : t; + return this; + } + + /** + * Return a {@code Function} to be + * invoked in the constructor of a client object, applied to a + * newly-allocated array of the right number of slots, which will + * initialize all of the array's elements with the corresponding + * fallback method handles and return the initialized array. + *

    + * The initializer can be used conveniently in a constructor that + * assigns the array to a final field, or calls a superclass constructor + * that does so, to arrange that the array's elements are written + * in advance of Java's freeze of the final array reference field. + */ + public Function build() + { + return m_initializer; + } + + /** + * Declare one dependent item that will be controlled by this builder's + * {@code SwitchPoint}. + *

    + * An item is declared by naming the static method that can + * compute its value when needed, and the index into the per-instance + * {@code MethodHandle[]} "slots" array that will be used to cache the + * value. Typically, these will be private, and there will be an API + * method for retrieving the value, by fetching the method handle from + * the array index given here, and invoking it. + *

    + * The method handle that will be found in the named slot has a return + * type matching the compute method named here, and two parameters; it + * expects the receiver object as the first parameter, and itself as + * the second. So the typical API method is simply: + *

    +		 * MethodHandle h = slots[MY_SLOT];
    +		 * return (cast)h.invokeExact(this, h);
    +		 *
    + *

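Putting the Builder pieces together, a minimal client class might look like the following. The names are invented, and the type parameters elided in the rendering above are assumed to be Builder<T> and Function<MethodHandle[],MethodHandle[]>.

    import java.lang.invoke.MethodHandle;
    import static java.lang.invoke.MethodHandles.lookup;
    import java.util.function.Function;

    import org.postgresql.pljava.internal.SwitchPointCache;
    import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint;

    class CachedThing
    {
        static final int SLOT_NAME = 0;
        static final int NSLOTS    = 1;

        static final Function<MethodHandle[],MethodHandle[]> s_initializer =
            new SwitchPointCache.Builder<>(CachedThing.class)
            .withLookup(lookup())
            .withCandidates(CachedThing.class.getDeclaredMethods())
            .withSwitchPoint(o -> o.m_sp)
            .withSlots(o -> o.m_slots)
            .withDependent("name", SLOT_NAME)
            .build();

        SwitchPoint m_sp = new SwitchPoint();
        final MethodHandle[] m_slots =
            s_initializer.apply(new MethodHandle[NSLOTS]);

        /* compute method: static, non-void, one parameter of the class type */
        private static String name(CachedThing o)
        {
            return "expensively computed";
        }

        /* API method, exactly the shape shown in the javadoc sample above */
        String name()
        {
            try
            {
                MethodHandle h = m_slots[SLOT_NAME];
                return (String)h.invokeExact(this, h);
            }
            catch ( Throwable t )
            {
                throw new RuntimeException(t);
            }
        }

        /*
         * Invalidation, on the PG thread: install a fresh SwitchPoint first,
         * then invalidate the old one, as the class javadoc requires.
         */
        void invalidate()
        {
            SwitchPoint old = m_sp;
            m_sp = new SwitchPoint();
            java.lang.invoke.SwitchPoint.invalidateAll(
                new java.lang.invoke.SwitchPoint[] { old });
        }
    }

The compute method stays an ordinary static; everything about guarding, caching, and recomputation lives in the handle the initializer installs, and invalidation follows the replace-then-invalidate order described in the class javadoc.
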
    + * When there is a cached value and the {@code SwitchPoint} has not been + * invalidated, the two arguments are ignored and the cached value + * is returned. + * @param methodName name of the static method that will be used to + * compute values for this item. It must be found among the methods + * that were passed to the Builder constructor, only considering those + * that are static, with a non-void return and one argument of + * the class type. + * @param index index into the per-instance slot arrray where the cached + * state will be maintained. + */ + public Builder withDependent(String methodName, int index) + { + MethodHandle m; + MethodHandle recompute; + + try + { + m = m_lookup.unreflect(m_candidates.get(methodName)); + } + catch ( ReflectiveOperationException e ) + { + throw unchecked(e); + } + + final MethodHandle only_p_erased = eraseP0(m); + MethodType mt = only_p_erased.type(); + Class rt = mt.returnType(); + Class pt = m_receiver; + Function spGetter = m_spGetter; + Function slotGetter = m_slotGetter; + Function describer = m_describer; + + if ( ! rt.isPrimitive() ) + { + Class rtfinal = null == m_return ? rt : m_return; + + final MethodHandle p_and_r_erased = + m.asType(mt.changeReturnType(Object.class)); + recompute = AS_MH.bindTo((As)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. + */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + Object v; + try + { + cache = true; + v = p_and_r_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rtfinal, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + recompute = recompute.asType( + recompute.type().changeReturnType(rtfinal)); + } + else if ( int.class == rt ) + { + recompute = ASINT_MH.bindTo((AsInt)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (int)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. + */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + int v; + try + { + cache = true; + v = (int)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else if ( long.class == rt ) + { + recompute = ASLONG_MH.bindTo((AsLong)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (long)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. 
+ */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + long v; + try + { + cache = true; + v = (long)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else if ( boolean.class == rt ) + { + recompute = + ASBOOLEAN_MH.bindTo((AsBoolean)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (boolean)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. + */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + boolean v; + try + { + cache = true; + v = (boolean)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else if ( double.class == rt ) + { + recompute = + ASDOUBLE_MH.bindTo((AsDouble)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (double)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. + */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + double v; + try + { + cache = true; + v = (double)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else if ( float.class == rt ) + { + recompute = + ASFLOAT_MH.bindTo((AsFloat)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (float)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. 
+ */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + float v; + try + { + cache = true; + v = (float)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else if ( short.class == rt ) + { + recompute = + ASSHORT_MH.bindTo((AsShort)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (short)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. + */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + short v; + try + { + cache = true; + v = (short)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else if ( char.class == rt ) + { + recompute = ASCHAR_MH.bindTo((AsChar)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (char)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. + */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + char v; + try + { + cache = true; + v = (char)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else if ( byte.class == rt ) + { + recompute = ASBYTE_MH.bindTo((AsByte)(h,o,g) -> doInPG(() -> + { + MethodHandle[] slots = slotGetter.apply(o); + MethodHandle gwt = slots[index]; + if ( gwt != g ) // somebody else refreshed it already + return (byte)gwt.invoke(o, gwt); + /* + * Still the same invalidated g, so the task of computing + * a fresh value and replacing it has fallen to us. 
+ */ + SwitchPoint sp = spGetter.apply(o); + if ( null == sp || sp.hasBeenInvalidated() ) + throw new IllegalStateException( + "function call after invalidation of object" + + describer.apply(o)); + byte v; + try + { + cache = true; + v = (byte)only_p_erased.invokeExact(o); + if ( cache ) + { + MethodHandle c = constant(rt, v); + c = dropArguments(c, 0, pt, MethodHandle.class); + slots[index] = sp.guardWithTest(c, h); + } + } + finally + { + cache = true; + } + return v; + })); + } + else + throw new AssertionError("unhandled primitive"); // pacify javac + + recompute = recompute.asType( + recompute.type().changeParameterType(1, pt)); + + MethodHandle init = fix(recompute); + + m_initializer = m_initializer.andThen(s -> + { + s[index] = init; + return s; + }); + + return this; + } + + /** + * Return a map from name to {@code Method} for all methods in ms that + * are static with a non-void return type and exactly one parameter, + * assignable from the target class, and uniquely named within that set. + */ + private Map candidatesAmong(Stream ms) + { + Map> m1 = ms + .filter(m -> + isStatic(m.getModifiers()) && + void.class != m.getReturnType() && + 1 == m.getParameterCount() && + m.getParameterTypes()[0].isAssignableFrom(m_class)) + .collect(groupingBy(Method::getName)); + + return m1.values().stream() + .filter(list -> 1 == list.size()) + .map(list -> list.get(0)) + .collect(toMap(Method::getName, identity())); + } + + private static MethodHandle eraseP0(MethodHandle m) + { + MethodType mt = m.type().changeParameterType(0, Object.class); + return m.asType(mt); + } + } + + private static final MethodHandle AS_MH; + private static final MethodHandle ASLONG_MH; + private static final MethodHandle ASDOUBLE_MH; + private static final MethodHandle ASINT_MH; + private static final MethodHandle ASFLOAT_MH; + private static final MethodHandle ASSHORT_MH; + private static final MethodHandle ASCHAR_MH; + private static final MethodHandle ASBYTE_MH; + private static final MethodHandle ASBOOLEAN_MH; + + static + { + Lookup lu = lookup(); + MethodType mt = + methodType(Object.class, + MethodHandle.class, Object.class, MethodHandle.class); + + try + { + AS_MH = lu.findVirtual(As.class, "compute", mt); + + ASLONG_MH = lu.findVirtual(AsLong.class, "compute", + mt.changeReturnType(long.class)); + + ASDOUBLE_MH = lu.findVirtual(AsDouble.class, "compute", + mt.changeReturnType(double.class)); + + ASINT_MH = lu.findVirtual(AsInt.class, "compute", + mt.changeReturnType(int.class)); + + ASFLOAT_MH = lu.findVirtual(AsFloat.class, "compute", + mt.changeReturnType(float.class)); + + ASSHORT_MH = lu.findVirtual(AsShort.class, "compute", + mt.changeReturnType(short.class)); + + ASCHAR_MH= lu.findVirtual(AsChar.class, "compute", + mt.changeReturnType(char.class)); + + ASBYTE_MH = lu.findVirtual(AsByte.class, "compute", + mt.changeReturnType(byte.class)); + + ASBOOLEAN_MH = lu.findVirtual(AsBoolean.class, "compute", + mt.changeReturnType(boolean.class)); + } + catch ( ReflectiveOperationException e ) + { + throw unchecked(e); + } + } + + @FunctionalInterface + private interface As + { + R compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsLong + { + long compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsDouble + { + double compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsInt + { + int 
compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsFloat + { + float compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsShort + { + short compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsChar + { + char compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsByte + { + byte compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } + + @FunctionalInterface + private interface AsBoolean + { + boolean compute(MethodHandle h, T instance, MethodHandle gwt) + throws Throwable; + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SyntheticXMLReader.java b/pljava/src/main/java/org/postgresql/pljava/internal/SyntheticXMLReader.java index ca590c636..f542dec50 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/SyntheticXMLReader.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SyntheticXMLReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2019-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -103,6 +103,9 @@ public abstract class SyntheticXMLReader implements XMLReader */ public final FluentAttributes2 m_attributes = new FluentAttributes2(); + /** + * Enumeration of features standardized by SAX 2. + */ public enum SAX2FEATURE { EXTERNAL_GENERAL_ENTITIES("external-general-entities", null), @@ -184,6 +187,9 @@ public static SAX2FEATURE fromUri(String uri) } } + /** + * Enumeration of features originating with Apache. + */ public enum ApacheFeature { DISALLOW_DOCTYPE_DECL("disallow-doctype-decl", false), @@ -237,6 +243,9 @@ public static ApacheFeature fromUri(String uri) } } + /** + * Enumeration of properties standardized by SAX 2. + */ public enum SAX2PROPERTY { DECLARATION_HANDLER("declaration-handler", DeclHandler.class), diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java b/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java index 74368e0fb..f70586b9c 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -16,6 +16,7 @@ import java.sql.SQLException; import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.Invocation; import org.postgresql.pljava.TriggerException; import org.postgresql.pljava.jdbc.TriggerResultSet; @@ -35,18 +36,17 @@ public class TriggerData implements org.postgresql.pljava.TriggerData private boolean m_suppress = false; private final State m_state; - TriggerData(DualState.Key cookie, long resourceOwner, long pointer) + TriggerData(long pointer) { - m_state = new State(cookie, this, resourceOwner, pointer); + m_state = new State(this, pointer); } private static class State extends DualState.SingleGuardedLong { - private State( - DualState.Key cookie, TriggerData td, long ro, long hth) + private State(TriggerData td, long hth) { - super(cookie, td, ro, hth); + super(td, Invocation.current(), hth); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java b/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java index ac4fc417f..7d4d29022 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -26,18 +26,24 @@ public class Tuple { private final State m_state; - Tuple(DualState.Key cookie, long resourceOwner, long pointer) + Tuple(long pointer) { - m_state = new State(cookie, this, resourceOwner, pointer); + m_state = new State(this, pointer); } private static class State extends DualState.SingleHeapFreeTuple { - private State( - DualState.Key cookie, Tuple t, long ro, long ht) + private State(Tuple t, long ht) { - super(cookie, t, ro, ht); + /* + * Passing null as the Lifespan means this will never be + * matched by a lifespanRelease call; that's appropriate (for now) as + * the Tuple copy is being made into JavaMemoryContext, which never + * gets reset, so only unreachability from the Java side + * will free it. + */ + super(t, null, ht); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java b/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java index 8dd5b343b..482ce2568 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -28,20 +28,26 @@ public class TupleDesc private final int m_size; private Class[] m_columnClasses; - TupleDesc(DualState.Key cookie, long resourceOwner, long pointer, int size) + TupleDesc(long pointer, int size) throws SQLException { - m_state = new State(cookie, this, resourceOwner, pointer); + m_state = new State(this, pointer); m_size = size; } private static class State extends DualState.SingleFreeTupleDesc { - private State( - DualState.Key cookie, TupleDesc td, long ro, long hth) + private State(TupleDesc td, long hth) { - super(cookie, td, ro, hth); + /* + * Passing null as the Lifespan means this will never be + * matched by a lifespanRelease call; that's appropriate (for now) as + * the TupleDesc copy is being made into JavaMemoryContext, which + * never gets reset, so only unreachability from the Java side + * will free it. + */ + super(td, null, hth); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/UnhandledPGException.java b/pljava/src/main/java/org/postgresql/pljava/internal/UnhandledPGException.java index 95c015321..67f9bfba1 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/UnhandledPGException.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/UnhandledPGException.java @@ -16,10 +16,9 @@ import static java.util.Arrays.copyOfRange; +import static org.postgresql.pljava.internal.Invocation.s_unhandled; import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; -import static org.postgresql.pljava.jdbc.Invocation.s_unhandled; - /** * A Java exception constructed over a {@link ServerException} that has been * thrown but not recovered from (as by rolling back to a prior savepoint) diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaWrapper.java b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaWrapper.java index 44432000b..9f8c9b5db 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaWrapper.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaWrapper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2019-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -35,7 +35,18 @@ import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; +import org.postgresql.pljava.adt.spi.Datum; + import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.LifespanImpl.Addressed; + +import org.postgresql.pljava.model.MemoryContext; +import org.postgresql.pljava.model.ResourceOwner; + +import org.postgresql.pljava.pg.DatumImpl; +import org.postgresql.pljava.pg.DatumImpl.IStream; +import org.postgresql.pljava.pg.MemoryContextImpl; +import org.postgresql.pljava.pg.ResourceOwnerImpl; /** * Interface that wraps a PostgreSQL native variable-length ("varlena") datum; @@ -48,15 +59,8 @@ * Java code has written and closed it), after which it is no longer accessible * from Java. */ -public interface VarlenaWrapper extends Closeable +public interface VarlenaWrapper extends Closeable, DatumImpl { - /** - * Return the varlena address to native code and dissociate the varlena - * from Java. - * @param cookie Capability held by native code. 
- */ - long adopt(DualState.Key cookie) throws SQLException; - /** * Return a string describing this object in a way useful for debugging, * prefixed with the name (abbreviated for comfort) of the class of the @@ -74,8 +78,6 @@ public interface VarlenaWrapper extends Closeable */ String toString(Object o); - - /** * A class by which Java reads the content of a varlena. * @@ -83,7 +85,7 @@ public interface VarlenaWrapper extends Closeable * the native reference; the chosen resource owner must be one that will be * released no later than the memory context containing the varlena. */ - public static class Input implements VarlenaWrapper + public static class Input extends DatumImpl.Input implements VarlenaWrapper { private long m_parkedSize; private long m_bufferSize; @@ -91,7 +93,6 @@ public static class Input implements VarlenaWrapper /** * Construct a {@code VarlenaWrapper.Input}. - * @param cookie Capability held by native code. * @param resourceOwner Resource owner whose release will indicate that the * underlying varlena is no longer valid. * @param context Memory context in which the varlena is allocated. @@ -107,15 +108,16 @@ public static class Input implements VarlenaWrapper * @param buf Readable direct {@code ByteBuffer} constructed over the * varlena's data bytes. */ - private Input(DualState.Key cookie, long resourceOwner, + private Input(long resourceOwner, long context, long snapshot, long varlenaPtr, long parkedSize, long bufferSize, ByteBuffer buf) { m_parkedSize = parkedSize; m_bufferSize = bufferSize; m_state = new State( - cookie, this, resourceOwner, - context, snapshot, varlenaPtr, buf); + this, resourceOwner, + MemoryContextImpl.fromAddress(context), + snapshot, varlenaPtr, buf); } public void pin() throws SQLException @@ -167,12 +169,12 @@ public String toString(Object o) } @Override - public long adopt(DualState.Key cookie) throws SQLException + public long adopt() throws SQLException { m_state.pin(); try { - return m_state.adopt(cookie); + return m_state.adopt(); } finally { @@ -180,181 +182,24 @@ public long adopt(DualState.Key cookie) throws SQLException } } - public class Stream - extends ByteBufferInputStream implements VarlenaWrapper - { - /** - * A duplicate of the {@code VarlenaWrapper.Input}'s byte buffer, - * so its {@code position} and {@code mark} can be updated by the - * {@code InputStream} operations without affecting the original - * (therefore multiple {@code Stream}s may read one {@code Input}). - */ - private ByteBuffer m_movingBuffer; - - /* - * Overrides {@code ByteBufferInputStream} method and throws the - * exception type declared there. For other uses of pin in this - * class where SQLException is expected, just use - * {@code m_state.pin} directly. - */ - @Override - protected void pin() throws IOException - { - if ( ! m_open ) - throw new IOException("Read from closed VarlenaWrapper"); - try - { - Input.this.pin(); - } - catch ( SQLException e ) - { - throw new IOException(e.getMessage(), e); - } - } - - /* - * Unpin for use in {@code ByteBufferInputStream} or here; no - * throws-clause difference to blotch things up. - */ - protected void unpin() - { - Input.this.unpin(); - } - - @Override - public void close() throws IOException - { - if ( pinUnlessReleased() ) - return; - try - { - super.close(); - Input.this.close(); - } - finally - { - unpin(); - } - } - - @Override - public String toString(Object o) - { - return String.format("%s %s", - Input.this.toString(o), m_open ? 
"open" : "closed"); - } - - /** - * Apply a {@code Verifier} to the input data. - *

    - * This should only be necessary if the input wrapper is being used - * directly as an output item, and needs verification that it - * conforms to the format of the target type. - *

    - * The current position must be at the beginning of the stream. The - * verifier must leave it at the end to confirm the entire stream - * was examined. There should be no need to reset the position here, - * as the only anticipated use is during an {@code adopt}, and the - * native code will only care about the varlena's address. - */ - public void verify(Verifier v) throws SQLException - { - /* - * This is only called from some client code's adopt() method, - * calls to which are serialized through Backend.THREADLOCK - * anyway, so holding a pin here for the duration doesn't - * further limit concurrency. Hold m_state's monitor also to - * block any extraneous reading interleaved with the verifier. - */ - m_state.pin(); - try - { - ByteBuffer buf = buffer(); - synchronized ( m_state ) - { - if ( 0 != buf.position() ) - throw new SQLException( - "Variable-length input data to be verified " + - " not positioned at start", - "55000"); - InputStream dontCloseMe = new FilterInputStream(this) - { - @Override - public void close() throws IOException { } - }; - v.verify(dontCloseMe); - if ( 0 != buf.remaining() ) - throw new SQLException( - "Verifier finished prematurely"); - } - } - catch ( SQLException | RuntimeException e ) - { - throw e; - } - catch ( Exception e ) - { - throw new SQLException( - "Exception verifying variable-length data: " + - e.getMessage(), "XX000", e); - } - finally - { - m_state.unpin(); - } - } - - @Override - protected ByteBuffer buffer() throws IOException - { - try - { - if ( null == m_movingBuffer ) - { - ByteBuffer b = Input.this.buffer(); - m_movingBuffer = b.duplicate().order(b.order()); - } - return m_movingBuffer; - } - catch ( SQLException sqe ) - { - throw new IOException("Read from varlena failed", sqe); - } - } - - @Override - public long adopt(DualState.Key cookie) throws SQLException - { - Input.this.pin(); - try - { - if ( ! m_open ) - throw new SQLException( - "Cannot adopt VarlenaWrapper.Input after " + - "it is closed", "55000"); - return Input.this.adopt(cookie); - } - finally - { - Input.this.unpin(); - } - } - } - private static class State - extends DualState.SingleMemContextDelete + extends DualState.SingleMemContextDelete { private ByteBuffer m_buf; + private long m_resourceOwner; private long m_snapshot; private long m_varlena; private State( - DualState.Key cookie, Input vr, long resourceOwner, - long memContext, long snapshot, long varlenaPtr, ByteBuffer buf) + VarlenaWrapper.Input vr, long resourceOwner, + MemoryContext memContext, + long snapshot, long varlenaPtr, ByteBuffer buf) { - super(cookie, vr, resourceOwner, memContext); + super(vr, ResourceOwnerImpl.fromAddress(resourceOwner), + memContext); + m_resourceOwner = resourceOwner; // keep that address handy m_snapshot = snapshot; m_varlena = varlenaPtr; m_buf = null == buf ? 
buf : buf.asReadOnlyBuffer(); @@ -370,7 +215,8 @@ private ByteBuffer buffer() throws SQLException doInPG(() -> { m_buf = _detoast( - m_varlena, guardedLong(), m_snapshot, + m_varlena, + ((Addressed)memoryContext()).address(), m_snapshot, m_resourceOwner).asReadOnlyBuffer(); m_snapshot = 0; }); @@ -382,21 +228,22 @@ m_varlena, guardedLong(), m_snapshot, } } - private long adopt(DualState.Key cookie) throws SQLException + private long adopt() throws SQLException { - adoptionLock(cookie); + adoptionLock(); try { if ( 0 != m_snapshot ) { /* fetch, before snapshot released */ - m_varlena = _fetch(m_varlena, guardedLong()); + m_varlena = _fetch( + m_varlena, ((Addressed)memoryContext()).address()); } return m_varlena; } finally { - adoptionUnlock(cookie); + adoptionUnlock(); } } @@ -487,7 +334,6 @@ public class Output extends OutputStream implements VarlenaWrapper /** * Construct a {@code VarlenaWrapper.Output}. - * @param cookie Capability held by native code. * @param resourceOwner Resource owner whose release will indicate that * the underlying varlena is no longer valid. * @param context Pointer to memory context containing the underlying @@ -497,11 +343,12 @@ public class Output extends OutputStream implements VarlenaWrapper * @param buf Writable direct {@code ByteBuffer} constructed over (an * initial region of) the varlena's data bytes. */ - private Output(DualState.Key cookie, long resourceOwner, + private Output(long resourceOwner, long context, long varlenaPtr, ByteBuffer buf) { m_state = new State( - cookie, this, resourceOwner, context, varlenaPtr, buf); + this, ResourceOwnerImpl.fromAddress(resourceOwner), + MemoryContextImpl.fromAddress(context), varlenaPtr, buf); } /** @@ -649,7 +496,7 @@ public void free() throws IOException } @Override - public long adopt(DualState.Key cookie) throws SQLException + public long adopt() throws SQLException { m_state.pin(); try @@ -658,7 +505,7 @@ public long adopt(DualState.Key cookie) throws SQLException throw new SQLException( "Writing of VarlenaWrapper.Output not yet complete", "55000"); - return m_state.adopt(cookie); + return m_state.adopt(); } finally { @@ -689,11 +536,11 @@ private static class State private Verifier m_verifier; private State( - DualState.Key cookie, Output vr, - long resourceOwner, long memContext, long varlenaPtr, - ByteBuffer buf) + Output vr, + ResourceOwner resourceOwner, MemoryContext memContext, + long varlenaPtr, ByteBuffer buf) { - super(cookie, vr, resourceOwner, memContext); + super(vr, resourceOwner, memContext); m_varlena = varlenaPtr; m_buf = buf; } @@ -730,16 +577,16 @@ private ByteBuffer buffer(int desiredCapacity) throws SQLException } } - private long adopt(DualState.Key cookie) throws SQLException + private long adopt() throws SQLException { - adoptionLock(cookie); + adoptionLock(); try { return m_varlena; } finally { - adoptionUnlock(cookie); + adoptionUnlock(); } } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaXMLRenderer.java b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaXMLRenderer.java index 11bc27548..2b58a8f24 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaXMLRenderer.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaXMLRenderer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2019-2023 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -20,47 +20,40 @@ import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; +import org.postgresql.pljava.adt.spi.Datum; + +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + +import org.postgresql.pljava.pg.DatumImpl; + /** * Class adapting a {@code ByteBufferXMLReader} to a - * {@code VarlenaWrapper.Input}. + * {@code Datum.Input}. */ public abstract class VarlenaXMLRenderer -extends ByteBufferXMLReader implements VarlenaWrapper +extends ByteBufferXMLReader implements DatumImpl { - private final VarlenaWrapper.Input m_input; + private final Datum.Input m_input; protected final CharsetDecoder m_decoder; /** - * A duplicate of the {@code VarlenaWrapper.Input}'s byte buffer, + * A duplicate of the {@code Datum.Input}'s byte buffer, * so its {@code position} can be updated by the * {@code XMLEventReader} operations without affecting the original * (therefore multiple streams may read one {@code Input}). */ private ByteBuffer m_movingBuffer; - public VarlenaXMLRenderer(VarlenaWrapper.Input input) throws SQLException + public VarlenaXMLRenderer(Datum.Input input) throws SQLException { m_input = input; - Charset cs = Session.implServerCharset(); - if ( null == cs ) - { - try - { - input.close(); - } - catch ( IOException e ) { } - throw new SQLFeatureNotSupportedException("SQLXML: no Java " + - "Charset found to match server encoding; perhaps set " + - "org.postgresql.server.encoding system property to a " + - "valid Java charset name for the same encoding?", "0A000"); - - } + Charset cs = SERVER_ENCODING.charset(); m_decoder = cs.newDecoder(); } @Override - public long adopt(DualState.Key cookie) throws SQLException + public long adopt() throws SQLException { throw new UnsupportedOperationException( "adopt() on a synthetic XML rendering"); @@ -75,7 +68,7 @@ public String toString() @Override public String toString(Object o) { - return m_input.toString(o); + return ((DatumImpl)m_input).toString(o); } @Override diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/PgNodeTreeAsXML.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/PgNodeTreeAsXML.java index 8d5de171b..731d7c4c6 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/PgNodeTreeAsXML.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/PgNodeTreeAsXML.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2019-2023 Tada AB and other contributors, as listed below. 
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -28,7 +28,7 @@ import org.xml.sax.SAXException; -import org.postgresql.pljava.internal.VarlenaWrapper; +import org.postgresql.pljava.adt.spi.Datum; import org.postgresql.pljava.internal.VarlenaXMLRenderer; /** @@ -39,7 +39,7 @@ */ public class PgNodeTreeAsXML extends VarlenaXMLRenderer { - PgNodeTreeAsXML(VarlenaWrapper.Input vwi) throws SQLException + PgNodeTreeAsXML(Datum.Input vwi) throws SQLException { super(vwi); } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java index 3ca2d14dc..82961c1a0 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java @@ -46,32 +46,75 @@ import java.util.Calendar; import java.util.HashMap; import java.util.Iterator; +import java.util.List; // for SlotTester import java.util.Map; import java.util.Properties; import java.util.concurrent.Executor; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; +import org.postgresql.pljava.internal.Invocation; import org.postgresql.pljava.internal.Oid; import org.postgresql.pljava.internal.PgSavepoint; +import java.lang.reflect.Field; +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.internal.SPI; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; +import org.postgresql.pljava.model.Portal; +import static org.postgresql.pljava.model.Portal.Direction.FORWARD; +import org.postgresql.pljava.model.SlotTester; +import org.postgresql.pljava.model.TupleTableSlot; +import org.postgresql.pljava.pg.TupleTableSlotImpl; + /** * Provides access to the current connection (session) the Java stored * procedure is running in. It is returned from the driver manager * with * DriverManager.getConnection("jdbc:default:connection"); - * and cannot be managed in any way since it's already running inside - * a transaction. This means the following methods cannot be used. - *

- * <ul>
- * <li>commit()</li>
- * <li>rollback()</li>
- * <li>setAutoCommit()</li>
- * <li>setTransactionIsolation()</li>
- * </ul>
+ * <p>
    + * PostgreSQL calls functions within a transaction and does not allow them to + * issue transaction control operations ({@code commit} / {@code rollback} / + * {@code setAutoCommit} / {@code setTransactionIsolation}). It can allow + * commit / rollback within a procedure or a {@code DO} block, if that + * procedure or {@code DO} block was not executed in an existing explicit + * transaction. * @author Thomas Hallgren */ -public class SPIConnection implements Connection +public class SPIConnection implements Connection, SlotTester { + @Override // SlotTester + public Portal unwrapAsPortal(ResultSet rs) throws SQLException + { + return ((SPIResultSet)rs).unwrapAsPortal(); + } + + @Override // SlotTester + @SuppressWarnings("deprecation") + public List test(String query) + { + try ( Statement s = createStatement() ) + { + ResultSet rs = s.executeQuery(query); + Portal p = unwrapAsPortal(rs); + return p.fetch(FORWARD, Portal.ALL); + } + catch ( SQLException e ) + { + throw unchecked(e); + } + } + + @Override // SlotTester + public Adapter adapterPlease(String cname, String field) + throws ReflectiveOperationException + { + Class cls = + Class.forName(cname).asSubclass(SlotTester.Visible.class); + Field f = cls.getField(field); + return (Adapter)f.get(null); + } + /** * The version number of the currently executing PostgreSQL * server. @@ -189,25 +232,31 @@ public void close() } /** - * It's not legal to do a commit within a call from SQL. - * @throws SQLException indicating that this feature is not supported. + * Commits the top-level transaction. + *

    + * PostgreSQL does not allow such an action from within a function, but it + * can be allowed within a procedure or DO block, if not executed within + * an existing explicit transaction. */ @Override public void commit() throws SQLException { - throw new UnsupportedFeatureException("Connection.commit"); + SPI.commit(); } /** - * It's not legal to do a rollback within a call from SQL. - * @throws SQLException indicating that this feature is not supported. + * Rolls back the top-level transaction. + *
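[Editorial aside, not part of this patch.] A rough illustration of the behavior described in the rewritten javadoc above: a static method assumed to be declared in SQL as a PROCEDURE (the class, method, and work_queue table below are hypothetical) can call commit() on the default connection, which this change routes to SPI.commit(). Called from a plain function, or inside an existing explicit transaction, the same call would instead draw PostgreSQL's usual error for an invalid transaction termination.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class BatchCommitSketch
{
	/*
	 * Assumed to be exposed in SQL as a PROCEDURE and invoked outside any
	 * explicit transaction block; "work_queue" is a hypothetical table.
	 */
	public static void drainQueue() throws SQLException
	{
		Connection c =
			DriverManager.getConnection("jdbc:default:connection");
		try ( Statement s = c.createStatement() )
		{
			for ( int batch = 0 ; batch < 3 ; ++ batch )
			{
				s.executeUpdate(
					"DELETE FROM work_queue WHERE batch = " + batch);
				c.commit(); // with this patch, delegates to SPI.commit()
			}
		}
	}
}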

    + * PostgreSQL does not allow such an action from within a function, but it + * can be allowed within a procedure or DO block, if not executed within + * an existing explicit transaction. */ @Override public void rollback() throws SQLException { - throw new UnsupportedFeatureException("Connection.rollback"); + SPI.rollback(); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java index 673fe5eeb..a25c0e2b8 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -45,6 +45,8 @@ public class SPIResultSet extends ResultSetBase private boolean m_open; + private boolean m_portalUnwrapped; + SPIResultSet(SPIStatement statement, Portal portal, long maxRows) throws SQLException { @@ -57,6 +59,17 @@ public class SPIResultSet extends ResultSetBase m_open = true; } + public Portal unwrapAsPortal() throws SQLException + { + if ( ! m_open || null != m_table || null != m_currentRow + || null != m_nextRow || -1 != m_tableRow ) + throw new IllegalStateException( + "too late to unwrap SPIResultSet to Portal"); + m_portalUnwrapped = true; + close(); + return m_portal; + } + @Override public void close() throws SQLException @@ -64,7 +77,8 @@ public void close() if(m_open) { m_open = false; - m_portal.close(); + if ( ! m_portalUnwrapped ) + m_portal.close(); m_statement.resultSetClosed(this); m_table = null; m_tableRow = -1; diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java index 2b99e01ba..005097748 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -52,11 +52,10 @@ public class SQLInputFromTuple extends SingleRowReader implements SQLInput * {@code HeapTupleHeader}, as well as the TupleDesc (Java object this time) * describing its structure. 
*/ - public SQLInputFromTuple(DualState.Key cookie, long resourceOwner, - long heapTupleHeaderPointer, TupleDesc tupleDesc) + public SQLInputFromTuple(long heapTupleHeaderPointer, TupleDesc tupleDesc) throws SQLException { - super(cookie, resourceOwner, heapTupleHeaderPointer, tupleDesc); + super(heapTupleHeaderPointer, tupleDesc); m_index = 0; m_columns = tupleDesc.size(); } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLXMLImpl.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLXMLImpl.java index 8baae3dd7..81d79bc7e 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLXMLImpl.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLXMLImpl.java @@ -80,7 +80,8 @@ import org.w3c.dom.Node; import org.w3c.dom.Text; -import static org.postgresql.pljava.internal.Session.implServerCharset; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + import org.postgresql.pljava.internal.VarlenaWrapper; import java.sql.SQLFeatureNotSupportedException; @@ -104,6 +105,8 @@ import java.io.FilterOutputStream; import java.io.OutputStreamWriter; +import static java.nio.charset.StandardCharsets.UTF_8; + import static javax.xml.transform.OutputKeys.ENCODING; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerFactory; @@ -177,13 +180,22 @@ /* ... for SQLXMLImpl.Readable.Synthetic */ +import java.io.StringWriter; +import javax.xml.transform.TransformerConfigurationException; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; import org.postgresql.pljava.internal.VarlenaXMLRenderer; import static org.postgresql.pljava.jdbc.TypeOid.PG_NODE_TREEOID; +/* ... for new model / adapter interoperability */ + +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.pg.DatumImpl; + /** * Implementation of {@link SQLXML} for the SPI connection. */ -public abstract class SQLXMLImpl implements SQLXML +public abstract class SQLXMLImpl implements SQLXML { private static final VarHandle s_backingVH; protected volatile V m_backing; @@ -193,7 +205,7 @@ public abstract class SQLXMLImpl implements SQLXML try { s_backingVH = lookup().findVarHandle( - SQLXMLImpl.class, "m_backing", VarlenaWrapper.class); + SQLXMLImpl.class, "m_backing", Datum.class); } catch ( ReflectiveOperationException e ) { @@ -322,11 +334,30 @@ static SQLException normalizedException(Exception e) + e.getMessage(), "XX000", e); } + /** + * Create readable SQLXML instance over a {@code Datum.Input}, recording + * the source type. + *

    + * The source type can be used to detect efforts to store this value into + * a destination of a different type, and apply a verifier for type safety. + */ + public static SQLXML newReadable( + Datum.Input datum, RegType pgType, boolean synthetic) + throws SQLException + { + int oid = pgType.oid(); + + if ( synthetic ) + return new Readable.Synthetic(datum, oid); + + return new Readable.PgXML<>(datum, oid); + } + /** * Create a new, initially empty and writable, SQLXML instance, whose * backing memory will in a transaction-scoped PostgreSQL memory context. */ - static SQLXML newWritable() + public static SQLXML newWritable() { return doInPG(() -> _newWritable()); } @@ -343,12 +374,12 @@ static SQLXML newWritable() * @param sx The SQLXML object to be adopted. * @param oid The PostgreSQL type ID the native code is expecting; * see Readable.adopt for why that can matter. - * @return The underlying {@code VarlenaWrapper} (which has its own + * @return The underlying {@code Datum} (which has its own * {@code adopt} method the native code will call next. * @throws SQLException if this {@code SQLXML} instance is not in the * proper state to be adoptable. */ - private static VarlenaWrapper adopt(SQLXML sx, int oid) throws SQLException + private static Datum adopt(SQLXML sx, int oid) throws SQLException { if ( sx instanceof Readable.PgXML || sx instanceof Writable ) return ((SQLXMLImpl)sx).adopt(oid); @@ -364,15 +395,15 @@ private static VarlenaWrapper adopt(SQLXML sx, int oid) throws SQLException /** * Allow native code to claim complete control over the - * underlying {@code VarlenaWrapper} and dissociate it from Java. + * underlying {@code Datum} and dissociate it from Java. * @param oid The PostgreSQL type ID the native code is expecting; * see Readable.adopt for why that can matter. - * @return The underlying {@code VarlenaWrapper} (which has its own + * @return The underlying {@code Datum} (which has its own * {@code adopt} method the native code will call next. * @throws SQLException if this {@code SQLXML} instance is not in the * proper state to be adoptable. */ - protected abstract VarlenaWrapper adopt(int oid) throws SQLException; + protected abstract Datum adopt(int oid) throws SQLException; /** * Return a description of this object useful for debugging (not the raw @@ -402,7 +433,7 @@ protected String toString(Object o) @SuppressWarnings("unchecked") // javac 24 first to warn here V backing = (V)s_backingVH.getAcquire(this); if ( null != backing ) - return backing.toString(o); + return ((DatumImpl)backing).toString(o); Class c = o.getClass(); String cn = c.getCanonicalName(); int pnl = c.getPackageName().length(); @@ -512,7 +543,7 @@ static InputStream correctedDeclStream( int markLimit = 1048576; // don't assume a markable stream's economical if ( ! 
is.markSupported() ) is = new BufferedInputStream(is); - else if ( is instanceof VarlenaWrapper ) // a VarlenaWrapper is, though + else if ( is instanceof Datum ) // a Datum is, though markLimit = Integer.MAX_VALUE; InputStream msis = new MarkableSequenceInputStream(pfis, rais, is); @@ -695,13 +726,13 @@ private static boolean useWrappingElement(InputStream is, Reader r) - static abstract class Readable + static abstract class Readable extends SQLXMLImpl { private static final VarHandle s_readableVH; protected volatile boolean m_readable = true; protected final int m_pgTypeID; - protected Charset m_serverCS = implServerCharset(); + protected Charset m_serverCS = SERVER_ENCODING.charset(); protected boolean m_wrapped = false; static @@ -720,25 +751,17 @@ static abstract class Readable /** * Create a readable instance, when called by native code (the * constructor is otherwise private, after all), passing an initialized - * {@code VarlenaWrapper} and the PostgreSQL type ID from which it has + * {@code Datum} and the PostgreSQL type ID from which it has * been created. - * @param vwi The already-created wrapper for reading the varlena from + * @param di The already-created wrapper for reading the varlena from * native memory. * @param oid The PostgreSQL type ID from which this instance is being * created (for why it matters, see {@code adopt}). */ - private Readable(V vwi, int oid) throws SQLException + private Readable(V di, int oid) throws SQLException { - super(vwi); + super(di); m_pgTypeID = oid; - if ( null == m_serverCS ) - { - free(); - throw new SQLFeatureNotSupportedException("SQLXML: no Java " + - "Charset found to match server encoding; perhaps set " + - "org.postgresql.server.encoding system property to a " + - "valid Java charset name for the same encoding?", "0A000"); - } } private V backingAndClearReadable() throws SQLException @@ -937,17 +960,17 @@ public T getSource(Class sourceClass) protected String toString(Object o) { return String.format("%s %sreadable %swrapped", - super.toString(o), (boolean)s_readableVH.getAcquire() + super.toString(o), (boolean)s_readableVH.getAcquire(this) ? "" : "not ", m_wrapped ? "" : "not "); } - static class PgXML - extends Readable + static class PgXML + extends Readable { - private PgXML(VarlenaWrapper.Input vwi, int oid) + private PgXML(Datum.Input di, int oid) throws SQLException { - super(vwi.new Stream(), oid); + super(di.inputStream(), oid); } /** @@ -979,18 +1002,17 @@ private PgXML(VarlenaWrapper.Input vwi, int oid) * with the PostgreSQL types. */ @Override - protected VarlenaWrapper adopt(int oid) throws SQLException + protected Datum adopt(int oid) throws SQLException { - VarlenaWrapper.Input.Stream vw = (VarlenaWrapper.Input.Stream) - s_backingVH.getAndSet(this, null); + T is = (T)s_backingVH.getAndSet(this, null); if ( ! 
(boolean)s_readableVH.getAcquire(this) ) throw new SQLNonTransientException( "SQLXML object has already been read from", "55000"); - if ( null == vw ) + if ( null == is ) backingIfNotFreed(); /* shorthand to throw the exception */ if ( m_pgTypeID != oid ) - vw.verify(new Verifier()); - return vw; + is.verify(new Verifier()::verify); + return is; } /* @@ -1000,7 +1022,7 @@ protected VarlenaWrapper adopt(int oid) throws SQLException */ @Override protected InputStream toBinaryStream( - VarlenaWrapper.Input.Stream backing, boolean neverWrap) + T backing, boolean neverWrap) throws SQLException, IOException { boolean[] wrapped = { false }; @@ -1012,7 +1034,7 @@ protected InputStream toBinaryStream( @Override protected Reader toCharacterStream( - VarlenaWrapper.Input.Stream backing, boolean neverWrap) + T backing, boolean neverWrap) throws SQLException, IOException { InputStream is = toBinaryStream(backing, neverWrap); @@ -1021,7 +1043,7 @@ protected Reader toCharacterStream( @Override protected Adjusting.XML.SAXSource toSAXSource( - VarlenaWrapper.Input.Stream backing) + T backing) throws SQLException, SAXException, IOException { InputStream is = toBinaryStream(backing, false); @@ -1030,7 +1052,7 @@ protected Adjusting.XML.SAXSource toSAXSource( @Override protected Adjusting.XML.StAXSource toStAXSource( - VarlenaWrapper.Input.Stream backing) + T backing) throws SQLException, XMLStreamException, IOException { InputStream is = toBinaryStream(backing, false); @@ -1039,7 +1061,7 @@ protected Adjusting.XML.StAXSource toStAXSource( @Override protected Adjusting.XML.DOMSource toDOMSource( - VarlenaWrapper.Input.Stream backing) + T backing) throws SQLException, SAXException, IOException, ParserConfigurationException @@ -1051,19 +1073,19 @@ protected Adjusting.XML.DOMSource toDOMSource( static class Synthetic extends Readable { - private Synthetic(VarlenaWrapper.Input vwi, int oid) + private Synthetic(Datum.Input di, int oid) throws SQLException { - super(xmlRenderer(oid, vwi), oid); + super(xmlRenderer(oid, di), oid); } private static VarlenaXMLRenderer xmlRenderer( - int oid, VarlenaWrapper.Input vwi) + int oid, Datum.Input di) throws SQLException { switch ( oid ) { - case PG_NODE_TREEOID: return new PgNodeTreeAsXML(vwi); + case PG_NODE_TREEOID: return new PgNodeTreeAsXML(di); default: throw new SQLNonTransientException( "no synthetic SQLXML support for Oid " + oid, "0A000"); @@ -1071,7 +1093,7 @@ private static VarlenaXMLRenderer xmlRenderer( } @Override - protected VarlenaWrapper adopt(int oid) throws SQLException + protected Datum adopt(int oid) throws SQLException { throw new SQLFeatureNotSupportedException( "adopt() on a synthetic SQLXML not yet supported", "0A000"); @@ -1105,6 +1127,7 @@ protected Adjusting.XML.SAXSource toSAXSource( return new AdjustingSAXSource(backing, new InputSource()); } + @Override protected Adjusting.XML.StAXSource toStAXSource( VarlenaXMLRenderer backing) throws SQLException, XMLStreamException, IOException @@ -1114,6 +1137,7 @@ protected Adjusting.XML.StAXSource toStAXSource( "0A000"); } + @Override protected Adjusting.XML.DOMSource toDOMSource( VarlenaXMLRenderer backing) throws @@ -1124,6 +1148,47 @@ protected Adjusting.XML.DOMSource toDOMSource( "synthetic SQLXML as DOMSource not yet supported", "0A000"); } + + /** + * Until there is better support for {@code toBinaryStream} and + * {@code toCharacterStream}, at least supply a working brute-force + * {@code toString} to support quick examination of values. 
+ */ + @Override + public String getString() throws SQLException + { + XMLReader backing = + ((Readable)this) + .backingAndClearReadable(); + if ( null == backing ) + throw new SQLNonTransientException( + "Attempted use of getString on " + + "an unreadable SQLXML object", "55000"); + + SAXTransformerFactory saxtf = (SAXTransformerFactory) + SAXTransformerFactory.newDefaultInstance(); + try + { + TransformerHandler th = saxtf.newTransformerHandler(); + StringWriter w = new StringWriter(); + th.setResult(new StreamResult(w)); + + backing.setContentHandler(th); + backing.setDTDHandler(th); + backing.setProperty( + SAX2PROPERTY.LEXICAL_HANDLER.propertyUri(), th); + backing.parse(new InputSource()); + return w.toString(); + } + catch ( TransformerConfigurationException | IOException | + SAXException e ) + { + /* + * None of the above should really happen here. + */ + throw unchecked(e); + } + } } } @@ -1232,7 +1297,7 @@ static class Writable extends SQLXMLImpl { private static final VarHandle s_writableVH; private volatile boolean m_writable = true; - private Charset m_serverCS = implServerCharset(); + private Charset m_serverCS = SERVER_ENCODING.charset(); private DOMResult m_domResult; static @@ -1251,18 +1316,6 @@ static class Writable extends SQLXMLImpl private Writable(VarlenaWrapper.Output vwo) throws SQLException { super(vwo); - if ( null == m_serverCS ) - { - try - { - vwo.free(); - } - catch ( IOException ioe ) { } - throw new SQLFeatureNotSupportedException("SQLXML: no Java " + - "Charset found to match server encoding; perhaps set " + - "org.postgresql.server.encoding system property to a " + - "valid Java charset name for the same encoding?", "0A000"); - } } private VarlenaWrapper.Output backingAndClearWritable() @@ -1503,7 +1556,7 @@ protected VarlenaWrapper adopt(int oid) throws SQLException protected String toString(Object o) { return String.format("%s %swritable", super.toString(o), - (boolean)s_writableVH.getAcquire() ? "" : "not "); + (boolean)s_writableVH.getAcquire(this) ? "" : "not "); } } @@ -1548,7 +1601,7 @@ protected void verify(InputStream is) throws Exception { boolean[] wrapped = { false }; is = correctedDeclStream( - is, false, implServerCharset(), wrapped); + is, false, SERVER_ENCODING.charset(), wrapped); /* * The supplied XMLReader is never set up to do unwrapping, which is @@ -3477,7 +3530,7 @@ byte[] prefix(Charset serverCharset) throws IOException boolean canOmitVersion = true; // no declaration => 1.0 byte[] version = new byte[] { '1', '.', '0' }; boolean canOmitEncoding = - null == serverCharset || "UTF-8".equals(serverCharset.name()); + null == serverCharset || UTF_8.equals(serverCharset); boolean canOmitStandalone = true; byte[] parseResult = m_save.toByteArray(); @@ -3635,7 +3688,7 @@ void checkEncoding(Charset serverCharset, boolean strict) } } - if ( ! strict || "UTF-8".equals(serverCharset.name()) ) + if ( ! strict || UTF_8.equals(serverCharset) ) return; throw new SQLDataException( "XML does not declare a character set, and server encoding " + diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java index 2b5d62cfa..7db34ad16 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. 
* Copyright (c) 2010, 2011 PostgreSQL Global Development Group * * All rights reserved. This program and the accompanying materials @@ -18,6 +18,7 @@ import static org.postgresql.pljava.internal.Backend.doInPG; import org.postgresql.pljava.internal.DualState; +import org.postgresql.pljava.internal.Invocation; import org.postgresql.pljava.internal.TupleDesc; /** @@ -35,10 +36,9 @@ public class SingleRowReader extends SingleRowResultSet private static class State extends DualState.SingleGuardedLong { - private State( - DualState.Key cookie, SingleRowReader srr, long ro, long hth) + private State(SingleRowReader srr, long hth) { - super(cookie, srr, ro, hth); + super(srr, Invocation.current(), hth); } /** @@ -73,18 +73,13 @@ private long getHeapTupleHeaderPtr() throws SQLException /** * Construct a {@code SingleRowReader} from a {@code HeapTupleHeader} * and a {@link TupleDesc TupleDesc}. - * @param cookie Capability obtained from native code to construct a - * {@code SingleRowReader} instance. - * @param resourceOwner Value identifying a scope in PostgreSQL during which - * the native state encapsulated here will be valid. * @param hth Native pointer to a PG {@code HeapTupleHeader} * @param tupleDesc A {@code TupleDesc}; the Java class this time. */ - public SingleRowReader(DualState.Key cookie, long resourceOwner, long hth, - TupleDesc tupleDesc) + public SingleRowReader(long hth, TupleDesc tupleDesc) throws SQLException { - m_state = new State(cookie, this, resourceOwner, hth); + m_state = new State(this, hth); m_tupleDesc = tupleDesc; } diff --git a/pljava/src/main/java/org/postgresql/pljava/mbeans/DualStateStatistics.java b/pljava/src/main/java/org/postgresql/pljava/mbeans/DualStateStatistics.java index f259bbc14..826190a8b 100644 --- a/pljava/src/main/java/org/postgresql/pljava/mbeans/DualStateStatistics.java +++ b/pljava/src/main/java/org/postgresql/pljava/mbeans/DualStateStatistics.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -30,7 +30,7 @@ public interface DualStateStatistics long getJavaUnreachable(); long getJavaReleased(); long getNativeReleased(); - long getResourceOwnerPasses(); + long getLifespanPasses(); long getReferenceQueuePasses(); long getReferenceQueueItems(); long getContendedLocks(); diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/AccessMethodImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/AccessMethodImpl.java new file mode 100644 index 000000000..ca4e89f5a --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/AccessMethodImpl.java @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.AMOID; // syscache + +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Implementation of the {@link AccessMethod AccessMethod} interface. + */ +class AccessMethodImpl extends Addressed +implements Nonshared, Named, AccessMethod +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return AMOID; + } + + /* Implementation of Named */ + + private static Simple name(AccessMethodImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.AMNAME, SIMPLE_INSTANCE); + } + + /* Implementation of AccessMethod */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + AccessMethodImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_HANDLER; + static final int SLOT_TYPE; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(AccessMethodImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(AccessMethodImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + + .withReceiverType(null) + .withDependent("handler", SLOT_HANDLER = i++) + .withDependent( "type", SLOT_TYPE = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute AMNAME; + static final Attribute AMHANDLER; + static final Attribute AMTYPE; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "amname", + "amhandler", + "amtype" + ).iterator(); + + AMNAME = itr.next(); + AMHANDLER = itr.next(); + AMTYPE = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static RegProcedure handler(AccessMethodImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") + RegProcedure p = (RegProcedure) + s.get(Att.AMHANDLER, REGPROCEDURE_INSTANCE); + return p; + } + + private static Type type(AccessMethodImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return typeFromCatalog(s.get(Att.AMTYPE, INT1_INSTANCE)); + } + + /* API methods */ + + @Override + public RegProcedure handler() + { + try + { + MethodHandle h = m_slots[SLOT_HANDLER]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Type type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (Type)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + private static Type typeFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'t': return Type.TABLE; + case (byte)'i': return Type.INDEX; + } + throw unchecked(new SQLException( + "unrecognized Type '" + (char)b + "' in catalog", + "XX000")); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/AclItem.java b/pljava/src/main/java/org/postgresql/pljava/pg/AclItem.java new file mode 100644 index 000000000..9af2f21ff --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/AclItem.java @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import static java.lang.Integer.lowestOneBit; +import static java.lang.Integer.numberOfTrailingZeros; + +import java.lang.annotation.Native; +import java.util.List; + +import java.nio.ByteBuffer; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; + +import static + org.postgresql.pljava.pg.CatalogObjectImpl.Factory.staticFormObjectId; + +import static org.postgresql.pljava.pg.ModelConstants.N_ACL_RIGHTS; +import static org.postgresql.pljava.pg.ModelConstants.PG_VERSION_NUM; + +/** + * Implementation of the {@link CatalogObject.Grant Grant} interface. 
+ */ +public abstract class AclItem implements CatalogObject.Grant +{ + /* + * PostgreSQL defines these in include/nodes/parsenodes.h + */ + @Native static final short ACL_INSERT = 1 << 0; + @Native static final short ACL_SELECT = 1 << 1; + @Native static final short ACL_UPDATE = 1 << 2; + @Native static final short ACL_DELETE = 1 << 3; + @Native static final short ACL_TRUNCATE = 1 << 4; + @Native static final short ACL_REFERENCES = 1 << 5; + @Native static final short ACL_TRIGGER = 1 << 6; + @Native static final short ACL_EXECUTE = 1 << 7; + @Native static final short ACL_USAGE = 1 << 8; + @Native static final short ACL_CREATE = 1 << 9; + @Native static final short ACL_CREATE_TEMP = 1 << 10; + @Native static final short ACL_CONNECT = 1 << 11; + // below appearing in PG 15 + @Native static final short ACL_SET = 1 << 12; + @Native static final short ACL_ALTER_SYSTEM = 1 << 13; + // below appearing in PG 17 + @Native static final short ACL_MAINTAIN = 1 << 14; + + @Native static final int ACL_ID_PUBLIC = 0; + + @Native static final int OFFSET_ai_grantee = 0; + @Native static final int OFFSET_ai_grantor = 4; + @Native static final int OFFSET_ai_privs = 8; + + /** + * These one-letter abbreviations are to match the order of the bit masks + * declared above, following the {@code PRIVILEGE-ABBREVS-TABLE} in the + * PostgreSQL documentation, under Privileges, in the Data Definition + * chapter. + *

    + * Note that the order of the table in the documentation need not match + * the order of the bits above. This string must be ordered like the bits. + * It can also be found as {@code ACL_ALL_RIGHTS_STR} in + * {@code include/utils/acl.h}. + */ + private static final String s_abbr = "arwdDxtXUCTcsAm"; + + static + { + /* + * This is not a check for equality, because N_ACL_RIGHTS has grown + * (between PG 14 and 15, and between 16 and 17). So the string should + * include all the letters that might be used, and the assertion will + * catch if a new PG version has grown the count again. + * + * For now, assume that, in older versions, unused bits will be zero + * and we won't have to bother masking them off. + */ + assert N_ACL_RIGHTS <= s_abbr.length() : "AclItem abbreviations"; + assert + s_abbr.length() == s_abbr.codePoints().count() : "AclItem abbr BMP"; + } + + private final RegRole.Grantee m_grantee; + private final int m_grantor; // less often interesting + + protected AclItem(int grantee, int grantor) + { + m_grantee = + (RegRole.Grantee) staticFormObjectId(RegRole.CLASSID, grantee); + m_grantor = grantor; + } + + @Override public RegRole.Grantee to() + { + return m_grantee; + } + + @Override public RegRole by() + { + return staticFormObjectId(RegRole.CLASSID, m_grantor); + } + + /** + * Implementation of all non-OnRole subinterfaces of Grant. + *
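[Editorial aside, not part of this patch.] A minimal standalone sketch of the decoding convention documented above: each set bit of a privilege mask maps, by bit position, to one letter of the abbreviation string, in the same order as the ACL_* constants. The class name and the mask used in main() are invented for illustration.

import static java.lang.Integer.lowestOneBit;
import static java.lang.Integer.numberOfTrailingZeros;

public class AclAbbrevSketch
{
	// same ordering convention as s_abbr / ACL_ALL_RIGHTS_STR above
	static final String ABBR = "arwdDxtXUCTcsAm";

	static String decode(int mask)
	{
		StringBuilder sb = new StringBuilder();
		while ( 0 != mask )
		{
			int bit = lowestOneBit(mask);
			mask ^= bit;
			sb.append(ABBR.charAt(numberOfTrailingZeros(bit)));
		}
		return sb.toString();
	}

	public static void main(String[] args)
	{
		// ACL_INSERT | ACL_SELECT | ACL_UPDATE = bits 0, 1, 2
		System.out.println(decode(0b111)); // prints "arw"
	}
}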

    + * The distinct interfaces in the API are a type-safety veneer to help + * clients remember what privileges apply to what object types. Underneath, + * this class implements them all. + */ + public static class NonRole extends AclItem + implements + OnClass, OnNamespace, OnSetting, + CatalogObject.EXECUTE, CatalogObject.CREATE_TEMP, CatalogObject.CONNECT + { + private final int m_priv; + private final int m_goption; + + public NonRole(ByteBuffer b) + { + super(b.getInt(OFFSET_ai_grantee), b.getInt(OFFSET_ai_grantor)); + + if ( PG_VERSION_NUM < 160000 ) + { + assert OFFSET_ai_privs + Integer.BYTES == b.limit(); + int privs = b.getInt(OFFSET_ai_privs); + m_priv = (privs & 0xffff); + m_goption = (privs >>> 16); + return; + } + + assert OFFSET_ai_privs + Long.BYTES == b.limit(); + long privs = b.getLong(OFFSET_ai_privs); + m_priv = (int)(privs & 0xffffffff); + m_goption = (int)(privs >>> 32); + } + + private boolean priv(int mask) + { + return 0 != (m_priv & mask); + } + + private boolean goption(int mask) + { + return 0 != (m_goption & mask); + } + + @Override + public String toString() + { + StringBuilder sb = new StringBuilder(); + /* + * Should this not be sb.append(to().nameAsGrantee()) ? You'd think, + * but to match the text representation from PostgreSQL itself, the + * bare = is the right thing to show for public. + */ + if ( ! to().isPublic() ) + sb.append(to().name()); + sb.append('='); + int priv = m_priv; + int goption = m_goption; + while ( 0 != priv ) + { + int bit = lowestOneBit(priv); + priv ^= bit; + sb.append(s_abbr.charAt(numberOfTrailingZeros(bit))); + if ( 0 != (goption & bit) ) + sb.append('*'); + } + sb.append('/').append(by().name()); + return sb.toString(); + } + + @Override public boolean selectGranted() + { + return priv(ACL_SELECT); + } + + @Override public boolean selectGrantable() + { + return goption(ACL_SELECT); + } + + @Override public boolean insertGranted() + { + return priv(ACL_INSERT); + } + + @Override public boolean insertGrantable() + { + return goption(ACL_INSERT); + } + + @Override public boolean updateGranted() + { + return priv(ACL_UPDATE); + } + + @Override public boolean updateGrantable() + { + return goption(ACL_UPDATE); + } + + @Override public boolean referencesGranted() + { + return priv(ACL_REFERENCES); + } + + @Override public boolean referencesGrantable() + { + return goption(ACL_REFERENCES); + } + + @Override public boolean deleteGranted() + { + return priv(ACL_DELETE); + } + + @Override public boolean deleteGrantable() + { + return goption(ACL_DELETE); + } + + @Override public boolean truncateGranted() + { + return priv(ACL_TRUNCATE); + } + + @Override public boolean truncateGrantable() + { + return goption(ACL_TRUNCATE); + } + + @Override public boolean triggerGranted() + { + return priv(ACL_TRIGGER); + } + + @Override public boolean triggerGrantable() + { + return goption(ACL_TRIGGER); + } + + @Override public boolean createGranted() + { + return priv(ACL_CREATE); + } + + @Override public boolean createGrantable() + { + return goption(ACL_CREATE); + } + + @Override public boolean usageGranted() + { + return priv(ACL_USAGE); + } + + @Override public boolean usageGrantable() + { + return goption(ACL_USAGE); + } + + @Override public boolean executeGranted() + { + return priv(ACL_EXECUTE); + } + + @Override public boolean executeGrantable() + { + return goption(ACL_EXECUTE); + } + + @Override public boolean create_tempGranted() + { + return priv(ACL_CREATE_TEMP); + } + + @Override public boolean create_tempGrantable() + { + return 
goption(ACL_CREATE_TEMP); + } + + @Override public boolean connectGranted() + { + return priv(ACL_CONNECT); + } + + @Override public boolean connectGrantable() + { + return goption(ACL_CONNECT); + } + + @Override public boolean setGranted() + { + return priv(ACL_SET); + } + + @Override public boolean setGrantable() + { + return goption(ACL_SET); + } + + @Override public boolean alterSystemGranted() + { + return priv(ACL_ALTER_SYSTEM); + } + + @Override public boolean alterSystemGrantable() + { + return goption(ACL_ALTER_SYSTEM); + } + + @Override public boolean maintainGranted() + { + return priv(ACL_MAINTAIN); + } + + @Override public boolean maintainGrantable() + { + return goption(ACL_MAINTAIN); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/AttributeImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/AttributeImpl.java new file mode 100644 index 000000000..e9c923f7e --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/AttributeImpl.java @@ -0,0 +1,1054 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import static java.util.Objects.requireNonNull; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.Checked; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.SwitchPointCache.setConstant; + +import org.postgresql.pljava.model.*; +import static org.postgresql.pljava.model.MemoryContext.JavaMemoryContext; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.MemoryContextImpl.allocatingIn; +import static org.postgresql.pljava.pg.ModelConstants.*; +import static org.postgresql.pljava.pg.TupleDescImpl.Blessed; +import static org.postgresql.pljava.pg.TupleDescImpl.Ephemeral; +import static org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlot; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import org.postgresql.pljava.pg.adt.NameAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCOLLATION_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.*; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Implementation of the {@link Attribute Attribute} interface. 
+ */ +abstract class AttributeImpl extends Addressed +implements + Nonshared, Named, + AccessControlled, Attribute +{ + // syscache id is ATTNUM; two key components: attrelid, attnum + // remember to account for ATTRIBUTE_FIXED_PART_SIZE when from tupledesc + + abstract SwitchPoint cacheSwitchPoint(); + + private static final Function s_initializer; + + /* Implementation of CatalogObject */ + + @Override + public > T of(RegClass.Known c) + { + throw new UnsupportedOperationException("of() on an Attribute"); + } + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return RegClass.CLASSID; + } + + /** + * Overrides {@code cacheDescriptor} to correctly return the descriptor + * for {@code pg_attribute}. + *

    + * Because of the unusual addressing scheme for attributes, where + * the {@code classId} refers to {@code pg_class}, the inherited method + * would return the wrong descriptor. + */ + @Override + TupleDescriptor cacheDescriptor() + { + return CLASS.tupleDescriptor(); + } + + /** + * An attribute exists for as long as it has a non-invalidated containing + * tuple descriptor that can supply a byte buffer, whether or not it appears + * in the catalog. + */ + @Override + public boolean exists() + { + try + { + return null != rawBuffer(); + } + catch ( IllegalStateException e ) + { + return false; + } + } + + /** + * Fetch the entire tuple for this attribute from the PG {@code syscache}. + *

    + * The containing {@code TupleDescriptor} supplies a + * {@link #partialTuple partialTuple} covering the first + * {@code ATTRIBUTE_FIXED_PART_SIZE} bytes of this, where most often-needed + * properties are found, so this will be called only on requests for + * the properties that aren't found in that prefix. + */ + private static TupleTableSlot cacheTuple(AttributeImpl o) + { + ByteBuffer heapTuple; + + /* + * See this method in CatalogObjectImpl.Addressed for more on the choice + * of memory context and lifespan. + */ + try ( Checked.AutoCloseable ac = + allocatingIn(JavaMemoryContext()) ) + { + heapTuple = _searchSysCacheCopy2(ATTNUM, o.oid(), o.subId()); + if ( null == heapTuple ) + return null; + } + return heapTupleGetLightSlot(o.cacheDescriptor(), heapTuple, null); + } + + /* + * The super implementation nulls the TUPLE slot permanently; this + * class has RAWBUFFER and PARTIALTUPLE slots used similarly, so null those + * too. Transient will in turn override this and null nothing at all; its + * instances have the invalid Oid as a matter of course. + * + * It may well be that no circumstances exist where this version is called. + */ + @Override + void makeInvalidInstance(MethodHandle[] slots) + { + super.makeInvalidInstance(slots); + setConstant(slots, SLOT_RAWBUFFER, null); + setConstant(slots, SLOT_PARTIALTUPLE, null); + } + + /* Implementation of Named and AccessControlled */ + + private static Simple name(AttributeImpl o) throws SQLException + { + TupleTableSlot t = o.partialTuple(); + return + t.get(t.descriptor().sqlGet(Anum_pg_attribute_attname), + NameAdapter.SIMPLE_INSTANCE); + } + + private static List grants(AttributeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.ATTACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of Attribute */ + + AttributeImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_RAWBUFFER; + static final int SLOT_PARTIALTUPLE; + + static final int SLOT_TYPE; + static final int SLOT_LENGTH; + static final int SLOT_DIMENSIONS; + // static final int SLOT_CACHEDOFFSET; -- read fresh every time, no slot + static final int SLOT_BYVALUE; + static final int SLOT_ALIGNMENT; + static final int SLOT_STORAGE; + // static final int SLOT_COMPRESSION; -- add this + static final int SLOT_NOTNULL; + static final int SLOT_HASDEFAULT; + static final int SLOT_HASMISSING; + static final int SLOT_IDENTITY; + static final int SLOT_GENERATED; + static final int SLOT_DROPPED; + static final int SLOT_LOCAL; + static final int SLOT_INHERITANCECOUNT; + static final int SLOT_COLLATION; + static final int SLOT_OPTIONS; + static final int SLOT_FDWOPTIONS; + // static final int SLOT_MISSINGVALUE; -- add this + + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(AttributeImpl.class) + .withLookup(lookup()) + .withSwitchPoint(AttributeImpl::cacheSwitchPoint) + .withSlots(o -> o.m_slots) + .withCandidates(AttributeImpl.class.getDeclaredMethods()) + + /* + * First declare some slots whose consuming API methods are found + * on inherited interfaces. This requires some adjustment of method + * types so that run-time adaptation isn't needed. 
+ */ + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withReturnType(null) // cancel adjustment from above + .withDependent( "grants", SLOT_ACL) + + /* + * Next come slots where the compute and API methods are here. + */ + .withReceiverType(null) + .withDependent( "rawBuffer", SLOT_RAWBUFFER = i++) + .withDependent("partialTuple", SLOT_PARTIALTUPLE = i++) + + .withDependent( "type", SLOT_TYPE = i++) + .withDependent( "length", SLOT_LENGTH = i++) + .withDependent( "dimensions", SLOT_DIMENSIONS = i++) + .withDependent( "byValue", SLOT_BYVALUE = i++) + .withDependent( "alignment", SLOT_ALIGNMENT = i++) + .withDependent( "storage", SLOT_STORAGE = i++) + .withDependent( "notNull", SLOT_NOTNULL = i++) + .withDependent( "hasDefault", SLOT_HASDEFAULT = i++) + .withDependent( "hasMissing", SLOT_HASMISSING = i++) + .withDependent( "identity", SLOT_IDENTITY = i++) + .withDependent( "generated", SLOT_GENERATED = i++) + .withDependent( "dropped", SLOT_DROPPED = i++) + .withDependent( "local", SLOT_LOCAL = i++) + .withDependent("inheritanceCount", SLOT_INHERITANCECOUNT = i++) + .withDependent( "collation", SLOT_COLLATION = i++) + .withDependent( "options", SLOT_OPTIONS = i++) + .withDependent( "fdwoptions", SLOT_FDWOPTIONS = i++) + + .build(); + NSLOTS = i; + } + + static class Att + { + static final Attribute ATTACL; + static final Attribute ATTNDIMS; + static final Attribute ATTSTORAGE; + static final Attribute ATTHASDEF; + static final Attribute ATTHASMISSING; + static final Attribute ATTIDENTITY; + static final Attribute ATTGENERATED; + static final Attribute ATTISLOCAL; + static final Attribute ATTINHCOUNT; + static final Attribute ATTCOLLATION; + static final Attribute ATTOPTIONS; + static final Attribute ATTFDWOPTIONS; + + static + { + Iterator itr = CLASS.tupleDescriptor().project( + "attacl", + "attndims", + "attstorage", + "atthasdef", + "atthasmissing", + "attidentity", + "attgenerated", + "attislocal", + "attinhcount", + "attcollation", + "attoptions", + "attfdwoptions" + ).iterator(); + + ATTACL = itr.next(); + ATTNDIMS = itr.next(); + ATTSTORAGE = itr.next(); + ATTHASDEF = itr.next(); + ATTHASMISSING = itr.next(); + ATTIDENTITY = itr.next(); + ATTGENERATED = itr.next(); + ATTISLOCAL = itr.next(); + ATTINHCOUNT = itr.next(); + ATTCOLLATION = itr.next(); + ATTOPTIONS = itr.next(); + ATTFDWOPTIONS = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + /** + * Obtain the raw, heap-formatted readable byte buffer over this attribute. + *

    + * Because this is the {@code AttributeImpl} class, a few of the critical + * properties will be read directly via ByteBuffer methods, rather than + * using the {@code TupleTableSlot.get} API where a working + * {@code Attribute} must be supplied. + *

    + * The raw buffer is what the containing {@code TupleDescImpl} supplies, and + * it cuts off at {@code ATTRIBUTE_FIXED_PART_SIZE}. Retrieving properties + * beyond that point will require using {@code cacheTuple()} to fetch + * the whole tuple from the {@code syscache}. + */ + private static ByteBuffer rawBuffer(AttributeImpl o) + { + return + ((TupleDescImpl)o.containingTupleDescriptor()).slice(o.subId() - 1); + } + + /** + * A {@code TupleTableSlot} formed over the {@link #rawBuffer rawBuffer}, + * which holds only the first {@code ATTRIBUTE_FIXED_PART_SIZE} bytes of + * the full {@code pg_attribute} tuple. + *

    + * Supports the regular {@code TupleTableSlot.get} API for most properties + * (the ones that appear in the first {@code ATTRIBUTE_FIXED_PART_SIZE} + * bytes, and aren't needed for {@code TupleTableSlot.get} itself to work). + */ + private static TupleTableSlot partialTuple(AttributeImpl o) + { + return new TupleTableSlotImpl.Heap( + CLASS, o.cacheDescriptor(), o.rawBuffer(), null); + } + + private static RegType type(AttributeImpl o) + { + ByteBuffer b = o.rawBuffer(); + assert 4 == SIZEOF_pg_attribute_atttypid : "sizeof atttypid changed"; + assert 4 == SIZEOF_pg_attribute_atttypmod : "sizeof atttypmod changed"; + return + CatalogObjectImpl.Factory.formMaybeModifiedType( + b.getInt(OFFSET_pg_attribute_atttypid), + b.getInt(OFFSET_pg_attribute_atttypmod)); + } + + private static short length(AttributeImpl o) + { + ByteBuffer b = o.rawBuffer(); + assert 2 == SIZEOF_pg_attribute_attlen : "sizeof attlen changed"; + return b.getShort(OFFSET_pg_attribute_attlen); + } + + private static int dimensions(AttributeImpl o) throws SQLException + { + TupleTableSlot s = o.partialTuple(); + return s.get(Att.ATTNDIMS, INT4_INSTANCE); + } + + private static boolean byValue(AttributeImpl o) + { + ByteBuffer b = o.rawBuffer(); + assert 1 == SIZEOF_pg_attribute_attbyval : "sizeof attbyval changed"; + return 0 != b.get(OFFSET_pg_attribute_attbyval); + } + + private static Alignment alignment(AttributeImpl o) + { + ByteBuffer b = o.rawBuffer(); + assert 1 == SIZEOF_pg_attribute_attalign : "sizeof attalign changed"; + return alignmentFromCatalog(b.get(OFFSET_pg_attribute_attalign)); + } + + private static Storage storage(AttributeImpl o) throws SQLException + { + TupleTableSlot s = o.partialTuple(); + return + storageFromCatalog( + s.get(Att.ATTSTORAGE, INT1_INSTANCE)); + } + + private static boolean notNull(AttributeImpl o) + { + ByteBuffer b = o.rawBuffer(); + assert + 1 == SIZEOF_pg_attribute_attnotnull : "sizeof attnotnull changed"; + return 0 != b.get(OFFSET_pg_attribute_attnotnull); + } + + private static boolean hasDefault(AttributeImpl o) throws SQLException + { + TupleTableSlot s = o.partialTuple(); + return s.get(Att.ATTHASDEF, BOOLEAN_INSTANCE); + } + + private static boolean hasMissing(AttributeImpl o) throws SQLException + { // not 9.5 + TupleTableSlot s = o.partialTuple(); + return s.get(Att.ATTHASMISSING, BOOLEAN_INSTANCE); + } + + private static Identity identity(AttributeImpl o) throws SQLException + { // not 9.5 + TupleTableSlot s = o.partialTuple(); + byte v = s.get(Att.ATTIDENTITY, INT1_INSTANCE); + return identityFromCatalog(v); + } + + private static Generated generated(AttributeImpl o) throws SQLException + { // not 9.5 + TupleTableSlot s = o.partialTuple(); + byte v = s.get(Att.ATTGENERATED, INT1_INSTANCE); + return generatedFromCatalog(v); + } + + private static boolean dropped(AttributeImpl o) + { + ByteBuffer b = o.rawBuffer(); + assert + 1 == SIZEOF_pg_attribute_attisdropped + : "sizeof attisdropped changed"; + return 0 != b.get(OFFSET_pg_attribute_attisdropped); + } + + private static boolean local(AttributeImpl o) throws SQLException + { + TupleTableSlot s = o.partialTuple(); + return s.get(Att.ATTISLOCAL, BOOLEAN_INSTANCE); + } + + private static int inheritanceCount(AttributeImpl o) throws SQLException + { + TupleTableSlot s = o.partialTuple(); + return s.get(Att.ATTINHCOUNT, INT4_INSTANCE); + } + + private static RegCollation collation(AttributeImpl o) throws SQLException + { + TupleTableSlot s = o.partialTuple(); + return s.get(Att.ATTCOLLATION, REGCOLLATION_INSTANCE); + } 
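[Editorial aside, not part of this patch.] The computation methods above and the API methods below rely on per-instance MethodHandle slots guarded by a SwitchPoint, so a cached value is recomputed only after invalidation. Below is a minimal self-contained sketch of the underlying JDK mechanism (java.lang.invoke.SwitchPoint); the class, field, and method names are invented, and this is not the actual SwitchPointCache implementation.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.SwitchPoint;

public class SwitchPointSketch
{
	static final SwitchPoint POINT = new SwitchPoint();

	// Build a handle that returns the cached value until POINT is
	// invalidated, then falls back to recompute() on every later call.
	static MethodHandle cached(String value)
	throws ReflectiveOperationException
	{
		MethodHandle constant = MethodHandles.constant(String.class, value);
		MethodHandle recompute = MethodHandles.lookup().findStatic(
			SwitchPointSketch.class, "recompute",
			MethodType.methodType(String.class));
		return POINT.guardWithTest(constant, recompute);
	}

	static String recompute()
	{
		return "recomputed"; // stand-in for re-reading the catalog
	}

	public static void main(String[] args) throws Throwable
	{
		MethodHandle h = cached("cached");
		System.out.println((String)h.invokeExact()); // prints "cached"
		SwitchPoint.invalidateAll(new SwitchPoint[] { POINT });
		System.out.println((String)h.invokeExact()); // prints "recomputed"
	}
}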
+ + private static Map options(AttributeImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ATTOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + private static Map fdwoptions(AttributeImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ATTFDWOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + /* private methods using cache slots like API methods do */ + + private ByteBuffer rawBuffer() + { + try + { + MethodHandle h = m_slots[SLOT_RAWBUFFER]; + return (ByteBuffer)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + private TupleTableSlot partialTuple() + { + try + { + MethodHandle h = m_slots[SLOT_PARTIALTUPLE]; + return (TupleTableSlot)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* API methods */ + + @Override + public RegType type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public short length() + { + try + { + MethodHandle h = m_slots[SLOT_LENGTH]; + return (short)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public int dimensions() + { + try + { + MethodHandle h = m_slots[SLOT_DIMENSIONS]; + return (int)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean byValue() + { + try + { + MethodHandle h = m_slots[SLOT_BYVALUE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Alignment alignment() + { + try + { + MethodHandle h = m_slots[SLOT_ALIGNMENT]; + return (Alignment)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Storage storage() + { + try + { + MethodHandle h = m_slots[SLOT_STORAGE]; + return (Storage)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean notNull() + { + try + { + MethodHandle h = m_slots[SLOT_NOTNULL]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasDefault() + { + try + { + MethodHandle h = m_slots[SLOT_HASDEFAULT]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasMissing() // not 9.5 + { + try + { + MethodHandle h = m_slots[SLOT_HASMISSING]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Identity identity() // not 9.5 + { + try + { + MethodHandle h = m_slots[SLOT_IDENTITY]; + return (Identity)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Generated generated() // not 9.5 + { + try + { + MethodHandle h = m_slots[SLOT_GENERATED]; + return (Generated)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean dropped() + { + try + { + MethodHandle h = m_slots[SLOT_HASMISSING]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean local() + { + try + { + MethodHandle h = m_slots[SLOT_LOCAL]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + 
public int inheritanceCount() + { + try + { + MethodHandle h = m_slots[SLOT_INHERITANCECOUNT]; + return (int)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegCollation collation() + { + try + { + MethodHandle h = m_slots[SLOT_COLLATION]; + return (RegCollation)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Map options() + { + try + { + MethodHandle h = m_slots[SLOT_OPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Map fdwoptions() + { + try + { + MethodHandle h = m_slots[SLOT_FDWOPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + // missingValue + + @Override + public TupleDescriptor containingTupleDescriptor() + { + return relation().tupleDescriptor(); + } + + /** + * An attribute that belongs to a full-fledged cataloged composite type. + *
<p>
    + * It holds a reference to the relation that defines the composite type + * layout. While that can always be found from the class and object IDs + * of the object address, that is too much fuss for as often as + * {@code relation()} is called. + */ + static class Cataloged extends AttributeImpl + { + private final RegClassImpl m_relation; + + Cataloged(RegClassImpl relation) + { + m_relation = requireNonNull(relation); + } + + @Override + SwitchPoint cacheSwitchPoint() + { + return m_relation.cacheSwitchPoint(); + } + + @Override + public RegClass relation() + { + return m_relation; + } + } + + /** + * An attribute that belongs to a transient {@code TupleDescriptor}, not + * to any relation in the catalog (and therefore isn't really + * a {@code CatalogObject}, though it still pretends to be one). + *
<p>
    + * For now, this is simply a subclass of {@code AttributeImpl} to inherit + * most of the same machinery, and simply overrides and disables the methods + * of a real {@code CatalogObject}. In an alternative, it could be an + * independent implementation of the {@code Attribute} interface, but that + * could require more duplication of implementation. A cost of this + * implementation is that every instance will carry around one unused + * {@code CatalogObjectImpl.m_objectAddress} field. + */ + static class Transient extends AttributeImpl + { + private static final RegClass s_invalidClass = + CatalogObjectImpl.Factory.staticFormObjectId( + RegClass.CLASSID, InvalidOid); + + private final TupleDescriptor m_containingTupleDescriptor; + private final int m_attnum; + + SwitchPoint cacheSwitchPoint() + { + return + ((RegTypeImpl)m_containingTupleDescriptor.rowType()) + .cacheSwitchPoint(); + } + + Transient(TupleDescriptor td, int attnum) + { + m_containingTupleDescriptor = requireNonNull(td); + assert 0 < attnum : "nonpositive attnum in transient attribute"; + m_attnum = attnum; + } + + /* + * Do no nulling of slots (not even what the superclass method does) + * when created with the invalid Oid. *All* Transient instances have + * the invalid Oid! + */ + @Override + void makeInvalidInstance(MethodHandle[] slots) + { + } + + @Override + public int oid() + { + return InvalidOid; + } + + @Override + public int classOid() + { + return RegClass.CLASSID.oid(); + } + + @Override + public int subId() + { + return m_attnum; + } + + /** + * Returns true for an attribute of a transient {@code TupleDescriptor}, + * even though {@code oid()} will return {@code InvalidOid}. + *
<p>
    + * It's not clear any other convention would be less weird. + */ + @Override + public boolean isValid() + { + return true; + } + + /** + * Equality test for {@code AttributeImpl.Transient}. + *
<p>
    + * Instances of {@code Transient} are used both in + * {@link Ephemeral Ephemeral} tuple descriptors and in + * {@link Blessed Blessed} ones. This method will not treat as equal + * attributes belonging to any two distinct ephemeral descriptors, and + * naturally the attribute at this attribute's position in this + * attribute's containing descriptor can only be this attribute, so + * reference equality is necessary and sufficient. + *
<p>
    + * Reference equality also suffices for the {@code Blessed} case, as + * PostgreSQL at present keeps such row types unique for the life of the + * backend and does not invalidate them; the first + * {@code TupleDescriptor} returned by the corresponding {@code RegType} + * will therefore be the only one it can return. + *
<p>
    + * Should the (weakly cached) {@code RegType} instance be GC'd and a + * new one later instantiated for the same row type, a different tuple + * descriptor could result, but a {@code TupleDescriptorImpl.Blessed} + * holds a strong reference to its row type, which therefore can't go + * unreachable until the tuple descriptor has also; at any given time + * there can be no more than one in play. + */ + @Override + public boolean equals(Object other) + { + return this == other; + } + + /** + * Hash code for {@code AttributeImpl.Transient}. + *
<p>
    + * As reference equality is used for the {@code equals} test, + * {@code System.identityHashCode} is used here. + */ + @Override + public int hashCode() + { + return System.identityHashCode(this); + } + + @Override + public RegClass relation() + { + return s_invalidClass; + } + + @Override + public TupleDescriptor containingTupleDescriptor() + { + return m_containingTupleDescriptor; + } + } + + /** + * A transient attribute belonging to a synthetic tuple descriptor with + * one element of a specified {@code RegType}. + *
<p>
    + * Such a singleton tuple descriptor allows the {@code TupleTableSlot} API + * to be used as-is for related applications like array element access. + *
<p>
    + * Most methods simply delegate to the associated RegType. + */ + static class OfType extends Transient + { + private final RegType m_type; + + OfType(TupleDescriptor td, RegType type) + { + super(td, 1); + m_type = requireNonNull(type); + } + + @Override + public boolean exists() + { + return true; + } + + @Override + public Simple name() + { + return Simple.None.INSTANCE; + } + + @Override + public RegType type() + { + return m_type; + } + + @Override + public short length() + { + return m_type.length(); + } + + @Override + public int dimensions() + { + return m_type.dimensions(); + } + + @Override + public boolean byValue() + { + return m_type.byValue(); + } + + @Override + public Alignment alignment() + { + return m_type.alignment(); + } + + @Override + public Storage storage() + { + return m_type.storage(); + } + + @Override + public boolean notNull() + { + return m_type.notNull(); + } + + @Override + public boolean hasDefault() + { + return false; + } + + @Override + public boolean hasMissing() // not 9.5 + { + return false; + } + + @Override + public Identity identity() // not 9.5 + { + return Identity.INAPPLICABLE; + } + + @Override + public Generated generated() // not 9.5 + { + return Generated.INAPPLICABLE; + } + + @Override + public boolean dropped() + { + return false; + } + + @Override + public boolean local() + { + return true; + } + + @Override + public int inheritanceCount() + { + return 0; + } + + @Override + public RegCollation collation() + { + return m_type.collation(); + } + } + + private static Identity identityFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'\0': return Identity.INAPPLICABLE; + case (byte) 'a': return Identity.GENERATED_ALWAYS; + case (byte) 'd': return Identity.GENERATED_BY_DEFAULT; + } + throw unchecked(new SQLException( + "unrecognized Identity '" + (char)b + "' in catalog", "XX000")); + } + + private static Generated generatedFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'\0': return Generated.INAPPLICABLE; + case (byte) 's': return Generated.STORED; + } + throw unchecked(new SQLException( + "unrecognized Generated '" + (char)b + "' in catalog", "XX000")); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/CatalogObjectImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/CatalogObjectImpl.java new file mode 100644 index 000000000..bdb3244fe --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/CatalogObjectImpl.java @@ -0,0 +1,1620 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.TargetList.Projection; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.CacheMap; +import org.postgresql.pljava.internal.Checked; +import org.postgresql.pljava.internal.DualState; // for javadoc +import org.postgresql.pljava.internal.Invocation; +import org.postgresql.pljava.internal.SwitchPointCache; // for javadoc +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.SwitchPointCache.setConstant; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.adt.Array.AsFlatList; +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.model.*; +import org.postgresql.pljava.model.RegType.Unresolved; // for javadoc +import static org.postgresql.pljava.model.MemoryContext.JavaMemoryContext; + +import static org.postgresql.pljava.pg.MemoryContextImpl.allocatingIn; +import org.postgresql.pljava.pg.ModelConstants; +import static org.postgresql.pljava.pg.ModelConstants.PG_VERSION_NUM; +import static org.postgresql.pljava.pg.ModelConstants.LANGOID; +import static org.postgresql.pljava.pg.ModelConstants.PROCOID; +import static org.postgresql.pljava.pg.ModelConstants.TRFOID; +import static org.postgresql.pljava.pg.ModelConstants.TYPEOID; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_LONG; +import static org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlot; + +import org.postgresql.pljava.pg.adt.ArrayAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCLASS_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import org.postgresql.pljava.pg.adt.Primitives; +import org.postgresql.pljava.pg.adt.TextAdapter; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +import java.io.IOException; + +import java.lang.annotation.Native; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import static java.lang.ref.Reference.reachabilityFence; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.sql.SQLException; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.Optional; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntPredicate; +import java.util.function.Supplier; + +import static java.util.stream.Stream.iterate; + +/** + * Implementation of the {@link CatalogObject CatalogObject} API for the + * PL/Java case of JVM running in the PostgreSQL backend process. + */ +public class CatalogObjectImpl implements CatalogObject +{ + /** + * ByteBuffer representing the PostgreSQL object address: {@code classid}, + * {@code objid}, {@code objsubid}. + *
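// Illustrative sketch of the 12-byte address layout described above: three
// native-order ints (classid, objid, objsubid) at offsets 0, 4 and 8, written
// once and read back as needed. The class, method and example values below are
// illustrative only, not PL/Java API.
import java.nio.ByteBuffer;
import static java.nio.ByteOrder.nativeOrder;

final class ObjectAddressSketch
{
    static ByteBuffer address(int classId, int objId, int objSubId)
    {
        ByteBuffer b = ByteBuffer.allocate(12).order(nativeOrder());
        b.putInt(classId).putInt(objId).putInt(objSubId);
        return b;
    }

    public static void main(String[] args)
    {
        ByteBuffer a = address(1259, 16384, 0);   // example numbers only
        System.out.println(
            a.getInt(0) + "/" + a.getInt(4) + "/" + a.getInt(8));
    }
}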
<p>
    + * This buffer has to be retained as a key in the lookup data structure + * anyway, so this class will keep just one reference to the buffer, and + * read the values from it as needed. + *
<p>
    + * From the moment of construction here, the buffer must be treated as + * immutable. It may not actually be immutable: there is no way to alter an + * existing ByteBuffer to be readonly, but only to obtain a readonly copy, + * and the lookup data structure may have no API to reliably replace the key + * of an entry. But no reference to it should escape the lookup structure + * and this object, where it should be treated as if it cannot be written. + */ + private final ByteBuffer m_objectAddress; + + /** + * Hold the address during construction so it can be retrieved by this + * constructor without having to fuss with it in every subclass. + *
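// The hand-off described above, reduced to a minimal standalone sketch: a
// factory parks the value in a ThreadLocal, invokes a no-argument constructor,
// and the constructed object's initializer reads it back, so subclasses need
// no extra constructor parameter. All names here are hypothetical, not
// PL/Java's.
final class HandoffSketch
{
    private static final ThreadLocal<String> PARKED = new ThreadLocal<>();

    static Widget make(String address)
    {
        try
        {
            PARKED.set(address);
            return new Widget();       // its initializer consumes PARKED
        }
        finally
        {
            PARKED.remove();           // never leave the value lying around
        }
    }

    static class Widget
    {
        final String address = PARKED.get();
    }
}
// usage: Widget w = HandoffSketch.make("1259/16384/0");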
<p>
    + * Largely a notation convenience; it can be done the longwinded way if it + * proves a bottleneck. + */ + private static final ThreadLocal + s_address = new ThreadLocal<>(); + + protected CatalogObjectImpl() // only TriggerImpl outside this file + { + ByteBuffer b = s_address.get(); + + /* + * Here is a bit of a hack. No CatalogObjectImpl should ever be without + * its address buffer, with AttributeImpl.Transient being the sole + * exception. It supplies null, and overrides all the methods that rely + * on it. Perhaps it should simply be an independent implementation of + * the Attribute interface, rather than extending this class and wasting + * the address slot, but for now, this is the way it works. + */ + if ( null != b ) + assert saneByteBuffer(b, 12, "CatalogObjectImpl address"); + else if ( ! (this instanceof AttributeImpl.Transient) ) + throw new IllegalStateException( + "CatalogObjectImpl constructed without its address buffer"); + + m_objectAddress = b; + } + + public static CatalogObject of(int objId) + { + return Factory.form(InvalidOid, objId, 0); + } + + public static > + T of(RegClass.Known classId, int objId) + { + return Factory.staticFormObjectId(classId, objId); + } + + static boolean saneByteBuffer(ByteBuffer bb, int cap, String tag) + { + assert null != bb : tag + " null"; + assert cap == bb.capacity() : tag + " unexpected size"; + assert 0 == bb.position() : tag + " unexpected position"; + assert nativeOrder() == bb.order() : tag + " unexpected byte order"; + return true; + } + + @Override + protected final CatalogObjectImpl clone() throws CloneNotSupportedException + { + throw new CloneNotSupportedException(); + } + + @Override + public int oid() + { + return m_objectAddress.getInt(4); + } + + @Override + @SuppressWarnings("unchecked") + public > T of(RegClass.Known c) + { + if ( classOid() == ((CatalogObjectImpl)c).oid() ) + return (T) this; + if ( classValid() && isValid() ) + throw new IllegalStateException(String.format( + "cannot make %s a CatalogObject of class %s", this, c)); + return Factory.staticFormObjectId(c, oid()); + } + + public int classOid() + { + return m_objectAddress.getInt(0); + } + + public int subId() + { + return m_objectAddress.getInt(8); + } + + @Override + public boolean isValid() + { + return InvalidOid != oid(); + } + + public boolean classValid() + { + return InvalidOid != classOid(); + } + + @Override + public boolean equals(Object other) + { + if ( this == other ) + return true; + if ( ! (other instanceof CatalogObjectImpl) ) + return false; + return + m_objectAddress.equals(((CatalogObjectImpl)other).m_objectAddress); + } + + @Override + public int hashCode() + { + return m_objectAddress.hashCode(); + } + + @Override + public String toString() + { + Class c = getClass(); + c = iterate(c, Objects::nonNull, Class::getSuperclass) + .flatMap(c1 -> Arrays.stream(c1.getInterfaces())) + .filter(CatalogObject.class::isAssignableFrom) + .filter(i -> CatalogObject.class.getModule().equals(i.getModule())) + .findFirst().get(); + String pfx = c.getCanonicalName(); + return pfx.substring(1 + c.getPackageName().length()) + '[' + + Integer.toUnsignedString(classOid()) + ',' + + Integer.toUnsignedString(oid()) + ',' + + Integer.toUnsignedString(subId()) + ']'; + } + + /** + * Provider of the {@link CatalogObject.Factory CatalogObject.Factory} + * service, linking the {@link org.postgresql.pljava.model} API to the + * implementations in this package. 
+ */ + public static final class Factory extends CatalogObject.Factory + { + public Factory() { } + + /* + * Include one @Native-annotated constant here to trigger header + * generation for this class. The generated header also includes + * all the static primitive constants inherited from Factory, so + * they all can be statically checked against the PostgreSQL values + * in ModelConstants.c. + */ + @Native static final int InvalidOid = CatalogObject.InvalidOid; + + private static final CacheMap + s_map = CacheMap.newConcurrent( + () -> ByteBuffer.allocate(12).order(nativeOrder())); + + @Override + protected > RegClass.Known + formClassIdImpl(int classId, Class clazz) + { + return staticFormClassId(classId, clazz); + } + + @Override + protected > + T formObjectIdImpl( + RegClass.Known classId, int objId, IntPredicate versionTest) + { + return staticFormObjectId(classId, objId, versionTest); + } + + @Override + protected RegRole.Grantee publicGranteeImpl() + { + return (RegRole.Grantee)form(AuthIdRelationId, InvalidOid, 0); + } + + @Override + protected Database currentDatabaseImpl(RegClass.Known classId) + { + return staticFormObjectId(classId, _currentDatabase()); + } + + private static native int _currentDatabase(); + + @Override + protected CharsetEncoding serverEncoding() + { + return CharsetEncodingImpl.serverEncoding(); + } + + @Override + protected CharsetEncoding clientEncoding() + { + return CharsetEncodingImpl.clientEncoding(); + } + + @Override + protected CharsetEncoding encodingFromOrdinal(int ordinal) + { + return CharsetEncodingImpl.fromOrdinal(ordinal); + } + + @Override + protected long fetchAll() + { + switch ( SIZEOF_LONG ) + { + case Long.BYTES: return Long.MAX_VALUE; + case Integer.BYTES: return Integer.MAX_VALUE; + default: + throw new UnsupportedOperationException( + "define FETCH_ALL on platform with SIZEOF_LONG=" + + SIZEOF_LONG); + } + } + + @Override + protected CharsetEncoding encodingFromName(String name) + { + return CharsetEncodingImpl.fromName(name); + } + + @Override + protected ResourceOwner resourceOwner(int which) + { + return ResourceOwnerImpl.known(which); + } + + @Override + protected MemoryContext memoryContext(int which) + { + return MemoryContextImpl.known(which); + } + + @Override + protected MemoryContext upperMemoryContext() + { + return Invocation.upperExecutorContext(); + } + + @SuppressWarnings("unchecked") + static > RegClass.Known + staticFormClassId(int classId, Class clazz) + { + return (RegClass.Known)form(RelationRelationId, classId, 0); + } + + static > + T staticFormObjectId(RegClass.Known classId, int objId) + { + return staticFormObjectId(classId, objId, v -> true); + } + + @SuppressWarnings("unchecked") + static > + T staticFormObjectId( + RegClass.Known classId, int objId, IntPredicate versionTest) + { + return (T)form(((CatalogObjectImpl)classId).oid(), + versionTest.test(PG_VERSION_NUM) ? objId : InvalidOid, 0); + } + + @SuppressWarnings("unchecked") + static > + T findObjectId(RegClass.Known classId, int objId) + { + CacheMap.Entry e = s_map.find(k -> + k.putInt(classId.oid()).putInt(objId).putInt(0)); + if ( null == e ) + return null; + return (T)e.get(); // may be null if it's been found unreachable + } + + static void forEachValue(Consumer action) + { + s_map.forEachValue(action); + } + + static RegType formMaybeModifiedType(int typeId, int typmod) + { + if ( -1 == typmod ) + return (RegType)form(TypeRelationId, typeId, 0); + + int subId = (0 == typmod) ? 
-1 : typmod; + + RegType result = + (RegType)s_map.weaklyCache( + b -> b.putInt(TypeRelationId).putInt(typeId).putInt(subId), + b -> + { + if ( RECORDOID == typeId ) + return + constructWith(RegTypeImpl.Blessed::new, b); + /* + * Look up the unmodified base type. This is a plain + * find(), not a cache(), because ConcurrentHashMap's + * computeIfAbsent contract requires that the action + * "must not attempt to update any other mappings of + * this map." If not found, we will have to return null + * from this attempt, then retry after caching the base. + */ + CacheMap.Entry e = s_map.find(k -> + k.putInt(TypeRelationId).putInt(typeId).putInt(0)); + if ( null == e ) + return null; + RegTypeImpl.NoModifier base = + (RegTypeImpl.NoModifier)e.get(); + if ( null == base ) // e isn't a strong reference + return null; + + return constructWith( + () -> new RegTypeImpl.Modified(base), b); + } + ); + + if ( null != result ) + return result; + + RegTypeImpl.NoModifier base = + (RegTypeImpl.NoModifier)form(TypeRelationId, typeId, 0); + + return + (RegType)s_map.weaklyCache( + b -> b.putInt(TypeRelationId).putInt(typeId).putInt(subId), + b -> constructWith( + () -> new RegTypeImpl.Modified(base), b)); + } + + static CatalogObject form(int classId, int objId, int objSubId) + { + assert classId != TypeRelationId || 0 == objSubId : + "nonzero objSubId passed to form() for a type"; + + /* + * As attributes aren't built here anymore, there is now no valid + * use of this method with a nonzero objSubId. See formAttribute. + */ + if ( 0 != objSubId ) + throw new UnsupportedOperationException( + "CatalogObjectImpl.Factory.form with nonzero objSubId"); + + Supplier ctor = + Optional.ofNullable(ctorIfKnown(classId, objId, objSubId)) + .orElseGet(() -> + InvalidOid == classId + ? CatalogObjectImpl::new : Addressed::new); + + return + s_map.weaklyCache( + b -> b.putInt(classId).putInt(objId).putInt(objSubId), + b -> constructWith(ctor, b) + ); + } + + /** + * Called only by {@code TupleDescImpl}, which is the only way + * cataloged attribute instances should be formed. + *
<p>
    + * {@code TupleDescImpl} is expected and trusted to supply only valid + * (positive) attribute numbers, and a {@code Supplier} that will + * construct the attribute with a reference to its correct corresponding + * {@code RegClass} (not checked here). Because {@code TupleDescImpl} + * constructs a bunch of attributes at once, that reduces overhead. + */ + static Attribute formAttribute( + int relId, int attNum, Supplier ctor) + { + assert attNum > 0 : "formAttribute attribute number validity"; + return (Attribute) + s_map.weaklyCache( + b -> b.putInt(RelationRelationId) + .putInt(relId).putInt(attNum), + b -> constructWith(ctor, b) + ); + } + + /** + * Invokes a supplied {@code CatalogObjectImpl} constructor, with the + * {@code ByteBuffer} containing its address in thread-local storage, + * so it isn't necessary for all constructors of all subtypes to pass + * the thing all the way up. + */ + static CatalogObjectImpl constructWith( + Supplier ctor, ByteBuffer b) + { + try + { + s_address.set(b); + return ctor.get(); + } + finally + { + s_address.remove(); + } + } + + /** + * Returns the constructor for the right subtype of + * {@code CatalogObject} if the classId identifies one + * for which an implementation is available; null otherwise. + */ + static Supplier ctorIfKnown( + int classId, int objId, int objSubId) + { + /* + * Used to read a static field of whatever class we will return + * a constructor for, to ensure its static initializer has already + * run and cannot be triggered by the instance creation, which + * happens within the CacheMap's computeIfAbsent and therefore could + * pose a risk of deadlock if the class must also create instances + * to populate its own statics. + */ + RegClass fieldRead = null; + + try + { + switch ( classId ) + { + case TableSpaceRelationId: + fieldRead = Tablespace.CLASSID; + return TablespaceImpl::new; + case TypeRelationId: + fieldRead = RegType.CLASSID; + return typeConstructorFor(objId); + case ProcedureRelationId: + fieldRead = RegProcedure.CLASSID; + return RegProcedureImpl::new; + case AuthIdRelationId: + fieldRead = RegRole.CLASSID; + return RegRoleImpl::new; + case DatabaseRelationId: + fieldRead = Database.CLASSID; + return DatabaseImpl::new; + case ForeignServerRelationId: + fieldRead = ForeignServer.CLASSID; + return ForeignServerImpl::new; + case ForeignDataWrapperRelationId: + fieldRead = ForeignDataWrapper.CLASSID; + return ForeignDataWrapperImpl::new; + case AccessMethodRelationId: + fieldRead = AccessMethod.CLASSID; + return AccessMethodImpl::new; + case ConstraintRelationId: + fieldRead = Constraint.CLASSID; + return ConstraintImpl::new; + case LanguageRelationId: + fieldRead = ProceduralLanguage.CLASSID; + return ProceduralLanguageImpl::new; + case NamespaceRelationId: + fieldRead = RegNamespace.CLASSID; + return RegNamespaceImpl::new; + case OperatorRelationId: + fieldRead = RegOperator.CLASSID; + return RegOperatorImpl::new; + case TriggerRelationId: + fieldRead = Trigger.CLASSID; + return TriggerImpl::new; + case ExtensionRelationId: + fieldRead = Extension.CLASSID; + return ExtensionImpl::new; + case CollationRelationId: + fieldRead = RegCollation.CLASSID; + return RegCollationImpl::new; + case TransformRelationId: + fieldRead = Transform.CLASSID; + return TransformImpl::new; + case TSDictionaryRelationId: + fieldRead = RegDictionary.CLASSID; + return RegDictionaryImpl::new; + case TSConfigRelationId: + fieldRead = RegConfig.CLASSID; + return RegConfigImpl::new; + case RelationRelationId: + fieldRead = RegClass.CLASSID; 
+ assert 0 == objSubId : + "CatalogObjectImpl.Factory.form attribute"; + if ( null != ctorIfKnown(objId, InvalidOid, 0) ) + return RegClassImpl.Known::new; + return RegClassImpl::new; + default: + return null; + } + } + finally + { + reachabilityFence(fieldRead); // insist the read really happens + } + } + + /** + * Called from native code with a relation oid when one relation's + * metadata has been invalidated, or with {@code InvalidOid} to flush + * all relation metadata. + */ + private static void invalidateRelation(int relOid) + { + assert threadMayEnterPG() : "RegClass invalidate thread"; + + List sps = new ArrayList<>(); + List postOps = new ArrayList<>(); + + if ( InvalidOid != relOid ) + { + RegClassImpl c = (RegClassImpl) + findObjectId(RegClass.CLASSID, relOid); + if ( null != c ) + c.invalidate(sps, postOps); + } + else // invalidate all RegClass instances + { + forEachValue(o -> + { + if ( o instanceof RegClassImpl ) + ((RegClassImpl)o).invalidate(sps, postOps); + }); + } + + if ( sps.isEmpty() ) + return; + + SwitchPoint.invalidateAll(sps.stream().toArray(SwitchPoint[]::new)); + + postOps.forEach(Runnable::run); + } + + /** + * Called from native code with the {@code catcache} hash of the type + * Oid (inconvenient, as that is likely different from the hash Java + * uses), or zero to flush metadata for all cached types. + */ + private static void syscacheInvalidate( + int cbIndex, int cacheId, int oidHash) + { + assert threadMayEnterPG() : "RegType invalidate thread"; + + List sps = new ArrayList<>(); + List postOps = new ArrayList<>(); + + Class targetClass = s_sysInvalClasses[cbIndex]; + + forEachValue(o -> + { + if ( ! targetClass.isInstance(o) ) + return; + if ( 0 == oidHash || oidHash == murmurhash32(o.oid()) ) + ((Addressed)targetClass.cast(o)) + .invalidate(sps, postOps); + }); + + if ( sps.isEmpty() ) + return; + + SwitchPoint.invalidateAll(sps.stream().toArray(SwitchPoint[]::new)); + + postOps.forEach(Runnable::run); + } + + /** + * Returns a constructor for an ordinary {@code NoModifier} + * instance or an {@code Unresolved} instance, as determined by + * the PostgreSQL-version-specific set of PostgreSQL pseudotypes + * that require resolution to actual types used at a given call site. + *
<p>
    + * At present, the same {@link Unresolved Unresolved} class is used for + * both families of polymorphic pseudotype as well as the truly + * anything-goes the {@code ANY} type. + */ + static Supplier typeConstructorFor(int oid) + { + switch ( oid ) + { + // Polymorphic family 1 + case ANYARRAYOID: + case ANYELEMENTOID: + case ANYNONARRAYOID: + case ANYENUMOID: + case ANYRANGEOID: + return RegTypeImpl.Unresolved::new; + case ANYMULTIRANGEOID: + if ( PG_VERSION_NUM >= 140000 ) + return RegTypeImpl.Unresolved::new; + else + return RegTypeImpl.NoModifier::new; + + // Polymorphic family 2 + case ANYCOMPATIBLEOID: + case ANYCOMPATIBLEARRAYOID: + case ANYCOMPATIBLENONARRAYOID: + case ANYCOMPATIBLERANGEOID: + if ( PG_VERSION_NUM >= 130000 ) + return RegTypeImpl.Unresolved::new; + else + return RegTypeImpl.NoModifier::new; + case ANYCOMPATIBLEMULTIRANGEOID: + if ( PG_VERSION_NUM >= 140000 ) + return RegTypeImpl.Unresolved::new; + else + return RegTypeImpl.NoModifier::new; + + // The wild-west wildcard "any" type + case ANYOID: + return RegTypeImpl.Unresolved::new; + default: + return RegTypeImpl.NoModifier::new; + } + } + + /* + * Oids of the polymorphic types. If there is ever a call to expose + * them in API like other type constants, these can be moved to + * CatalogObject.Factory with the rest of those, but for now it may be + * enough for the internal RegTypeImpl to know about them. + */ + @Native public static final int ANYOID = 2276; + + // ANYARRAYOID is inherited because API has RegType.ANYARRAY + @Native public static final int ANYELEMENTOID = 2283; + @Native public static final int ANYNONARRAYOID = 2776; + @Native public static final int ANYENUMOID = 3500; + @Native public static final int ANYRANGEOID = 3831; + @Native public static final int ANYMULTIRANGEOID = 4537; + + @Native public static final int ANYCOMPATIBLEMULTIRANGEOID = 4538; + @Native public static final int ANYCOMPATIBLEOID = 5077; + @Native public static final int ANYCOMPATIBLEARRAYOID = 5078; + @Native public static final int ANYCOMPATIBLENONARRAYOID = 5079; + @Native public static final int ANYCOMPATIBLERANGEOID = 5080; + + /* + * A relation ID that won't be used to construct a full-blown catalog + * object, but used in RegClassImpl. + */ + @Native public static final int ForeignTableRelationId = 3118; + + /* + * Indices into arrays used for syscache invalidation callbacks. + * One of these is a boolean native array the C callback can check + * and return quickly if there is nothing from that cache to invalidate. + * At least one more array indexed the same way is in Java and used + * in syscacheInvalidate. + */ + @Native public static final int LANGOID_CB = 0; + @Native public static final int PROCOID_CB = 1; + @Native public static final int TRFOID_CB = 2; + @Native public static final int TYPEOID_CB = 3; + @Native public static final int SYSCACHE_CBS = 4; + + /** + * An array mapping a syscache invalidation callback index to the Java + * class used for instances of the corresponding catalog class. 
+ */ + private static final Class[] s_sysInvalClasses; + + static + { + @SuppressWarnings("unchecked") + Class[] cs = + (Class[])new Class [ SYSCACHE_CBS ]; + cs [ LANGOID_CB ] = ProceduralLanguageImpl.class; + cs [ PROCOID_CB ] = RegProcedureImpl.class; + cs [ TRFOID_CB ] = TransformImpl.class; + cs [ TYPEOID_CB ] = RegTypeImpl.class; + s_sysInvalClasses = cs; + } + + /** + * A {@code ByteBuffer} that windows a C boolean array, one for each + * registered syscache invalidation callback, updated (see the {@link + * CatalogObjectImpl.Addressed#sysCacheInvalArmed sysCacheInvalArmed} + * method below) to reflect whether the Java {@code CacheMap} contains + * any instances of the corresponding class that could need + * invalidation. + */ + private static final ByteBuffer s_sysCacheInvalArmed = + CatalogObjectImpl.Addressed._windowSysCacheInvalArmed() + .order(nativeOrder()); + } + + /* + * Go ahead and reserve fixed slot offsets for the common tuple/name/ + * namespace/owner/acl slots all within Addressed; those that + * correspond to interfaces a given subclass doesn't implement won't + * get used. Being fussier about it here would only complicate the code. + */ + static final int SLOT_TUPLE = 0; + static final int SLOT_NAME = 1; + static final int SLOT_NAMESPACE = 2; + static final int SLOT_OWNER = 3; + static final int SLOT_ACL = 4; + static final int NSLOTS = 5; + + /** + * Base class for every catalog object that has an {@link #oid oid} + * identifying a row in a catalog table identified by + * a {@link #classId classId}. + */ + static class Addressed> + extends CatalogObjectImpl implements CatalogObject.Addressed + { + /** + * Copy this constant here so it can be inherited without ceremony + * by subclasses of Addressed, which may need it when initializing + * attribute projections. Putting the copy up in CatalogObject itself + * is a problem if another compilation unit does import static of both + * CatalogObjectImpl.* and ModelConstants.* but there is little reason + * anyone would import CatalogObjectImpl.Addressed.*. + */ + static final int PG_VERSION_NUM = ModelConstants.PG_VERSION_NUM; + + /** + * Invalidation {@code SwitchPoint} for catalog objects that do not have + * their own selective invalidation callbacks. + *
<p>
    + * PostgreSQL only has a limited number of callback slots, so we do not + * consume one for every type of catalog object. Many will simply depend + * on this {@code SwitchPoint}, which will be invalidated at every + * transaction, subtransaction, or command counter change. + *
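// The underlying idea, in a standalone sketch using the plain JDK
// java.lang.invoke.SwitchPoint (this class itself goes through
// SwitchPointCache.SwitchPoint, imported above): a SwitchPoint guards a cached
// constant, and invalidating it flips every guarded handle to its fallback in
// one step.
import java.lang.invoke.MethodHandle;
import java.lang.invoke.SwitchPoint;
import static java.lang.invoke.MethodHandles.constant;

final class SwitchPointSketch
{
    public static void main(String[] args) throws Throwable
    {
        SwitchPoint sp = new SwitchPoint();
        MethodHandle cached    = constant(String.class, "cached value");
        MethodHandle recompute = constant(String.class, "recomputed value");
        MethodHandle h = sp.guardWithTest(cached, recompute);

        System.out.println((String)h.invokeExact());   // "cached value"
        SwitchPoint.invalidateAll(new SwitchPoint[] { sp });
        System.out.println((String)h.invokeExact());   // "recomputed value"
    }
}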
<p>
    + * XXX This is not strictly conservative: those are common points where + * PostgreSQL processes invalidations, but there are others (such as + * lock acquisitions) less easy to predict or intercept. + */ + static final SwitchPoint[] s_globalPoint = { new SwitchPoint() }; + + /** + * Obtains a {@code ByteBuffer} that windows the C boolean array + * indicating which syscache callbacks are currently 'armed'. + */ + private static native ByteBuffer _windowSysCacheInvalArmed(); + + /** + * Initializer for only the {@code SLOT_TUPLE} slot of a + * {@code CatalogObjectImpl.Addressed} or subclass. + *
<p>
    + * This initializer uses + * {@link #cacheTuple(CatalogObjectImpl.Addressed) cacheTuple} to + * populate the slot, which is appropriate for the common case where the + * subclass overrides {@link #cacheId() cacheId} to return the + * identifier of a syscache to be searched by a single oid. + */ + static final Function s_initializer; + + /** + * {@link SwitchPointCache SwitchPointCache}-managed slots, the + * foundation for cached values returned by API methods of + * {@code CatalogObjectImpl.Addressed} subclasses. + *
<p>
    + * The assignment of this field happens in this class's constructor, but + * when a subclass is being instantiated, the subclass constructor + * supplies the array. The array length is determined by the number of + * slots needed by the subclass, whose own slots begin after the + * {@link #NSLOTS NSLOTS} initial ones reserved above. + *
<p>
    + * Each array element is a "slot", and contains a {@link MethodHandle} + * of two arguments and a return type specialized to the type of the + * value to be cached there. API methods for returning cached values + * do so by invoking the method handle with two arguments, the receiver + * object and the handle itself, and returning its result. + *
<p>
    + * On the first call, or the first again after invalidation caused by + * DDL changes, the method handle will invoke a "computation method". + * By convention, the computation method has the same name as the API + * method, but is static, taking the object instance as its only + * parameter rather than as an instance method's receiver. Such naming + * is merely a convention; the association between each slot and its + * computation method is determined by a + * {@link SwitchPointCache.Builder#withDependent withDependent} call as + * the initializer for the slots array is being built. In each subclass, + * a static initializer uses {@link SwitchPointCache.Builder} to + * construct an initializer + * ({@code Function}) that will be saved + * in a static, and applied in the instance constructor to + * a freshly-allocated array, installing the initial method handles in + * its slots. + *
<p>
    + * On subsequent uses of a slot, until invalidation is triggered, the + * method handle found there will typically disregard its arguments and + * return, as a constant, the value the computation method returned. + *
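// A self-contained miniature of the slot convention just described, with
// hypothetical names and without SwitchPointCache or any invalidation: the
// slot starts out pointing at the computation method, which installs a
// constant in its place. (In this class the computation method takes only the
// instance and the Builder supplies the adaptation; here it simply accepts and
// ignores the extra handle argument, to keep the sketch tiny.)
import java.lang.invoke.MethodHandle;
import static java.lang.invoke.MethodHandles.constant;
import static java.lang.invoke.MethodHandles.dropArguments;
import static java.lang.invoke.MethodHandles.lookup;
import static java.lang.invoke.MethodType.methodType;

final class SlotSketch
{
    static final int SLOT_GREETING = 0;

    final MethodHandle[] m_slots = new MethodHandle[1];

    SlotSketch() throws ReflectiveOperationException
    {
        m_slots[SLOT_GREETING] = lookup().findStatic(SlotSketch.class,
            "greeting",
            methodType(String.class, SlotSketch.class, MethodHandle.class));
    }

    // "computation method": runs once, then parks its result as a constant
    static String greeting(SlotSketch o, MethodHandle self)
    {
        System.out.println("computing...");
        String value = "hello";
        o.m_slots[SLOT_GREETING] = dropArguments(
            constant(String.class, value),
            0, SlotSketch.class, MethodHandle.class);
        return value;
    }

    // API method: the two-argument invokeExact convention described above
    String greeting()
    {
        try
        {
            MethodHandle h = m_slots[SLOT_GREETING];
            return (String)h.invokeExact(this, h);
        }
        catch ( Throwable t )
        {
            throw new RuntimeException(t);
        }
    }

    public static void main(String[] args) throws Exception
    {
        SlotSketch s = new SlotSketch();
        System.out.println(s.greeting());   // "computing..." then "hello"
        System.out.println(s.greeting());   // constant now; no recomputation
    }
}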
<p>
    + * When a computation method runs, it runs on "the PG thread" (see + * {@link DualState DualState} for more on what "the PG thread" means + * under the different settings of {@code pljava.java_thread_pg_entry}), + * so without further ceremony it may assume it is serialized with + * respect to other computation methods, and perform actions, such as + * JNI calls, for which that thread must be used. + */ + final MethodHandle[] m_slots; + + static + { + s_initializer = + new Builder<>(CatalogObjectImpl.Addressed.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withSlots(o -> o.m_slots) + .withDependent("cacheTuple", SLOT_TUPLE) + .build(); + } + + /** + * Writes the 'armed' status for a specific C syscache invalidation + * callback, specified by the index defined for it in + * {@link CatalogObjectImpl.Factory Factory}. + *
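// The Java half of the arming handshake, as a standalone sketch: one byte per
// callback in a native-order buffer, 1 meaning "there may be something to
// invalidate, make the upcall". Here the buffer is simply allocated in Java;
// in this class it instead windows the C array obtained from
// _windowSysCacheInvalArmed(), so the C callback can test the flag directly.
import java.nio.ByteBuffer;
import static java.nio.ByteOrder.nativeOrder;

final class ArmedFlagsSketch
{
    static final int CALLBACKS = 4;             // e.g. SYSCACHE_CBS
    static final ByteBuffer FLAGS =
        ByteBuffer.allocateDirect(CALLBACKS).order(nativeOrder());

    static void setArmed(int index, boolean armed)
    {
        FLAGS.put(index, (byte)(armed ? 1 : 0));
    }

    static boolean isArmed(int index)           // what the C side would check
    {
        return 0 != FLAGS.get(index);
    }
}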
<p>
    + * When the status for a given syscache is not armed, its C callback + * returns immediately with no Java upcall. + */ + protected static void sysCacheInvalArmed(int index, boolean inUse) + { + CatalogObjectImpl.Factory.s_sysCacheInvalArmed.put( + index, (byte)(inUse ? 1 : 0)); + } + + /** + * A computation method for retrieving the "cache tuple", suited to the + * common case where a subclass overrides {@link #cacheId cacheId} to + * return the ID of a syscache searchable with a single {@code oid} key. + */ + static TupleTableSlot cacheTuple(CatalogObjectImpl.Addressed o) + { + ByteBuffer heapTuple; + + /* + * The longest we can hold a tuple (non-copied) from syscache is + * for the life of CurrentResourceOwner. We may want to cache the + * thing for longer, if we can snag invalidation messages for it. + * So, call _searchSysCacheCopy, in the JavaMemoryContext, which is + * immortal; we'll arrange below to explicitly free our copy later. + */ + try ( Checked.AutoCloseable ac = + allocatingIn(JavaMemoryContext()) ) + { + heapTuple = _searchSysCacheCopy1(o.cacheId(), o.oid()); + if ( null == heapTuple ) + return null; + } + + /* + * Because our copy is in an immortal memory context, we can + * pass null as the lifespan below. The DualState manager + * created for the TupleTableSlot will therefore not have + * any nativeStateReleased action; on javaStateUnreachable or + * javaStateReleased, it will free the tuple copy. + */ + return heapTupleGetLightSlot(o.cacheDescriptor(), heapTuple, null); + } + + /** + * Find a tuple in the PostgreSQL {@code syscache}, returning a copy + * made in the current memory context. + *
<p>
    + * The key(s) in PostgreSQL are really {@code Datum}; perhaps this + * should be refined to rely on {@link Datum.Accessor Datum.Accessor} + * somehow, once that implements store methods. For present purposes, + * we only need to support 32-bit integers, which will be zero-extended + * to {@code Datum} width. + */ + static native ByteBuffer _searchSysCacheCopy1(int cacheId, int key1); + + /** + * Find a tuple in the PostgreSQL {@code syscache}, returning a copy + * made in the current memory context. + *
<p>
    + * The key(s) in PostgreSQL are really {@code Datum}; perhaps this + * should be refined to rely on {@link Datum.Accessor Datum.Accessor} + * somehow, once that implements store methods. For present purposes, + * we only need to support 32-bit integers, which will be zero-extended + * to {@code Datum} width. + */ + static native ByteBuffer _searchSysCacheCopy2( + int cacheId, int key1, int key2); + + /** + * Search the table classId for at most one row with the Oid + * objId in column oidCol, using the index + * indexOid if it is not {@code InvalidOid}, returning null + * or a copy of the tuple in the current memory context. + *
<p>
    + * The returned tuple should be like one obtained from {@code syscache} + * in having no external TOAST pointers. The tuple descriptor is passed + * so that {@code toast_flatten_tuple} can be called if necessary. + */ + static native ByteBuffer _sysTableGetByOid( + int classId, int objId, int oidCol, int indexOid, long tupleDesc); + + /** + * Calls {@code lookup_rowtype_tupdesc_noerror} in the PostgreSQL + * {@code typcache}, returning a byte buffer over the result, or null + * if there isn't one (such as when called with a type oid that doesn't + * represent a composite type). + *
<p>
    + * Beware that "noerror" does not prevent an ugly {@code ereport} if + * the oid doesn't represent an existing type at all. + *
<p>
    + * Only to be called by {@code RegTypeImpl}. Declaring it here allows + * that class to be kept pure Java. + *
<p>
    + * This is used when we know we will be caching the result, so + * the native code will already have further incremented + * the reference count (for a counted descriptor) and released the pin + * {@code lookup_rowtype_tupdesc} took, thereby waiving leaked-reference + * warnings. We will hold on to the result until an invalidation message + * tells us not to. + *
<p>
    + * If the descriptor is not reference-counted, ordinarily it would be of + * dubious longevity, but when obtained from the {@code typcache}, + * such a descriptor is good for the life of the process (clarified + * in upstream commit bbc227e). + */ + static native ByteBuffer _lookupRowtypeTupdesc(int oid, int typmod); + + /** + * Return a byte buffer mapping the tuple descriptor + * for relid, using only the PostgreSQL + * {@code relcache}. + *
<p>
    + * Only to be called by {@code RegClassImpl}. Declaring it here allows + * that class to be kept pure Java. + *
<p>
    + * Other descriptor lookups on a {@code RegClass} are done by handing + * off to an associated row {@code RegType}, when there is one, which + * will look in + * the {@code typcache}. But finding the associated row {@code RegType} + * isn't something {@code RegClass} can do before it has obtained this + * crucial tuple descriptor for its own structure, and also there are + * relation kinds (index and toast, anyway) which have no type entry. + *
<p>
    + * This method shall increment the reference count; the caller will pass + * the byte buffer directly to a {@code TupleDescImpl} constructor, + * which assumes that has already happened. The reference count shall be + * incremented without registering the descriptor for leak warnings. + */ + static native ByteBuffer _tupDescBootstrap(int relid); + + private Addressed() + { + this(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + /** + * Constructor for use by a subclass that supplies a slots array + * (assumed to have length at least NSLOTS). + *
<p>
    + * It is the responsibility of the subclass to initialize the slots + * (including the first NSLOTS ones defined here; s_initializer can be + * used for those, if the default global-switchpoint behavior it offers + * is appropriate). + *
<p>
    + * Some subclasses may do oddball things, such as RegTypeImpl.Modified + * sharing the slots array of its base NoModifier instance. + *
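// The sharing arrangement described here, as a standalone sketch; the point
// spelled out in the next paragraph is that the sharer must pin the owner of
// the array with a strong reference, not merely hold the array itself. All
// names are hypothetical.
final class SharedSlotsSketch
{
    static class Owner
    {
        final Object[] slots = new Object[5];   // stands in for MethodHandle[]
    }

    static class Sharer
    {
        final Owner owner;      // strong reference: keeps the owner, and any
                                // invalidation bookkeeping tied to it, reachable
                                // for as long as this sharer is reachable
        final Object[] slots;   // the shared array itself

        Sharer(Owner owner)
        {
            this.owner = owner;
            this.slots = owner.slots;   // share, do not copy
        }
    }
}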
<p>
    + * Any class that will do such a thing must also hold a strong reference + * to whatever instance the slots array 'belongs' to; a reference to + * just the array can't be counted on to keep the other instance live. + */ + Addressed(MethodHandle[] slots) + { + if ( InvalidOid == oid() ) + makeInvalidInstance(slots); + m_slots = slots; + } + + /** + * Called from an invalidation callback in {@code Factory} to set up + * the invalidation of this catalog object's metadata. + *
<p>
    + * Adds this object's {@code SwitchPoint} to the caller's list so that, + * if more than one is to be invalidated, that can be done in bulk. Adds + * to postOps any operations the caller should conclude with + * after invalidating the {@code SwitchPoint}. + *
<p>
    + * This implementation does nothing (other than to assert false, when + * assertions are enabled). It should be overridden in those subclasses + * that do more fine-grained invalidation than simply relying on + * {@code s_globalPoint}. + */ + void invalidate(List sps, List postOps) + { + assert false : "unhandled invalidation of " + toString(); + } + + /** + * Adjust cache slots when constructing an invalid instance. + *
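// A sketch only of the effect arranged below through SwitchPointCache's
// setConstant (whose implementation is not in this hunk): a handle of the
// slots' two-argument shape, pinned to null and guarded by no SwitchPoint, so
// no invalidation ever swaps it out. valueType must be a reference type; all
// names here are illustrative.
import java.lang.invoke.MethodHandle;
import static java.lang.invoke.MethodHandles.constant;
import static java.lang.invoke.MethodHandles.dropArguments;

final class PermanentNullSketch
{
    static MethodHandle permanentNull(Class<?> receiverType, Class<?> valueType)
    {
        return dropArguments(
            constant(valueType, null), 0, receiverType, MethodHandle.class);
    }
}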
<p>
    + * This implementation stores a permanent null (insensitive to + * invalidation) in {@code SLOT_TUPLE}, which will cause {@code exists} + * to return false and other dependent methods to fail. + *
<p>
    + * An instance method because {@code AttributeImpl.Transient} will + * have to override it; those things have the invalid Oid in real life. + */ + void makeInvalidInstance(MethodHandle[] slots) + { + setConstant(slots, SLOT_TUPLE, null); + } + + @Override + public RegClass.Known classId() + { + /* + * By design, this class must be an implementation of + * T (extends CatalogObject.Addressed) for the same T that is + * the parameter of its classId. + */ + @SuppressWarnings("unchecked") + Class thisClass = (Class)getClass(); + + return CatalogObjectImpl.Factory.staticFormClassId( + classOid(), thisClass); + } + + @Override + public boolean exists() + { + return null != cacheTuple(); + } + + /** + * Returns the {@link TupleDescriptor} for the catalog table whose rows + * define instances of this class. + *
<p>
+ * This implementation calls {@link #classId classId} and then + * {@link RegClass#tupleDescriptor tupleDescriptor} on that. A subclass + * may override when it can supply the descriptor more efficiently, and + * must override in the few cases ({@link AttributeImpl AttributeImpl}, + * for example) where that isn't the right way to get it. + */ + TupleDescriptor cacheDescriptor() + { + return classId().tupleDescriptor(); + } + + /** + * Returns, from the proper catalog table, the cached tuple that defines + * this instance of this class. + */ + TupleTableSlot cacheTuple() + { + try + { + MethodHandle h = m_slots[SLOT_TUPLE]; + return (TupleTableSlot)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * Returns the ID of a syscache that is searchable with a single oid key + * to retrieve the {@link #cacheTuple cacheTuple} defining this instance + * of this class. + *
<p>
    + * This implementation throws {@code UnsupportedOperationException} and + * must be overridden in every subclass, unless a subclass supplies its + * own computation method for {@code cacheTuple} that does not use this. + */ + int cacheId() + { + throw notyet(); + } + + /** + * Default {@code toString} method for {@code Addressed} and subclasses. + *
<p>
    + * Extends {@link CatalogObjectImpl#toString CatalogObjectImpl.toString} + * by adding the name (if this is an instance of + * {@link CatalogObject.Named Named}) or qualified name (if an + * instance of {@link CatalogObject.Namespaced Namespaced}), if + * available. + */ + @Override + public String toString() + { + String prefix = super.toString(); + if ( this instanceof CatalogObject.Named ) + { + try + { + CatalogObject.Named named = (CatalogObject.Named)this; + if ( ! exists() ) + return prefix; + if ( this instanceof CatalogObject.Namespaced ) + { + CatalogObject.Namespaced spaced = + (CatalogObject.Namespaced)this; + RegNamespace ns = spaced.namespace(); + if ( ns.exists() ) + return prefix + spaced.qualifiedName(); + return prefix + "(" + ns + ")." + named.name(); + } + return prefix + named.name(); + } + catch ( LinkageError e ) + { + /* + * Do nothing; LinkageError is expected when testing in, + * for example, jshell, and not in a PostgreSQL backend. + */ + } + } + return prefix; + } + + /** + * Utility class to create a {@link Projection Projection} using + * attribute names that may be conditional (on something like + * {@link #PG_VERSION_NUM PG_VERSION_NUM}). + *
<p>
    + * {@code alsoIf} adds strings to the list, if the condition is true, or + * the same number of nulls if the condition is false. + *
<p>
    + * {@code project} filters the list to only the non-null values, using + * those to form a {@code Projection} and obtain its iterator of + * attributes. + *
<p>
    + * This class then implements its own iterator of attributes, iterating + * for the length of the original name list, drawing from the + * Projection's iterator where a non-null name was saved, or producing + * null (and not incrementing the Projection's iterator) where a null + * was saved. + *
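// Hypothetical usage, in the shape the next paragraph describes (assumed to
// sit in a subclass of Addressed; the pg_attribute column names are only
// examples, and proj stands for whatever Projection the subclass already has
// for its catalog table):
static Attribute[] exampleUse(Projection proj)
{
    AttNames itr =
        attNames("attname", "atttypid")
            .alsoIf(PG_VERSION_NUM >= 100000, "attidentity")
            .project(proj);
    Attribute attname     = itr.next();
    Attribute atttypid    = itr.next();
    Attribute attidentity = itr.next();   // null where the column is absent
    return new Attribute[] { attname, atttypid, attidentity };
}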
<p>
    + * The iterator can be used in a sequence of static final initializers, + * such that the final fields will end up containing the wanted + * Attribute instances where applicable, or null where not. + */ + static class AttNames implements Iterator + { + private ArrayList strings = new ArrayList<>(); + + private Iterator myItr; + private Projection it; + private Iterator itsItr; + + AttNames alsoIf(boolean p, String... toAdd) + { + if ( p ) + for ( String s : toAdd ) + strings.add(s); + else + for ( String s : toAdd ) + strings.add(null); + return this; + } + + AttNames project(Projection p) + { + String[] filtered = strings + .stream().filter(Objects::nonNull).toArray(String[]::new); + it = p.project(filtered); + itsItr = it.iterator(); + myItr = strings.iterator(); + return this; + } + + /** + * Returns a further projection of the one derived from the names. + *
<p>
    + * Caters to cases (so far only one in RegTypeImpl) where a + * computation method will want a projection of multiple attributes, + * instead of a single attribute. + *
<p>
    + * In the expected usage, the attribute arguments will have been + * supplied from the iterator, and will be null where the expected + * attributes do not exist. In that case, null must be returned for + * the projection. + */ + Projection project(Attribute... atts) + { + if ( Arrays.stream(atts).anyMatch(Objects::isNull) ) + return null; + return it.project(atts); + } + + @Override + public boolean hasNext() + { + return myItr.hasNext(); + } + + @Override + public Attribute next() + { + String myNext = myItr.next(); + if ( null == myNext ) + return null; + return itsItr.next(); + } + } + + /** + * Constructs a new {@link AttNames AttNames} instance and begins + * populating it, adding names unconditionally. + */ + static AttNames attNames(String... names) + { + return new AttNames().alsoIf(true, names); + } + } + + /** + * Mixin supplying a {@code shared()} method that returns false without + * having to materialize the {@code classId}. + */ + interface Nonshared> + extends CatalogObject.Addressed + { + @Override + default boolean shared() + { + return false; + } + } + + /** + * Mixin supplying a {@code shared()} method that returns true without + * having to materialize the {@code classId}. + */ + interface Shared> + extends CatalogObject.Addressed + { + @Override + default boolean shared() + { + return true; + } + } + + /* + * Note to self: name() should, of course, fail or return null + * when ! isValid(). That seems generally sensible, but code + * in interface RegRole contains the first conscious reliance on it. + */ + /** + * Mixin that supplies the implementation of + * {@link CatalogObject.Named CatalogObject.Named}. + */ + interface Named> + extends CatalogObject.Named + { + @Override + default T name() + { + try + { + MethodHandle h = + ((CatalogObjectImpl.Addressed)this).m_slots[SLOT_NAME]; + return (T)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + } + + /** + * Mixin that supplies the implementation of + * {@link CatalogObject.Namespaced CatalogObject.Namespaced}. + */ + interface Namespaced> + extends Named, CatalogObject.Namespaced + { + @Override + default RegNamespace namespace() + { + try + { + MethodHandle h = + ((CatalogObjectImpl.Addressed)this).m_slots[SLOT_NAMESPACE]; + return (RegNamespace)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + } + + /** + * Mixin that supplies the implementation of + * {@link CatalogObject.Owned CatalogObject.Owned}. + */ + interface Owned extends CatalogObject.Owned + { + @Override + default RegRole owner() + { + try + { + MethodHandle h = + ((CatalogObjectImpl.Addressed)this).m_slots[SLOT_OWNER]; + return (RegRole)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + } + + /** + * Mixin that supplies the implementation of + * {@link CatalogObject.AccessControlled CatalogObject.AccessControlled}. + */ + interface AccessControlled + extends CatalogObject.AccessControlled + { + @Override + default List grants() + { + try + { + MethodHandle h = + ((CatalogObjectImpl.Addressed)this).m_slots[SLOT_ACL]; + /* + * The value stored in the slot comes from GrantAdapter, which + * returns undifferentiated List, to be confidently + * narrowed here to List. 
+ */ + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + default List grants(RegRole grantee) + { + throw notyet(); + } + } + + /** + * Instances of {@link ArrayAdapter ArrayAdapter} for types used + * in the catalogs. + *
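// Standalone sketch of the holder idiom noted just below: a constant declared
// in a nested holder interface is not created until the holder is first
// referenced, so merely loading or initializing the enclosing class costs
// nothing. Names are illustrative only.
final class HolderIdiomSketch
{
    interface Holder
    {
        String EXPENSIVE = HolderIdiomSketch.expensively();
    }

    static String expensively()
    {
        System.out.println("building the constant");
        return "built";
    }

    public static void main(String[] args)
    {
        System.out.println("main started");    // no "building" line yet
        System.out.println(Holder.EXPENSIVE);  // first use triggers it
    }
}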
<p>
    + * A holder interface so these won't be instantiated unless wanted. + */ + public interface ArrayAdapters + { + ArrayAdapter> REGCLASS_LIST_INSTANCE = + new ArrayAdapter<>(REGCLASS_INSTANCE, + AsFlatList.of(AsFlatList::nullsIncludedCopy)); + + ArrayAdapter> REGTYPE_LIST_INSTANCE = + new ArrayAdapter<>(REGTYPE_INSTANCE, + AsFlatList.of(AsFlatList::nullsIncludedCopy)); + + /** + * List of {@code Identifier.Simple} from an array of {@code TEXT} + * that represents SQL identifiers. + */ + ArrayAdapter> TEXT_NAME_LIST_INSTANCE = + new ArrayAdapter<>(TextAdapter.INSTANCE, + /* + * A custom array contract is an anonymous class, not just a + * lambda, so the compiler will record the actual type arguments + * with which it specializes the generic contract. + */ + new Adapter.Contract.Array<>() + { + @Override + public List construct( + int nDims, int[] dimsAndBounds, As adapter, + TupleTableSlot.Indexed slot) + throws SQLException + { + int n = slot.elements(); + Identifier.Simple[] names = new Identifier.Simple[n]; + for ( int i = 0; i < n; ++ i ) + names[i] = + Identifier.Simple.fromCatalog( + slot.get(i, adapter)); + return List.of(names); + } + }); + + /** + * List of {@code RegProcedure.ArgMode} from an array of {@code "char"}. + */ + ArrayAdapter> ARGMODE_LIST_INSTANCE = + new ArrayAdapter<>(Primitives.INT1_INSTANCE, + new Adapter.Contract.Array<>() + { + @Override + public List construct( + int nDims, int[] dimsAndBounds, AsByte adapter, + TupleTableSlot.Indexed slot) + throws SQLException + { + int n = slot.elements(); + RegProcedure.ArgMode[] modes = + new RegProcedure.ArgMode[n]; + for ( int i = 0; i < n; ++ i ) + { + byte in = slot.get(i, adapter); + switch ( in ) + { + case (byte)'i': + modes[i] = RegProcedure.ArgMode.IN; + break; + case (byte)'o': + modes[i] = RegProcedure.ArgMode.OUT; + break; + case (byte)'b': + modes[i] = RegProcedure.ArgMode.INOUT; + break; + case (byte)'v': + modes[i] = RegProcedure.ArgMode.VARIADIC; + break; + case (byte)'t': + modes[i] = RegProcedure.ArgMode.TABLE; + break; + default: + throw new UnsupportedOperationException( + String.format("Unrecognized " + + "procedure/function argument mode " + + "value %#x", in)); + } + } + return List.of(modes); + } + }); + + /** + * {@code Map} from an array of {@code TEXT} + * that represents 'reloptions' (as used on relations, attributes, and + * foreign wrappers / servers / tables, at least). + *
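// Standalone sketch of the parsing rule stated in the next paragraph: each
// reloptions element is "key=value", the first '=' wins, and a bare key has
// already been rewritten by PostgreSQL as "key=true" before it gets here. The
// names are illustrative; the adapter below additionally passes each key
// through Identifier.Simple.fromCatalog.
import java.util.LinkedHashMap;
import java.util.Map;

final class ReloptionsSketch
{
    static Map<String,String> parse(String... reloptions)
    {
        Map<String,String> m = new LinkedHashMap<>();
        for ( String s : reloptions )
        {
            int pos = s.indexOf('=');     // first '=' delimits key from value
            if ( pos < 0 )
                throw new IllegalArgumentException("option with no =: " + s);
            m.put(s.substring(0, pos), s.substring(1 + pos));
        }
        return m;
    }

    public static void main(String[] args)
    {
        System.out.println(parse("fillfactor=70", "autovacuum_enabled=true"));
    }
}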
<p>
    + * The {@code String} value is never expected to be null (PostgreSQL's + * {@code transformRelOptions} will have substituted {@code true} where + * an option with no value was parsed), and this adapter will + * assume the first {@code '='} in each element delimits the + * key from the value (that is, that no key can be an SQL delimited + * identifier containing {@code '='}, though PostgreSQL as of 17 does + * not enforce that). + */ + ArrayAdapter> RELOPTIONS_INSTANCE = + new ArrayAdapter<>(TextAdapter.INSTANCE, + new Adapter.Contract.Array<>() + { + @Override + public Map construct( + int nDims, int[] dimsAndBounds, As adapter, + TupleTableSlot.Indexed slot) + throws SQLException + { + int n = slot.elements(); + @SuppressWarnings("unchecked") + Map.Entry[] entries = + new Map.Entry[n]; + for ( int i = 0; i < n; ++ i ) + { + String s = slot.get(i, adapter); + int pos = s.indexOf('='); + try + { + entries[i] = Map.entry( + Identifier.Simple.fromCatalog( + s.substring(0, pos)), + s.substring(1 + pos)); + } + catch ( StringIndexOutOfBoundsException e ) + { + throw new AssertionError( + "transformed reloption with no =", e); + } + } + return Map.ofEntries(entries); + } + }); + } + + private static final StackWalker s_walker = + StackWalker.getInstance(Set.of(), 2); + + static UnsupportedOperationException notyet() + { + String what = s_walker.walk(s -> s + .skip(1) + .map(StackWalker.StackFrame::toStackTraceElement) + .findFirst() + .map(e -> " " + e.getClassName() + "." + e.getMethodName()) + .orElse("") + ); + return new UnsupportedOperationException( + "CatalogObject API" + what); + } + + static UnsupportedOperationException notyet(String what) + { + return new UnsupportedOperationException( + "CatalogObject API " + what); + } + + /** + * The Oid hash function used by the backend's Oid-based catalog caches + * to identify the entries affected by invalidation events. + *
<p>
    + * From hashutils.h. + */ + static int murmurhash32(int h) + { + h ^= h >>> 16; + h *= 0x85ebca6b; + h ^= h >>> 13; + h *= 0xc2b2ae35; + h ^= h >>> 16; + return h; + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/CharsetEncodingImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/CharsetEncodingImpl.java new file mode 100644 index 000000000..56549bf7f --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/CharsetEncodingImpl.java @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.nio.BufferOverflowException; +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; +import java.nio.CharBuffer; + +import java.nio.charset.CharacterCodingException; +import java.nio.charset.Charset; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CoderResult; +import static java.nio.charset.StandardCharsets.ISO_8859_1; +import static java.nio.charset.StandardCharsets.US_ASCII; +import static java.nio.charset.StandardCharsets.UTF_8; + +import java.util.regex.Pattern; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.CacheMap; + +import org.postgresql.pljava.model.CharsetEncoding; + +import static org.postgresql.pljava.pg.ModelConstants.NAMEDATALEN; +import static org.postgresql.pljava.pg.ModelConstants.PG_ENCODING_BE_LAST; +import static org.postgresql.pljava.pg.ModelConstants.PG_LATIN1; +import static org.postgresql.pljava.pg.ModelConstants.PG_SQL_ASCII; +import static org.postgresql.pljava.pg.ModelConstants.PG_UTF8; + +/** + * Implementation of the {@link CharsetEncoding CharsetEncoding} interface. + */ +class CharsetEncodingImpl implements CharsetEncoding +{ + private static final CacheMap s_byOrdinal = + CacheMap.newConcurrent( + () -> ByteBuffer.allocate(4).order(nativeOrder())); + + private static final ByteBuffer s_nameWindow = + ByteBuffer.allocateDirect(NAMEDATALEN); + + private static final Pattern s_name_sqlascii = Pattern.compile( + "(?i)(?:X[-_]?+)?+(?:PG)?+SQL[-_]?+ASCII"); + + private static final String s_property = "org.postgresql.server.encoding"; + + /** + * Only called once to initialize the {@code SERVER_ENCODING} static. + *

    + * Doesn't use {@code fromOrdinal}, because that method will check against + * {@code SERVER_ENCODING}. + */ + static CharsetEncoding serverEncoding() + { + String charsetOverride = System.getProperty(s_property); + CharsetEncoding result = doInPG(() -> + { + int ordinal = EarlyNatives._serverEncoding(); + return s_byOrdinal.softlyCache( + b -> b.putInt(ordinal), + b -> new CharsetEncodingImpl(ordinal, charsetOverride) + ); + }); + if ( null != result.charset() ) + { + System.setProperty(s_property, result.charset().name()); + return result; + } + throw new UnsupportedOperationException( + "No Java Charset found for PostgreSQL server encoding " + + "\"" + result.name() + "\" (" + result.ordinal() +"). Consider " + + "adding -D" + s_property + "=... in pljava.vmoptions."); + } + + static CharsetEncoding clientEncoding() + { + return doInPG(() -> fromOrdinal(EarlyNatives._clientEncoding())); + } + + static CharsetEncoding fromOrdinal(int ordinal) + { + if ( SERVER_ENCODING.ordinal() == ordinal ) + return SERVER_ENCODING; + return s_byOrdinal.softlyCache( + b -> b.putInt(ordinal), + b -> doInPG(() -> new CharsetEncodingImpl(ordinal, null)) + ); + } + + static CharsetEncoding fromName(String name) + { + try + { + return doInPG(() -> + { + s_nameWindow.clear(); + /* + * Charset names should all be ASCII, according to IANA, + * which neatly skirts a "how do I find the encoder for + * the name of my encoding?" conundrum. + */ + CharsetEncoder e = US_ASCII.newEncoder(); + CoderResult r = e.encode( + CharBuffer.wrap(name), s_nameWindow, true); + if ( r.isUnderflow() ) + r = e.flush(s_nameWindow); + if ( ! r.isUnderflow() ) + r.throwException(); + /* + * PG will want a NUL-terminated string (and yes, the NAME + * datatype is limited to NAMEDATALEN - 1 encoded octets + * plus the NUL, so if this doesn't fit, overflow exception + * is the right outcome). 
+ */ + s_nameWindow.put((byte)0).flip(); + int o = EarlyNatives._nameToOrdinal(s_nameWindow); + if ( -1 != o ) + return fromOrdinal(o); + if ( s_name_sqlascii.matcher(name).matches() ) + return fromOrdinal(PG_SQL_ASCII); + throw new IllegalArgumentException( + "no such PostgreSQL character encoding: \"" + + name + "\""); + } + ); + } + catch ( BufferOverflowException | CharacterCodingException e ) + { + throw new IllegalArgumentException( + "no such PostgreSQL character encoding: \"" + name + "\"", e); + } + } + + private final int m_ordinal; + private final String m_name; + private final String m_icuName; + private final Charset m_charset; + + private CharsetEncodingImpl(int ordinal, String charsetOverride) + { + assert threadMayEnterPG(); + ByteBuffer b = EarlyNatives._ordinalToName(ordinal); + if ( null == b ) + throw new IllegalArgumentException( + "no such PostgreSQL character encoding: " + ordinal); + + m_ordinal = ordinal; + + try + { + m_name = US_ASCII.newDecoder().decode(b).toString(); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError( + "PG encoding " + ordinal + " has a non-ASCII name"); + } + + String altName = null; + if ( usableOnServer() ) + { + b = EarlyNatives._ordinalToIcuName(ordinal); + if ( null != b ) + { + try + { + altName = US_ASCII.newDecoder().decode(b).toString(); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError( + "PG encoding " + ordinal + " has a non-ASCII ICU name"); + } + } + } + m_icuName = altName; + + Charset c = null; + if ( null == charsetOverride ) + { + switch ( ordinal ) + { + case PG_LATIN1 : c = ISO_8859_1; break; + case PG_UTF8 : c = UTF_8 ; break; + default: + } + } + else + altName = charsetOverride; + + if ( null == c ) + { + try + { + c = Charset.forName(null != altName ? altName : m_name); + } + catch ( IllegalArgumentException e ) + { + } + } + m_charset = c; + } + + @Override + public int ordinal() + { + return m_ordinal; + } + + @Override + public String name() + { + return m_name; + } + + @Override + public String icuName() + { + return m_icuName; + } + + @Override + public boolean usableOnServer() + { + return 0 <= m_ordinal && m_ordinal <= PG_ENCODING_BE_LAST; + } + + @Override + public Charset charset() + { + return m_charset; + } + + @Override + public String toString() + { + return "CharsetEncoding[" + m_ordinal + "]" + m_name; + } + + private static class EarlyNatives + { + private static native int _serverEncoding(); + private static native int _clientEncoding(); + private static native int _nameToOrdinal(ByteBuffer nulTerminated); + private static native ByteBuffer _ordinalToName(int ordinal); + private static native ByteBuffer _ordinalToIcuName(int ordinal); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ConstraintImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ConstraintImpl.java new file mode 100644 index 000000000..f6c506d73 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ConstraintImpl.java @@ -0,0 +1,760 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.List; + +import java.util.function.Function; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.adt.Array.AsFlatList; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.CONSTROID; // syscache + +import org.postgresql.pljava.pg.adt.ArrayAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.CONSTRAINT_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCLASS_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGOPERATOR_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT2_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; +import static org.postgresql.pljava.pg.adt.XMLAdapter.SYNTHETIC_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link Constraint Constraint} interface. + */ +class ConstraintImpl extends Addressed +implements Nonshared, Namespaced, Constraint +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return CONSTROID; + } + + /* Implementation of Named, Namespaced */ + + private static Simple name(ConstraintImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.CONNAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(ConstraintImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.CONNAMESPACE, REGNAMESPACE_INSTANCE); + } + + /* Implementation of Constraint */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + ConstraintImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_TYPE; + static final int SLOT_DEFERRABLE; + static final int SLOT_DEFERRED; + static final int SLOT_VALIDATED; + static final int SLOT_RELID; + static final int SLOT_TYPID; + static final int SLOT_INDID; + static final int SLOT_PARENTID; + static final int SLOT_FRELID; + static final int SLOT_FUPDTYPE; + static final int SLOT_FDELTYPE; + static final int SLOT_FMATCHTYPE; + static final int SLOT_ISLOCAL; + static final int SLOT_INHCOUNT; + static final int SLOT_NOINHERIT; + static final int SLOT_PFEQOP; + static final int SLOT_PPEQOP; + static final int SLOT_FFEQOP; + static final int SLOT_EXCLOP; + static final int NSLOTS; + + private static final Adapter.Array> OPRLIST_INSTANCE; + + private static final Adapter.Array I2ARRAY_INSTANCE; + + static + { + OPRLIST_INSTANCE = new ArrayAdapter<>( + REGOPERATOR_INSTANCE, AsFlatList.of(AsFlatList::nullsIncludedCopy)); + + I2ARRAY_INSTANCE = INT2_INSTANCE.a1().build(); + + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(ConstraintImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(ConstraintImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + + .withReceiverType(null) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent( "deferrable", SLOT_DEFERRABLE = i++) + .withDependent( "deferred", SLOT_DEFERRED = i++) + .withDependent( "validated", SLOT_VALIDATED = i++) + .withDependent( "onTable", SLOT_RELID = i++) + .withDependent( "onDomain", SLOT_TYPID = i++) + .withDependent( "index", SLOT_INDID = i++) + .withDependent( "parent", SLOT_PARENTID = i++) + .withDependent("referencedTable", SLOT_FRELID = i++) + .withDependent( "updateAction", SLOT_FUPDTYPE = i++) + .withDependent( "deleteAction", SLOT_FDELTYPE = i++) + .withDependent( "matchType", SLOT_FMATCHTYPE = i++) + .withDependent( "isLocal", SLOT_ISLOCAL = i++) + .withDependent( "inheritCount", SLOT_INHCOUNT = i++) + .withDependent( "noInherit", SLOT_NOINHERIT = i++) + .withDependent( "pfEqOp", SLOT_PFEQOP = i++) + .withDependent( "ppEqOp", SLOT_PPEQOP = i++) + .withDependent( "ffEqOp", SLOT_FFEQOP = i++) + .withDependent( "exclOp", SLOT_EXCLOP = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. 
+ */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute CONNAME; + static final Attribute CONNAMESPACE; + static final Attribute CONTYPE; + static final Attribute CONDEFERRABLE; + static final Attribute CONDEFERRED; + static final Attribute CONVALIDATED; + static final Attribute CONRELID; + static final Attribute CONTYPID; + static final Attribute CONINDID; + static final Attribute CONPARENTID; + static final Attribute CONFRELID; + static final Attribute CONFUPDTYPE; + static final Attribute CONFDELTYPE; + static final Attribute CONFMATCHTYPE; + static final Attribute CONISLOCAL; + static final Attribute CONINHCOUNT; + static final Attribute CONNOINHERIT; + static final Attribute CONKEY; + static final Attribute CONFKEY; + static final Attribute CONPFEQOP; + static final Attribute CONPPEQOP; + static final Attribute CONFFEQOP; + static final Attribute CONFDELSETCOLS; + static final Attribute CONEXCLOP; + static final Attribute CONBIN; + + static + { + AttNames itr = attNames( + "conname", + "connamespace", + "contype", + "condeferrable", + "condeferred", + "convalidated", + "conrelid", + "contypid", + "conindid", + "conparentid", + "confrelid", + "confupdtype", + "confdeltype", + "confmatchtype", + "conislocal", + "coninhcount", + "connoinherit", + "conkey", + "confkey", + "conpfeqop", + "conppeqop", + "conffeqop", + "conexclop", + "conbin" + ).alsoIf(PG_VERSION_NUM >= 150000, + "confdelsetcols" + ).project(CLASSID.tupleDescriptor()); + + CONNAME = itr.next(); + CONNAMESPACE = itr.next(); + CONTYPE = itr.next(); + CONDEFERRABLE = itr.next(); + CONDEFERRED = itr.next(); + CONVALIDATED = itr.next(); + CONRELID = itr.next(); + CONTYPID = itr.next(); + CONINDID = itr.next(); + CONPARENTID = itr.next(); + CONFRELID = itr.next(); + CONFUPDTYPE = itr.next(); + CONFDELTYPE = itr.next(); + CONFMATCHTYPE = itr.next(); + CONISLOCAL = itr.next(); + CONINHCOUNT = itr.next(); + CONNOINHERIT = itr.next(); + CONKEY = itr.next(); + CONFKEY = itr.next(); + CONPFEQOP = itr.next(); + CONPPEQOP = itr.next(); + CONFFEQOP = itr.next(); + CONEXCLOP = itr.next(); + CONBIN = itr.next(); + CONFDELSETCOLS = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static Type type(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.CONTYPE, INT1_INSTANCE); + switch ( b ) + { + case (byte)'c': return Type.CHECK; + case (byte)'f': return Type.FOREIGN_KEY; + case (byte)'n': return Type.NOT_NULL; + case (byte)'p': return Type.PRIMARY_KEY; + case (byte)'u': return Type.UNIQUE; + case (byte)'t': return Type.CONSTRAINT_TRIGGER; + case (byte)'x': return Type.EXCLUSION; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized constraint type value %#x", b)); + } + } + + private static boolean deferrable(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONDEFERRABLE, BOOLEAN_INSTANCE); + } + + private static boolean deferred(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONDEFERRED, BOOLEAN_INSTANCE); + } + + private static boolean validated(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONVALIDATED, BOOLEAN_INSTANCE); + } + + private static RegClass onTable(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONRELID, REGCLASS_INSTANCE); + } + + private static RegType onDomain(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONTYPID, REGTYPE_INSTANCE); + } + + private static RegClass index(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONINDID, REGCLASS_INSTANCE); + } + + private static Constraint parent(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONPARENTID, CONSTRAINT_INSTANCE); + } + + private static RegClass referencedTable(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONFRELID, REGCLASS_INSTANCE); + } + + private static ReferentialAction updateAction(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.CONFUPDTYPE, INT1_INSTANCE); + return refActionFromCatalog(b, "upd"); + } + + private static ReferentialAction deleteAction(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.CONFDELTYPE, INT1_INSTANCE); + return refActionFromCatalog(b, "del"); + } + + private static MatchType matchType(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.CONFMATCHTYPE, INT1_INSTANCE); + switch ( b ) + { + case (byte)'f': return MatchType.FULL; + case (byte)'p': return MatchType.PARTIAL; + case (byte)'s': return MatchType.SIMPLE; + case (byte)' ': return null; // not a foreign key constraint + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized constraint match type value %#x", b)); + } + } + + private static boolean isLocal(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONISLOCAL, BOOLEAN_INSTANCE); + } + + private static short inheritCount(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONINHCOUNT, INT2_INSTANCE); + } + + private static boolean noInherit(ConstraintImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONNOINHERIT, BOOLEAN_INSTANCE); + } + + private static List 
pfEqOp(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONPFEQOP, OPRLIST_INSTANCE); + } + + private static List ppEqOp(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONPPEQOP, OPRLIST_INSTANCE); + } + + private static List ffEqOp(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONFFEQOP, OPRLIST_INSTANCE); + } + + private static List exclOp(ConstraintImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.CONEXCLOP, OPRLIST_INSTANCE); + } + + /* API methods */ + + @Override + public Type type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (Type)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean deferrable() + { + try + { + MethodHandle h = m_slots[SLOT_DEFERRABLE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean deferred() + { + try + { + MethodHandle h = m_slots[SLOT_DEFERRED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean validated() + { + try + { + MethodHandle h = m_slots[SLOT_VALIDATED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegClass onTable() + { + try + { + MethodHandle h = m_slots[SLOT_RELID]; + return (RegClass)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType onDomain() + { + try + { + MethodHandle h = m_slots[SLOT_TYPID]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegClass index() + { + try + { + MethodHandle h = m_slots[SLOT_INDID]; + return (RegClass)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Constraint parent() + { + try + { + MethodHandle h = m_slots[SLOT_PARENTID]; + return (Constraint)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegClass referencedTable() + { + try + { + MethodHandle h = m_slots[SLOT_FRELID]; + return (RegClass)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public ReferentialAction updateAction() + { + try + { + MethodHandle h = m_slots[SLOT_FUPDTYPE]; + return (ReferentialAction)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public ReferentialAction deleteAction() + { + try + { + MethodHandle h = m_slots[SLOT_FDELTYPE]; + return (ReferentialAction)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public MatchType matchType() + { + try + { + MethodHandle h = m_slots[SLOT_FMATCHTYPE]; + return (MatchType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean isLocal() + { + try + { + MethodHandle h = m_slots[SLOT_ISLOCAL]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public short inheritCount() + { + try + { + MethodHandle h = m_slots[SLOT_INHCOUNT]; + return (short)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + 
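The API methods above all follow one idiom: fetch a MethodHandle from the per-instance slot array and call invokeExact(this, h), handing the handle its own slot entry so the cached handle can replace itself when invalidated. Below is a rough standalone sketch of just that calling convention; the real slots are assembled and invalidation-guarded by SwitchPointCache.Builder, which is not reproduced here, and the class and method names are invented.

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class SlotIdiomSketch
    {
        private final MethodHandle[] m_slots;

        SlotIdiomSketch(MethodHandle[] slots) { m_slots = slots; }

        /* stands in for a computation method such as deferrable(ConstraintImpl o);
           here it also takes the handle, so the invokeExact argument list matches */
        private static boolean compute(SlotIdiomSketch o, MethodHandle h)
        {
            return true;
        }

        public boolean deferrable() throws Throwable   // API method, same shape as above
        {
            MethodHandle h = m_slots[0];
            return (boolean)h.invokeExact(this, h);
        }

        public static void main(String[] args) throws Throwable
        {
            MethodHandle h = MethodHandles.lookup().findStatic(
                SlotIdiomSketch.class, "compute", MethodType.methodType(
                    boolean.class, SlotIdiomSketch.class, MethodHandle.class));
            System.out.println(
                new SlotIdiomSketch(new MethodHandle[] { h }).deferrable());
        }
    }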
@Override + public boolean noInherit() + { + try + { + MethodHandle h = m_slots[SLOT_NOINHERIT]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Projection key() + { + /* + * The reason for not caching this Projection is it depends on + * another CatalogObject (the RegClass, for its TupleDescriptor) + * that may be invalidated separately. Getting SwitchPointCache + * to guard validity with more than one SwitchPoint could be an + * interesting project, but not for today. + */ + TupleTableSlot s = cacheTuple(); + short[] indices = s.get(Att.CONKEY, I2ARRAY_INSTANCE); + if ( null == indices ) + return null; + return onTable().tupleDescriptor().sqlProject(indices); + } + + @Override + public Projection fkey() + { + /* + * See key() above for notes. + */ + TupleTableSlot s = cacheTuple(); + short[] indices = s.get(Att.CONFKEY, I2ARRAY_INSTANCE); + if ( null == indices ) + return null; + return referencedTable().tupleDescriptor().sqlProject(indices); + } + + @Override + public List pfEqOp() + { + try + { + MethodHandle h = m_slots[SLOT_PFEQOP]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List ppEqOp() + { + try + { + MethodHandle h = m_slots[SLOT_PPEQOP]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List ffEqOp() + { + try + { + MethodHandle h = m_slots[SLOT_FFEQOP]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Projection fdelSetColumns() + { + /* + * See key() above for notes. + */ + if ( null == Att.CONFDELSETCOLS ) // missing in this PG version + return null; + TupleTableSlot s = cacheTuple(); + short[] indices = s.get(Att.CONFDELSETCOLS, I2ARRAY_INSTANCE); + if ( null == indices ) + return null; + return onTable().tupleDescriptor().sqlProject(indices); + } + + @Override + public List exclOp() + { + try + { + MethodHandle h = m_slots[SLOT_EXCLOP]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public SQLXML bin() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + TupleTableSlot s = cacheTuple(); + return s.get(Att.CONBIN, SYNTHETIC_INSTANCE); + } + + private static ReferentialAction refActionFromCatalog(byte b, String event) + { + switch ( b ) + { + case (byte)'a': return ReferentialAction.NONE; + case (byte)'r': return ReferentialAction.RESTRICT; + case (byte)'c': return ReferentialAction.CASCADE; + case (byte)'n': return ReferentialAction.SET_NULL; + case (byte)'d': return ReferentialAction.SET_DEFAULT; + case (byte)' ': return null; // not a foreign key constraint + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized referential integrity %s action value %#x", + event, b)); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/DatabaseImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/DatabaseImpl.java new file mode 100644 index 000000000..2fb3685a6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/DatabaseImpl.java @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import java.util.function.Function; + +import org.postgresql.pljava.Adapter.As; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.DATABASEOID; // syscache + +import org.postgresql.pljava.pg.adt.EncodingAdapter; +import org.postgresql.pljava.pg.adt.GrantAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.NameAdapter.AS_STRING_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.TABLESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT4_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Implementation of the {@link Database Database} interface. + */ +class DatabaseImpl extends Addressed +implements + Shared, Named, Owned, + AccessControlled, Database +{ + private static final Function s_initializer; + + private static final As COLLCTYPEADAPTER = + PG_VERSION_NUM >= 150000 ? TextAdapter.INSTANCE : AS_STRING_INSTANCE; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return DATABASEOID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(DatabaseImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.DATNAME, SIMPLE_INSTANCE); + } + + private static RegRole owner(DatabaseImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.DATDBA, REGROLE_INSTANCE); + } + + private static List grants(DatabaseImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.DATACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of Database */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + DatabaseImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_ENCODING; + static final int SLOT_COLLATE; + static final int SLOT_CTYPE; + static final int SLOT_TEMPLATE; + static final int SLOT_ALLOWCONNECTION; + static final int SLOT_CONNECTIONLIMIT; + static final int SLOT_TABLESPACE; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(DatabaseImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(DatabaseImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "encoding", SLOT_ENCODING = i++) + .withDependent( "collate", SLOT_COLLATE = i++) + .withDependent( "ctype", SLOT_CTYPE = i++) + .withDependent( "template", SLOT_TEMPLATE = i++) + .withDependent("allowConnection", SLOT_ALLOWCONNECTION = i++) + .withDependent("connectionLimit", SLOT_CONNECTIONLIMIT = i++) + .withDependent( "tablespace", SLOT_TABLESPACE = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute DATNAME; + static final Attribute DATDBA; + static final Attribute DATACL; + static final Attribute ENCODING; + static final Attribute DATCOLLATE; + static final Attribute DATCTYPE; + static final Attribute DATISTEMPLATE; + static final Attribute DATALLOWCONN; + static final Attribute DATCONNLIMIT; + static final Attribute DATTABLESPACE; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "datname", + "datdba", + "datacl", + "encoding", + "datcollate", + "datctype", + "datistemplate", + "datallowconn", + "datconnlimit", + "dattablespace" + ).iterator(); + + DATNAME = itr.next(); + DATDBA = itr.next(); + DATACL = itr.next(); + ENCODING = itr.next(); + DATCOLLATE = itr.next(); + DATCTYPE = itr.next(); + DATISTEMPLATE = itr.next(); + DATALLOWCONN = itr.next(); + DATCONNLIMIT = itr.next(); + DATTABLESPACE = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static CharsetEncoding encoding(DatabaseImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ENCODING, EncodingAdapter.INSTANCE); + } + + private static String collate(DatabaseImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.DATCOLLATE, COLLCTYPEADAPTER); + } + + private static String ctype(DatabaseImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.DATCTYPE, COLLCTYPEADAPTER); + } + + private static boolean template(DatabaseImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.DATISTEMPLATE, BOOLEAN_INSTANCE); + } + + private static boolean allowConnection(DatabaseImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.DATALLOWCONN, BOOLEAN_INSTANCE); + } + + private static int connectionLimit(DatabaseImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.DATCONNLIMIT, INT4_INSTANCE); + } + + private static Tablespace tablespace(DatabaseImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.DATTABLESPACE, TABLESPACE_INSTANCE); + } + + /* API methods */ + + @Override + public CharsetEncoding encoding() + { + try + { + MethodHandle h = m_slots[SLOT_ENCODING]; + return (CharsetEncoding)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String collate() + { + try + { + MethodHandle h = m_slots[SLOT_COLLATE]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String ctype() + { + try + { + MethodHandle h = m_slots[SLOT_CTYPE]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean template() + { + try + { + MethodHandle h = m_slots[SLOT_TEMPLATE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean allowConnection() + { + try + { + MethodHandle h = m_slots[SLOT_ALLOWCONNECTION]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public int connectionLimit() + { + try + { + MethodHandle h = m_slots[SLOT_CONNECTIONLIMIT]; + return (int)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Tablespace tablespace() + { + try + { + MethodHandle h = m_slots[SLOT_TABLESPACE]; + return (Tablespace)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/DatumImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/DatumImpl.java new file mode 100644 index 000000000..3e4c906b8 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/DatumImpl.java @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.io.Closeable; +import java.io.FilterInputStream; +import java.io.InputStream; +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.adt.spi.Verifier; + +import org.postgresql.pljava.internal.ByteBufferInputStream; +import org.postgresql.pljava.internal.VarlenaWrapper; // javadoc + +import static org.postgresql.pljava.pg.CatalogObjectImpl.notyet; + +/** + * Contains implementation for {@link Datum Datum}. + *

    + * This is also implemented by {@link VarlenaWrapper VarlenaWrapper}, which is + * carried over from PL/Java 1.5.x, where it could originally be constructed + * only from native code. It has been minimally adapted to fit into this new + * scheme, and in future should fit more cleanly. + */ +public interface DatumImpl extends Datum +{ + @Override + default void verify(Verifier.OfBuffer v) throws SQLException + { + throw notyet(); + } + + @Override + default void verify(Verifier.OfStream v) throws SQLException + { + throw notyet(); + } + + /** + * Dissociate the datum from Java and return its address to native code. + */ + long adopt() throws SQLException; + + default String toString(Object o) + { + Class c = (null == o ? this : o).getClass(); + String cn = c.getCanonicalName(); + int pnl = c.getPackageName().length(); + return cn.substring(1 + pnl); + } + + /** + * Implementation of {@link Datum.Input Datum.Input}. + */ + abstract class Input implements Datum.Input, DatumImpl + { + @Override + public String toString() + { + return toString(this); + } + + @Override + public IStream inputStream() throws SQLException + { + return new IStream<>(this); + } + + @Override + public void verify(Verifier.OfStream v) throws SQLException + { + try ( IStream is = inputStream() ) + { + is.verify(v); + } + catch ( IOException e ) + { + throw new SQLException( + "Exception verifying Datum content: " + + e.getMessage(), "XX000", e); + } + } + + /** + * A Datum.Input copied onto the Java heap to depend on no native state, + * so {@code pin} and {@code unpin} are no-ops. + */ + static class JavaCopy extends DatumImpl.Input + { + private ByteBuffer m_buffer; + + public JavaCopy(ByteBuffer b) + { + assert ! b.isDirect() : + "ByteBuffer passed to a JavaCopy Datum constructor is direct"; + m_buffer = b; + } + + @Override + public String toString(Object o) + { + return String.format("%s %s", + super.toString(o), m_buffer); + } + + @Override + public ByteBuffer buffer() throws SQLException + { + ByteBuffer b = m_buffer; + if ( b == null ) + throw new SQLException("Datum used after close", "55000"); + return b; + } + + @Override + public void close() throws IOException + { + m_buffer = null; + } + + @Override + public long adopt() throws SQLException + { + throw new UnsupportedOperationException( + "XXX Datum JavaCopy.adopt"); + } + } + } + + /** + * {@link InputStream InputStream} view of a {@code Datum.Input}. + */ + public static class IStream + extends ByteBufferInputStream implements DatumImpl + { + protected final T m_datum; + + /** + * A duplicate of the {@code Datum.Input}'s byte buffer, + * so its {@code position} and {@code mark} can be updated by the + * {@code InputStream} operations without affecting the original + * (therefore multiple {@code Stream}s may read one {@code Input}). + */ + private final ByteBuffer m_movingBuffer; + + protected IStream(T datum) throws SQLException + { + m_datum = datum; + ByteBuffer b = datum.buffer(); + m_movingBuffer = b.duplicate().order(b.order()); + } + + @Override + public String toString(Object o) + { + return String.format("%s %s", + m_datum.toString(o), m_open ? "open" : "closed"); + } + + @Override + protected void pin() throws IOException + { + if ( ! 
m_open ) + throw new IOException("Read from closed Datum"); + try + { + m_datum.pin(); + } + catch ( SQLException e ) + { + throw new IOException(e.getMessage(), e); + } + } + + @Override + protected void unpin() + { + m_datum.unpin(); + } + + @Override + protected ByteBuffer buffer() + { + return m_movingBuffer; + } + + @Override + public void close() throws IOException + { + if ( m_datum.pinUnlessReleased() ) + return; + try + { + super.close(); + m_datum.close(); + } + finally + { + unpin(); + } + } + + @Override + public long adopt() throws SQLException + { + m_datum.pin(); + try + { + if ( ! m_open ) + throw new SQLException( + "Cannot adopt Datum.Input after " + + "it is closed", "55000"); + return m_datum.adopt(); + } + finally + { + m_datum.unpin(); + } + } + + /** + * Apply a {@code Verifier} to the input data. + *
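To show how the pieces above fit together, here is a minimal sketch of reading a heap-backed Datum.Input through its stream view. The class name is invented, the package declaration is there only because JavaCopy is package-private, and the generic signatures are elided; pin and unpin are no-ops for JavaCopy, so the read path needs no native state.

    package org.postgresql.pljava.pg;

    import java.io.InputStream;
    import java.nio.ByteBuffer;
    import static java.nio.charset.StandardCharsets.US_ASCII;

    class JavaCopySketch
    {
        public static void main(String[] args) throws Exception
        {
            byte[] raw = "hello".getBytes(US_ASCII);
            var datum =                       // heap copy, independent of native memory
                new DatumImpl.Input.JavaCopy(ByteBuffer.wrap(raw));
            try ( InputStream is = datum.inputStream() )  // stream reads a duplicate buffer
            {
                System.out.println(is.read());            // 104, the first byte ('h')
            }
        }
    }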

    + * This should only be necessary if the input wrapper is being used + * directly as an output item, and needs verification that it + * conforms to the format of the target type. + *

    + * The current position must be at the beginning of the stream. The + * verifier must leave it at the end to confirm the entire stream + * was examined. There should be no need to reset the position here, + * as the only anticipated use is during an {@code adopt}, and the + * native code will only care about the varlena's address. + */ + public void verify(Verifier.OfStream v) throws SQLException + { + /* + * This is only called from some client code's adopt() method, + * calls to which are serialized through Backend.THREADLOCK + * anyway, so holding a pin here for the duration doesn't + * further limit concurrency. Hold m_lock's monitor also to + * block any extraneous reading interleaved with the verifier. + */ + m_datum.pin(); + try + { + ByteBuffer buf = buffer(); + synchronized ( m_lock ) + { + if ( 0 != buf.position() ) + throw new SQLException( + "Input data to be verified " + + " not positioned at start", + "55000"); + InputStream dontCloseMe = new FilterInputStream(this) + { + @Override + public void close() throws IOException { } + }; + v.verify(dontCloseMe); + if ( 0 != buf.remaining() ) + throw new SQLException( + "Verifier finished prematurely"); + } + } + catch ( SQLException | RuntimeException e ) + { + throw e; + } + catch ( Exception e ) + { + throw new SQLException( + "Exception verifying Datum content: " + + e.getMessage(), "XX000", e); + } + finally + { + m_datum.unpin(); + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/DatumUtils.java b/pljava/src/main/java/org/postgresql/pljava/pg/DatumUtils.java new file mode 100644 index 000000000..af1d641fe --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/DatumUtils.java @@ -0,0 +1,1304 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.io.Closeable; +import java.io.IOException; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import static java.nio.ByteOrder.nativeOrder; +import java.nio.BufferUnderflowException; +import java.nio.IntBuffer; +import java.nio.LongBuffer; + +import java.sql.SQLException; + +import java.util.BitSet; +import java.util.List; + +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.MemoryContext; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.ResourceOwner; +import static org.postgresql.pljava.model.MemoryContext.TopTransactionContext; +import static + org.postgresql.pljava.model.ResourceOwner.TopTransactionResourceOwner; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.DualState; +import org.postgresql.pljava.internal.LifespanImpl.Addressed; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.notyet; +import static org.postgresql.pljava.pg.ModelConstants.*; + +/** + * Implementations of {@link Datum.Accessor} and a collection of related + * static methods. 
+ */ +public /*XXX*/ class DatumUtils +{ + static final boolean BIG_ENDIAN = ByteOrder.BIG_ENDIAN == nativeOrder(); + + public static TupleTableSlot.Indexed indexedTupleSlot( + RegType type, int elements, ByteBuffer nulls, ByteBuffer values) + { + TupleDescriptor td = new TupleDescImpl.OfType(type); + return new TupleTableSlotImpl.Heap.Indexed(td, elements, nulls, values); + } + + public static long addressOf(ByteBuffer bb) + { + if ( bb.isDirect() ) + return _addressOf(bb); + throw new IllegalArgumentException( + "addressOf(non-direct " + bb + ")"); + } + + public static long fetchPointer(ByteBuffer bb, int offset) + { + return Accessor.ByReference.Deformed.s_pointerAccessor + .getLongZeroExtended(bb, offset); + } + + public static void storePointer(ByteBuffer bb, int offset, long value) + { + /* + * Stopgap implementation; use s_pointer_accessor as above once + * accessors have store methods. + */ + if ( 4 == SIZEOF_DATUM ) + bb.putInt(offset, (int)value); + else + bb.putLong(offset, value); + } + + public static ByteBuffer asReadOnlyNativeOrder(ByteBuffer bb) + { + if ( null == bb ) + return bb; + if ( ! bb.isReadOnly() ) + bb = bb.asReadOnlyBuffer(); + return bb.order(nativeOrder()); + } + + static ByteBuffer mapFixedLength(long nativeAddress, int length) + { + if ( 0 == nativeAddress ) + return null; + ByteBuffer bb = _map(nativeAddress, length); + return asReadOnlyNativeOrder(bb); + } + + public static ByteBuffer mapFixedLength( + ByteBuffer bb, int offset, int length) + { + // Java 13: bb.slice(offset, length).order(bb.order()) + ByteBuffer bnew = bb.duplicate(); + bnew.position(offset).limit(offset + length); + return bnew.slice().order(bb.order()); + } + + static ByteBuffer mapCString(long nativeAddress) + { + if ( 0 == nativeAddress ) + return null; + ByteBuffer bb = _mapCString(nativeAddress); + if ( null == bb ) + { + /* + * This may seem an odd exception to throw in this case (the + * native code found no NUL terminator within the maximum size + * allowed for a ByteBuffer), but it is the same exception that + * would be thrown by the mapCString(ByteBuffer,int) flavor if + * it found no NUL within the bounds of its source buffer. + */ + throw new BufferUnderflowException(); + } + return asReadOnlyNativeOrder(bb); + } + + public static ByteBuffer mapCString(ByteBuffer bb, int offset) + { + ByteBuffer bnew = bb.duplicate(); + int i = offset; + while ( 0 != bnew.get(i) ) + ++i; + bnew.position(offset).limit(i); + return bnew.slice().order(bb.order()); + } + + static Datum.Input mapVarlena(long nativeAddress, + ResourceOwner resowner, MemoryContext memcontext) + { + long ro = ((Addressed)resowner).address(); + long mc = ((Addressed)memcontext).address(); + return doInPG(() -> _mapVarlena(null, nativeAddress, ro, mc)); + } + + static Datum.Input mapVarlena(ByteBuffer bb, long offset, + ResourceOwner resowner, MemoryContext memcontext) + { + long ro = ((Addressed)resowner).address(); + long mc = ((Addressed)memcontext).address(); + return doInPG(() -> _mapVarlena(bb, offset, ro, mc)); + } + + /** + * For now, just return the inline size (the size to be skipped if stepping + * over this varlena in a heap tuple). + *

    + * This is a reimplementation of some of the top of {@code postgres.h}, so + * that this common operation can be done without a JNI call to the C code. + */ + public static int inspectVarlena(ByteBuffer bb, int offset) + { + byte b1 = bb.get(offset); + byte shortbit; + int tagsize; + + if ( BIG_ENDIAN ) + { + shortbit = (byte)(b1 & 0x80); + if ( 0 == shortbit ) // it has a four-byte header and we're aligned + { + // here is where to discern if it's inline compressed if we care + return bb.getInt(offset) & 0x3FFFFFFF; + } + if ( shortbit != b1 ) // it is inline and short + return b1 & 0x7F; + } + else // little endian + { + shortbit = (byte)(b1 & 0x01); + if ( 0 == shortbit ) // it has a four-byte header and we're aligned + { + // here is where to discern if it's inline compressed if we care + return bb.getInt(offset) >>> 2; + } + if ( shortbit != b1 ) // it is inline and short + return b1 >>> 1 & 0x7F; + } + + /* + * If we got here, it is a TOAST pointer of some kind. Its identifying + * tag is the next byte, and its total size is VARHDRSZ_EXTERNAL plus + * something that depends on the tag. + */ + switch ( bb.get(offset + 1) ) + { + case VARTAG_INDIRECT: + tagsize = SIZEOF_varatt_indirect; + break; + case VARTAG_EXPANDED_RO: + case VARTAG_EXPANDED_RW: + tagsize = SIZEOF_varatt_expanded; + break; + case VARTAG_ONDISK: + tagsize = SIZEOF_varatt_external; + break; + default: + throw new AssertionError("unrecognized TOAST vartag"); + } + + return VARHDRSZ_EXTERNAL + tagsize; + } + + static Datum.Input asAlwaysCopiedDatum( + ByteBuffer bb, int offset, int length) + { + byte[] bytes = new byte [ length ]; + // Java 13: bb.get(offset, bytes); + ((ByteBuffer)bb.duplicate().position(offset)).get(bytes); + ByteBuffer copy = ByteBuffer.wrap(bytes); + return new DatumImpl.Input.JavaCopy(asReadOnlyNativeOrder(copy)); + } + + /** + * Turns a {@link BitSet} into a direct-allocated {@link ByteBuffer} whose + * address can be passed in C code to PostgreSQL's {@code bms_copy} and used + * as a {@code bitmapset}. + *
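A worked instance of the little-endian 1-byte-header case handled by inspectVarlena above; this standalone sketch re-derives the arithmetic rather than calling the real method (which needs the backend-supplied ModelConstants), and the class and method names are invented.

    public class ShortVarlenaSketch
    {
        // little-endian 1-byte header: low bit set, upper 7 bits hold the total size
        static int shortVarlenaSizeLE(byte header)
        {
            return header >>> 1 & 0x7F;
        }

        public static void main(String[] args)
        {
            // "abcd" stored as a short varlena: 1 header byte + 4 data bytes = 5,
            // so PostgreSQL writes the header byte as (5 << 1) | 1 = 0x0B
            byte header = (byte)((5 << 1) | 1);
            System.out.println(shortVarlenaSizeLE(header));   // prints 5
        }
    }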

    + * While the {@code ByteBuffer} is direct-allocated, it is allocated by + * Java, not by {@code palloc}, and the PostgreSQL code must not be allowed + * to try to grow, shrink, or {@code pfree} it. Hence the {@code bms_copy}. + *

    + * If the result of operations in C will be wanted in Java, the fuss of + * allocating a different direct {@code ByteBuffer} for the result can be + * avoided by carefully letting the C code update this + * {@code bitmapset} in place, so that no resizing or freeing can occur. + * That can be done by OR-ing one extra bit into the Java {@code BitSet} in + * advance, at an index higher than the bits of interest, and having the C + * code manipulate only the lower-indexed bits (such as by using a + * {@code bms_prev_member} loop unrolled with one first call unused). + *

    + * While the {@code ByteBuffer} returned is read-only (as far as Java is + * concerned), if it is updated in place by C code, it can be passed + * afterward to {@link #fromBitmapset fromBitmapset} to recover the result. + */ + static ByteBuffer toBitmapset(BitSet b) + { + if ( BITS_PER_BITMAPWORD == Long.SIZE ) + { + long[] ls = b.toLongArray(); + int size = OFFSET_Bitmapset_words + ls.length * Long.BYTES; + ByteBuffer bb = + ByteBuffer.allocateDirect(size + Long.BYTES - 1) + .alignedSlice(Long.BYTES); + bb.order(nativeOrder()); + if ( PG_VERSION_NUM >= 160000 ) + { + assert SIZEOF_NodeTag == Integer.BYTES : "sizeof NodeTag"; + bb.putInt(T_Bitmapset); + } + assert SIZEOF_INT == Integer.BYTES : "sizeof int"; + bb.putInt(ls.length).position(OFFSET_Bitmapset_words); + LongBuffer dst = bb.asLongBuffer(); + dst.put(ls); + return asReadOnlyNativeOrder(bb.rewind()); + } + else if ( BITS_PER_BITMAPWORD == Integer.SIZE ) + { + byte[] bs = b.toByteArray(); + int widthLessOne = Integer.BYTES - 1; + int size = (bs.length + widthLessOne) & ~widthLessOne; + int words = size / Integer.BYTES; + size += OFFSET_Bitmapset_words; + ByteBuffer bb = + ByteBuffer.allocateDirect(size + Integer.BYTES - 1) + .alignedSlice(Integer.BYTES); + bb.order(nativeOrder()); + if ( PG_VERSION_NUM >= 160000 ) + { + assert SIZEOF_NodeTag == Integer.BYTES : "sizeof NodeTag"; + bb.putInt(T_Bitmapset); + } + assert SIZEOF_INT == Integer.BYTES : "sizeof int"; + bb.putInt(words).position(OFFSET_Bitmapset_words); + IntBuffer dst = bb.asIntBuffer(); + + IntBuffer src = ByteBuffer.wrap(bs) + .order(ByteOrder.LITTLE_ENDIAN).asIntBuffer(); + dst.put(src); + return asReadOnlyNativeOrder(bb.rewind()); + } + else + throw new AssertionError( + "no support for BITS_PER_BITMAPWORD " + BITS_PER_BITMAPWORD); + } + + static BitSet fromBitmapset(ByteBuffer bb) + { + bb.rewind().order(nativeOrder()); + if ( PG_VERSION_NUM >= 160000 ) + { + assert SIZEOF_NodeTag == Integer.BYTES : "sizeof NodeTag"; + if ( T_Bitmapset != bb.getInt() ) + throw new AssertionError("not a bitmapset: " + bb); + } + assert SIZEOF_INT == Integer.BYTES : "sizeof int"; + int words = bb.getInt(); + bb.position(OFFSET_Bitmapset_words); + + if ( BITS_PER_BITMAPWORD == Long.SIZE ) + { + LongBuffer lb = bb.asLongBuffer(); + if ( words > lb.remaining() ) + throw new AssertionError("corrupted bitmapset: " + bb); + if ( words < lb.remaining() ) + lb.limit(words); + return BitSet.valueOf(lb); + } + else if ( BITS_PER_BITMAPWORD == Integer.SIZE ) + { + IntBuffer src = bb.asIntBuffer(); + if ( words > src.remaining() ) + throw new AssertionError("corrupted bitmapset: " + bb); + if ( words < src.remaining() ) + src.limit(words); + ByteBuffer le = + ByteBuffer.allocate(bb.position() + words * Integer.SIZE); + le.order(ByteOrder.LITTLE_ENDIAN); + IntBuffer dst = le.asIntBuffer(); + dst.put(src); + return BitSet.valueOf(le); + } + else + throw new AssertionError( + "no support for BITS_PER_BITMAPWORD " + BITS_PER_BITMAPWORD); + } + + static BitSet fromBitmapset(long nativeAddress) + { + /* + * Null is the PostgreSQL representation of an empty bitmapset. 
+ */ + if ( 0 == nativeAddress ) + return new BitSet(); + + ByteBuffer bb = doInPG(() -> _mapBitmapset(nativeAddress)); + return fromBitmapset(bb.asReadOnlyBuffer()); + } + + private static native long _addressOf(ByteBuffer bb); + + private static native ByteBuffer _map(long nativeAddress, int length); + + private static native ByteBuffer _mapBitmapset(long nativeAddress); + + private static native ByteBuffer _mapCString(long nativeAddress); + + /* + * Uses offset as address directly if bb is null. + */ + private static native Datum.Input _mapVarlena( + ByteBuffer bb, long offset, long resowner, long memcontext); + + /** + * Abstract superclass of datum accessors. + *
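A sketch of the update-in-place trick described above, using only members visible in this file; it is illustrative rather than runnable on its own, since toBitmapset relies on the backend-supplied ModelConstants, the native consumer of the address is only hinted at in a comment, and the class name and GUARD index are invented.

    package org.postgresql.pljava.pg;   // toBitmapset/fromBitmapset are package-private

    import java.nio.ByteBuffer;
    import java.util.BitSet;

    class BitmapsetRoundTripSketch
    {
        static final int GUARD = 1024;    // any index above the bits of interest

        static BitSet roundTrip(BitSet interesting)
        {
            BitSet padded = (BitSet)interesting.clone();
            padded.set(GUARD);            // keeps the C side from resizing or freeing
            ByteBuffer bms = DatumUtils.toBitmapset(padded);
            long addr = DatumUtils.addressOf(bms);
            // ... hand addr to C code that clears/sets only bits below GUARD,
            // updating the bitmapset in place (hypothetical native call) ...
            BitSet result = DatumUtils.fromBitmapset(bms);
            result.clear(GUARD);          // drop the guard bit again
            return result;
        }
    }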

    + * Accessors handle fixed-length types no wider than a {@code Datum}; for + * such types, they support access as all suitable Java primitive types as + * well as {@code Datum}. For wider or variable-length types, only + * {@code Datum} access applies. For by-value, only power-of-2 sizes and + * corresponding alignments allowed: + * [source]. + *

    + * The primitive-typed methods all have {@code SignExtended} and + * {@code ZeroExtended} flavors (except for {@code short} and {@code char} + * where the flavor is explicit, and {@code byte} which has no narrower + * type to extend). The {@code get} methods return the specified type, + * which means the choice of flavor will have no detectable effect on the + * return value when the value being read is exactly that width (as always + * in Java, a {@code long}, {@code int}, or {@code byte} will be treated as + * signed); the flavor will make a difference if the method is used to read + * a value that is actually narrower (say, {@code getLongZeroExtended} or + * {@code getLongSignExtended} on an {@code int}-sized field). + * @param prevents inadvertent mixing up of accessors built over + * different native-memory access access schemes, such as + * {@link ByteBuffer ByteBuffer} or, in future, {@code MemorySegment}. + * @param prevents inadvertent mixing up of accessors that differ in + * their expected {@link Datum.Layout Datum.Layout}. + */ + abstract static class Accessor + implements Datum.Accessor + { + static Datum.Accessor forDeformed( + boolean byValue, short length) + { + if ( byValue ) + return ByValue.Deformed.ACCESSORS [ length ]; + if ( 0 <= length ) + { + /* + * specific by-reference accessors are always available for + * lengths up to Long.BYTES, even in 4-byte-datum builds. The + * by-reference value doesn't have to fit in a Datum, and it + * may be useful to access it as a Java primitive. + */ + if ( Long.BYTES >= length ) + return ByReference.Deformed.ACCESSORS [ length ]; + return ByReference.Deformed.ACCESSORS [ + ByReference.FIXED_ACCESSOR_INDEX + ]; + } + if ( -1 == length ) + return ByReference.Deformed.ACCESSORS [ + ByReference.VARLENA_ACCESSOR_INDEX + ]; + if ( -2 == length ) + return ByReference.Deformed.ACCESSORS [ + ByReference.CSTRING_ACCESSOR_INDEX + ]; + throw new IllegalArgumentException( + "invalid attribute length: "+length); + } + + static Datum.Accessor forHeap( + boolean byValue, short length) + { + if ( byValue ) + return ByValue.Heap.ACCESSORS [ length ]; + if ( 0 <= length ) + { + /* + * specific by-reference accessors are always available for + * lengths up to Long.BYTES, even in 4-byte-datum builds. The + * by-reference value doesn't have to fit in a Datum, and it + * may be useful to access it as a Java primitive. 
+ */ + if ( Long.BYTES >= length ) + return ByReference.Heap.ACCESSORS [ length ]; + return ByReference.Heap.ACCESSORS [ + ByReference.FIXED_ACCESSOR_INDEX + ]; + } + if ( -1 == length ) + return ByReference.Heap.ACCESSORS [ + ByReference.VARLENA_ACCESSOR_INDEX + ]; + if ( -2 == length ) + return ByReference.Heap.ACCESSORS [ + ByReference.CSTRING_ACCESSOR_INDEX + ]; + throw new IllegalArgumentException( + "invalid attribute length: "+length); + } + + @Override + public long getLongSignExtended(B buf, int off) + { + return getIntSignExtended(buf, off); + } + + @Override + public long getLongZeroExtended(B buf, int off) + { + return Integer.toUnsignedLong(getIntZeroExtended(buf, off)); + } + + @Override + public double getDouble(B buf, int off) + { + return getFloat(buf, off); + } + + @Override + public int getIntSignExtended(B buf, int off) + { + return getShort(buf, off); + } + + @Override + public int getIntZeroExtended(B buf, int off) + { + return getChar(buf, off); + } + + @Override + public float getFloat(B buf, int off) + { + throw new AccessorWidthException(); + } + + @Override + public short getShort(B buf, int off) + { + return getByte(buf, off); + } + + @Override + public char getChar(B buf, int off) + { + return (char)Byte.toUnsignedInt(getByte(buf, off)); + } + + @Override + public byte getByte(B buf, int off) + { + throw new AccessorWidthException(); + } + + @Override + public boolean getBoolean(B buf, int off) + { + return 0 != getLongZeroExtended(buf, off); + } + + @Override + public Datum.Input getDatum(B buf, int off, Attribute a) + { + throw new AccessorWidthException(); + } + + /** + * Superclass of accessors for by-value types. + */ + static class ByValue + extends Accessor + { + /** + * Superclass of accessors for by-value types in deformed layout. + *

    + * Convention: when invoking a deformed accessor method, the offset + * shall be a multiple of + * {@link ModelConstants#SIZEOF_DATUM SIZEOF_DATUM}. + */ + static class Deformed extends ByValue + { + @SuppressWarnings("unchecked") + static final ByValue[] ACCESSORS = + new ByValue[ 1 + SIZEOF_DATUM ]; + static + { + ByValue none = new ByValue<>(); + ( + (8 == SIZEOF_DATUM) + ? List.>of( + none, + new DV81(), new DV82(), none, new DV84(), + none, none, none, new DV88() + ) + : List.>of( + none, + new DV41(), new DV42(), none, new DV44() + ) + ).toArray(ACCESSORS); + } + } + + /** + * Superclass of accessors for by-value types in heap layout. + *

    + * Convention: when invoking a heap accessor method, the offset + * shall already have been adjusted for alignment (according to + * PostgreSQL's alignment rules, that is, so the right value will be + * accessed). Java's {@code ByteBuffer} API will still check and + * possibly split accesses according to the hardware's rules; + * there's no way to talk it out of that, so there's little to gain + * by being more clever here. + */ + static class Heap extends ByValue + { + @SuppressWarnings("unchecked") + static final ByValue[] ACCESSORS = + new ByValue[ 1 + SIZEOF_DATUM ]; + static + { + ByValue none = new ByValue<>(); + ( + (8 == SIZEOF_DATUM) + ? List.>of( + none, + new HV1(), new HV2(), none, new HV4(), + none, none, none, new HV8() + ) + : List.>of( + none, + new HV1(), new HV2(), none, new HV4() + ) + ).toArray(ACCESSORS); + } + } + } + + /** + * By-value accessor for 8-byte fields in deformed layout with 8-byte + * datums. + */ + static class DV88 extends ByValue.Deformed + { + @Override + public long getLongSignExtended(ByteBuffer bb, int off) + { + return bb.getLong(off); + } + @Override + public long getLongZeroExtended(ByteBuffer bb, int off) + { + return bb.getLong(off); + } + @Override + public double getDouble(ByteBuffer bb, int off) + { + return bb.getDouble(off); + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + return asAlwaysCopiedDatum(bb, off, 8); + } + } + + /** + * By-value accessor for 4-byte fields in deformed layout with 8-byte + * datums. + */ + static class DV84 extends ByValue.Deformed + { + @Override + public int getIntSignExtended(ByteBuffer bb, int off) + { + long r = bb.getLong(off); + return (int)r; + } + @Override + public int getIntZeroExtended(ByteBuffer bb, int off) + { + long r = bb.getLong(off); + return (int)r; + } + @Override + public float getFloat(ByteBuffer bb, int off) + { + return bb.getFloat(off); + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + if ( BIG_ENDIAN ) + off += SIZEOF_DATUM - 4; + return asAlwaysCopiedDatum(bb, off, 4); + } + } + + /** + * By-value accessor for 2-byte fields in deformed layout with 8-byte + * datums. + */ + static class DV82 extends ByValue.Deformed + { + @Override + public short getShort(ByteBuffer bb, int off) + { + long r = bb.getLong(off); + return (short)r; + } + @Override + public char getChar(ByteBuffer bb, int off) + { + long r = bb.getLong(off); + return (char)r; + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + if ( BIG_ENDIAN ) + off += SIZEOF_DATUM - 2; + return asAlwaysCopiedDatum(bb, off, 2); + } + } + + /** + * By-value accessor for 1-byte fields in deformed layout with 8-byte + * datums. + */ + static class DV81 extends ByValue.Deformed + { + @Override + public byte getByte(ByteBuffer bb, int off) + { + long r = bb.getLong(off); + return (byte)r; + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + if ( BIG_ENDIAN ) + off += SIZEOF_DATUM - 1; + return asAlwaysCopiedDatum(bb, off, 1); + } + } + + /** + * By-value accessor for 4-byte fields in deformed layout with 4-byte + * datums. 
+ */ + static class DV44 extends ByValue.Deformed + { + @Override + public int getIntSignExtended(ByteBuffer bb, int off) + { + return bb.getInt(off); + } + @Override + public int getIntZeroExtended(ByteBuffer bb, int off) + { + return bb.getInt(off); + } + @Override + public float getFloat(ByteBuffer bb, int off) + { + return bb.getFloat(off); + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + return asAlwaysCopiedDatum(bb, off, 4); + } + } + + /** + * By-value accessor for 2-byte fields in deformed layout with 4-byte + * datums. + */ + static class DV42 extends ByValue.Deformed + { + @Override + public short getShort(ByteBuffer bb, int off) + { + int r = bb.getInt(off); + return (short)r; + } + @Override + public char getChar(ByteBuffer bb, int off) + { + int r = bb.getInt(off); + return (char)r; + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + if ( BIG_ENDIAN ) + off += SIZEOF_DATUM - 2; + return asAlwaysCopiedDatum(bb, off, 2); + } + } + + /** + * By-value accessor for 1-byte fields in deformed layout with 4-byte + * datums. + */ + static class DV41 extends ByValue.Deformed + { + @Override + public byte getByte(ByteBuffer bb, int off) + { + int r = bb.getInt(off); + return (byte)r; + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + if ( BIG_ENDIAN ) + off += SIZEOF_DATUM - 1; + return asAlwaysCopiedDatum(bb, off, 1); + } + } + + /** + * By-value accessor for 8-byte fields in heap layout. + */ + static class HV8 extends ByValue.Heap + { + @Override + public long getLongSignExtended(ByteBuffer bb, int off) + { + return bb.getLong(off); + } + @Override + public long getLongZeroExtended(ByteBuffer bb, int off) + { + return bb.getLong(off); + } + @Override + public double getDouble(ByteBuffer bb, int off) + { + return bb.getDouble(off); + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + return asAlwaysCopiedDatum(bb, off, 8); + } + } + + /** + * By-value accessor for 4-byte fields in heap layout. + */ + static class HV4 extends ByValue.Heap + { + @Override + public int getIntSignExtended(ByteBuffer bb, int off) + { + return bb.getInt(off); + } + @Override + public int getIntZeroExtended(ByteBuffer bb, int off) + { + return bb.getInt(off); + } + @Override + public float getFloat(ByteBuffer bb, int off) + { + return bb.getFloat(off); + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + return asAlwaysCopiedDatum(bb, off, 4); + } + } + + /** + * By-value accessor for 2-byte fields in heap layout. + */ + static class HV2 extends ByValue.Heap + { + @Override + public short getShort(ByteBuffer bb, int off) + { + return bb.getShort(off); + } + @Override + public char getChar(ByteBuffer bb, int off) + { + return bb.getChar(off); + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + return asAlwaysCopiedDatum(bb, off, 2); + } + } + + /** + * By-value accessor for 1-byte fields in heap layout. + */ + static class HV1 extends ByValue.Heap + { + @Override + public byte getByte(ByteBuffer bb, int off) + { + return bb.get(off); + } + @Override + public Datum.Input getDatum(ByteBuffer bb, int off, Attribute a) + { + return asAlwaysCopiedDatum(bb, off, 1); + } + } + + /** + * Abstract superclass of accessors for by-reference types. + *

    + * There are always length-specific accessors for each length through 8, + * even in 4-byte-datum builds, plus accessors for fixed lengths greater + * than 8, cstrings, and varlenas. + */ + /* + * In the ByReference case, the accessors for Deformed and Heap differ + * only in what the map*Reference() methods do, so the accessors are + * all made inner classes of Impl, and instantiated in the + * constructors of its subclasses Deformed and Heap, so they have access + * to the right copy/map methods by enclosure rather than inheritance. + * The constructor of each (Heap and Deformed) is invoked just once, + * statically, to populate the ACCESSORS arrays. + */ + abstract static class ByReference + extends Accessor + { + static final int FIXED_ACCESSOR_INDEX = 9; + static final int CSTRING_ACCESSOR_INDEX = 10; + static final int VARLENA_ACCESSOR_INDEX = 11; + static final int ACCESSORS_ARRAY_LENGTH = 12; + + /** + * Superclass of accessors for by-reference types in deformed + * layout. + */ + static final class Deformed extends Impl + { + @SuppressWarnings("unchecked") + static final ByReference[] ACCESSORS = + new ByReference[ACCESSORS_ARRAY_LENGTH]; + + static final ByValue s_pointerAccessor; + + static + { + new Deformed(); + s_pointerAccessor = + ByValue.Deformed.ACCESSORS[SIZEOF_DATUM]; + } + + private Deformed() + { + List.>of( + this, + new R1<>(), new R2<>(), new R3<>(), new R4<>(), + new R5<>(), new R6<>(), new R7<>(), new R8<>(), + new Fixed<>(), new CString<>(), new Varlena<>() + ).toArray(ACCESSORS); + } + + @Override + protected ByteBuffer mapFixedLengthReference( + ByteBuffer bb, int off, int len) + { + long p = s_pointerAccessor.getLongZeroExtended(bb, off); + return mapFixedLength(p, len); + } + + @Override + protected ByteBuffer mapCStringReference(ByteBuffer bb, int off) + { + long p = s_pointerAccessor.getLongZeroExtended(bb, off); + return mapCString(p); + } + + @Override + protected Datum.Input mapVarlenaReference(ByteBuffer b, int off, + ResourceOwner ro, MemoryContext mc) + { + long p = s_pointerAccessor.getLongZeroExtended(b, off); + return mapVarlena(p, ro, mc); + } + } + + /** + * Superclass of accessors for by-reference types in heap layout. + *
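The comment above notes that the ByReference accessors get their layout-specific behavior "by enclosure rather than inheritance": the inner accessor classes are written once against abstract map methods of the enclosing Impl, and each concrete enclosing subclass supplies its own mapping. A compact, hypothetical sketch of that arrangement follows; none of these names appear in the patch.

abstract class Outer
{
	abstract String map(int off);      // layout-specific behavior

	class Reader                       // accessor code shared by both layouts
	{
		String read(int off)
		{
			return map(off);           // dispatches through the enclosing instance
		}
	}

	static final class LayoutA extends Outer
	{
		@Override String map(int off) { return "A@" + off; }
	}

	static final class LayoutB extends Outer
	{
		@Override String map(int off) { return "B@" + off; }
	}

	public static void main(String[] args)
	{
		System.out.println(new LayoutA().new Reader().read(4)); // prints A@4
		System.out.println(new LayoutB().new Reader().read(4)); // prints B@4
	}
}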

    + * Convention: when invoking a heap accessor method, the offset + * shall already have been adjusted for alignment (according to + * PostgreSQL's alignment rules, that is, so the right value will be + * accessed). Java's {@code ByteBuffer} API will still check and + * possibly split accesses according to the hardware's rules; + * there's no way to talk it out of that, so there's little to gain + * by being more clever here. + */ + /* + * The ByReference case includes accessors for non-power-of-two + * sizes. To keep things simple here, they just put the widest + * accesses first, which should be as good as it gets in the most + * expected case where the initial offset is aligned, and Java will + * make other cases work too. + */ + static class Heap extends Impl + { + @SuppressWarnings("unchecked") + static final ByReference[] ACCESSORS = + new ByReference[ACCESSORS_ARRAY_LENGTH]; + + static + { + new Heap(); + } + + private Heap() + { + List.>of( + this, + new R1<>(), new R2<>(), new R3<>(), new R4<>(), + new R5<>(), new R6<>(), new R7<>(), new R8<>(), + new Fixed<>(), new CString<>(), new Varlena<>() + ).toArray(ACCESSORS); + } + + @Override + protected ByteBuffer mapFixedLengthReference( + ByteBuffer bb, int off, int len) + { + return mapFixedLength(bb, off, len); + } + + @Override + protected ByteBuffer mapCStringReference(ByteBuffer bb, int off) + { + return mapCString(bb, off); + } + + @Override + protected Datum.Input mapVarlenaReference(ByteBuffer b, int off, + ResourceOwner ro, MemoryContext mc) + { + return mapVarlena(b, off, ro, mc); + } + } + + /** + * Abstract class declaring the by-reference memory-mapping methods + * whose behavior is determined by layout. + */ + abstract static class Impl + extends ByReference + { + abstract ByteBuffer mapFixedLengthReference( + ByteBuffer bb, int off, int len); + + abstract ByteBuffer mapCStringReference(ByteBuffer bb, int off); + + /* + * If the varlena is a TOAST pointer and can be parked until + * needed by pinning a snapshot, ro is the ResourceOwner it will + * be pinned to. If the content gets fetched, uncompressed, or + * copied, it will be into a new memory context with mc as its + * parent. 
+ */ + abstract Datum.Input mapVarlenaReference(ByteBuffer bb, int off, + ResourceOwner ro, MemoryContext mc); + + Datum.Input copyFixedLengthReference( + ByteBuffer b, int off, int len) + { + return + asAlwaysCopiedDatum( + mapFixedLengthReference(b, off, len), 0, len); + } + + class R8 extends ByReference + { + @Override + public long getLongSignExtended(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 8).getLong(); + } + @Override + public long getLongZeroExtended(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 8).getLong(); + } + @Override + public double getDouble(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 8).getDouble(); + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + return copyFixedLengthReference(bb, off, 8); + } + } + + class R7 extends ByReference + { + @Override + public long getLongSignExtended(ByteBuffer bb, int off) + { + long r = getLongZeroExtended(bb, off); + return r | (0L - ((r & 0x80_0000_0000_0000L) << 1)); + } + @Override + public long getLongZeroExtended(ByteBuffer bb, int off) + { + ByteBuffer mb = mapFixedLengthReference(bb, off, 7); + long r; + if ( BIG_ENDIAN ) + { + r = Integer.toUnsignedLong(mb.getInt()) << 24; + r |= (long)mb.getChar() << 8; + r |= Byte.toUnsignedLong(mb.get()); + return r; + } + r = Integer.toUnsignedLong(mb.getInt()); + r |= (long)mb.getChar() << 32; + r |= Byte.toUnsignedLong(mb.get()) << 48; + return r; + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + return copyFixedLengthReference(bb, off, 7); + } + } + + class R6 extends ByReference + { + @Override + public long getLongSignExtended(ByteBuffer bb, int off) + { + long r = getLongZeroExtended(bb, off); + return r | (0L - ((r & 0x8000_0000_0000L) << 1)); + } + @Override + public long getLongZeroExtended(ByteBuffer bb, int off) + { + ByteBuffer mb = mapFixedLengthReference(bb, off, 6); + long r; + if ( BIG_ENDIAN ) + { + r = Integer.toUnsignedLong(mb.getInt()) << 16; + r |= (long)mb.getChar(); + return r; + } + r = Integer.toUnsignedLong(mb.getInt()); + r |= (long)mb.getChar() << 32; + return r; + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + return copyFixedLengthReference(bb, off, 6); + } + } + + class R5 extends ByReference + { + @Override + public long getLongSignExtended(ByteBuffer bb, int off) + { + long r = getLongZeroExtended(bb, off); + return r | (0L - ((r & 0x80_0000_0000L) << 1)); + } + @Override + public long getLongZeroExtended(ByteBuffer bb, int off) + { + ByteBuffer mb = mapFixedLengthReference(bb, off, 5); + long r; + if ( BIG_ENDIAN ) + { + r = Integer.toUnsignedLong(mb.getInt()) << 8; + r |= Byte.toUnsignedLong(mb.get()); + return r; + } + r = Integer.toUnsignedLong(mb.getInt()); + r |= Byte.toUnsignedLong(mb.get()) << 32; + return r; + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + return copyFixedLengthReference(bb, off, 5); + } + } + + class R4 extends ByReference + { + @Override + public int getIntSignExtended(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 4).getInt(); + } + @Override + public int getIntZeroExtended(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 4).getInt(); + } + @Override + public float getFloat(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 4).getFloat(); + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + 
{ + return copyFixedLengthReference(bb, off, 4); + } + } + + class R3 extends ByReference + { + @Override + public int getIntSignExtended(ByteBuffer bb, int off) + { + int r = getIntZeroExtended(bb, off); + return r | (0 - ((r & 0x80_0000) << 1)); + } + @Override + public int getIntZeroExtended(ByteBuffer bb, int off) + { + ByteBuffer mb = mapFixedLengthReference(bb, off, 3); + int r; + if ( BIG_ENDIAN ) + { + r = (int)mb.getChar() << 8; + r |= Byte.toUnsignedInt(mb.get()); + return r; + } + r = (int)mb.getChar(); + r |= Byte.toUnsignedInt(mb.get()) << 16; + return r; + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + return copyFixedLengthReference(bb, off, 3); + } + } + + class R2 extends ByReference + { + @Override + public short getShort(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 2).getShort(); + } + @Override + public char getChar(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 2).getChar(); + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + return copyFixedLengthReference(bb, off, 2); + } + } + + class R1 extends ByReference + { + @Override + public byte getByte(ByteBuffer bb, int off) + { + return mapFixedLengthReference(bb, off, 1).get(); + } + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + return copyFixedLengthReference(bb, off, 1); + } + } + + class Fixed extends ByReference + { + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + int len = a.length(); + if ( len <= NAMEDATALEN ) + return copyFixedLengthReference(bb, off, len); + // XXX even copy bigger ones, for now + return copyFixedLengthReference(bb, off, len); + } + } + + class CString extends ByReference + { + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + ByteBuffer bnew = mapCStringReference(bb, off); + // XXX for now, return a Java copy regardless of size + return asAlwaysCopiedDatum(bnew, 0, bnew.remaining()); + } + } + + class Varlena extends ByReference + { + @Override + public Datum.Input getDatum( + ByteBuffer bb, int off, Attribute a) + { + // XXX no control over resowner and context for now + return mapVarlenaReference(bb, off, + TopTransactionResourceOwner(), + TopTransactionContext()); + } + } + } + } + } + + private static class AccessorWidthException extends RuntimeException + { + AccessorWidthException() + { + super(null, null, false, false); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ExprContextImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ExprContextImpl.java new file mode 100644 index 000000000..dc3174031 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ExprContextImpl.java @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import static java.util.Arrays.copyOf; +import java.util.BitSet; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; + +import org.postgresql.pljava.internal.DualState; +import static org.postgresql.pljava.internal.DualState.m; +import org.postgresql.pljava.internal.LifespanImpl; + +import org.postgresql.pljava.PLJavaBasedLanguage; // javadoc + +import org.postgresql.pljava.model.MemoryContext; + +/** + * A lazily-created mirror of a PostgreSQL ExprContext, exposing only as much + * as might be useful in the dispatching of set-returning functions. + *

    + * There is no PL/Java API interface that corresponds to this model; it is only + * used in the implementation module. It may be exposed in the API as an object + * that implements {@code Lifespan} but is otherwise unspecified. + *

    + * PostgreSQL is creating, resetting, and deleting ExprContexts all the time, + * and most of them will never be visible in PL/Java; one of these objects only + * gets created when Java code specifically requests a reference to a particular + * context, generally to make it the {@code Lifespan} of some PL/Java object + * that should be invalidated when the context is shut down. + *

    + * Once an instance of this class has been instantiated and before it escapes to + * calling Java code, it is registered for a shutdown callback on the + * underlying PostgreSQL context so it can track its life cycle and invalidate, + * when the time comes, any objects it has been used as the "owner" of. + * All creation, traversal, and mutation has to happen on the PG thread. Once + * published and while valid, an instance can be observed by other threads. + *

    + * Events that can occur in the life of an ExprContext: + *

    + *
    Rescan
    PostgreSQL calls {@code ReScanExprContext} before re-scanning + * its plan node from the start. Its callbacks are invoked and left + * unregistered. + *
    Free
    PostgreSQL calls {@code FreeExprContext} when done with the node, + * whether on a successful path or in error cleanup. All callbacks are left + * unregistered, but will have been invoked only in the successful case. In + * either case, the backing native structure is then freed. + *
    + *

    + * Because {@code FreeExprContext} in error cleanup does not invoke callbacks, + * it is quite possible for the native memory backing an ExprContext to vanish + * without notice, so any values of interest from the structure (chiefly its + * per-tuple and per-query memory contexts) must be eagerly fetched while it is + * known valid. Its callback can become unregistered while thought to be + * registered. And the chief need for the callback (for PL/Java's purposes), + * to ensure that the {@link PLJavaBasedLanguage.SRFNext#close close()} method + * of a set-returning function gets called, goes unmet in error cleanup if + * relying on the ExprContext callback alone. + *

    + * It is also unsafe to call {@code UnregisterExprContextCallback} at an + * arbitrary time. Unregistering an already-unregistered callback is not + * a problem, but unregistering a callback on an ExprContext that has been + * freed without notice is crashworthy. Happily, the only time PL/Java might + * explicitly unregister the callback is after a successful final call of + * {@link PLJavaBasedLanguage.SRFNext#nextResult nextResult}, a moment when + * error cleanup is not in progress and the ExprContext can be safely expected + * to be live. + *

    + * To ensure that {@link PLJavaBasedLanguage.SRFNext#close close()} is + * called even in error cleanup, this object is itself given a state with + * a {@code Lifespan}, namely, that of the per-query memory context, in which + * the ExprContext is normally allocated, and expiration of that + * {@code Lifespan} will also be treated as expiration of this one. + *

    + * The approach is nonconservative: because an ExprContext can be (and normally + * is) freed explicitly, there is no guarantee it is live whenever its + * {@code Lifespan} is unexpired, but it is definitely gone when its + * {@code Lifespan} is. + *
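A simplified, hypothetical sketch of the discipline the preceding paragraphs describe: cleanup must run exactly once, triggered by whichever arrives first of the ExprContext shutdown callback (successful paths) or expiration of the bounding per-query lifespan (which also covers error cleanup). In this class that duty falls to the State object below; the names here are illustrative only.

final class FirstEventWins
{
	private boolean released;   // all calls occur on the PG thread, so no locking
	private final Runnable cleanup;

	FirstEventWins(Runnable cleanup)
	{
		this.cleanup = cleanup;
	}

	/** Invoked by the ExprContext shutdown callback (successful paths). */
	void onContextCallback()
	{
		releaseOnce();
	}

	/** Invoked when the bounding per-query lifespan expires. */
	void onLifespanExpired()
	{
		releaseOnce();
	}

	private void releaseOnce()
	{
		if ( released )
			return;             // the other event already ran the cleanup
		released = true;
		cleanup.run();
	}
}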

    + * With that addition, it is not strictly necessary to have an action on a + * successful final call of {@code nextResult}; one of the two events already + * handled (the callback firing or the memory context being reset) will + * eventually occur and mop up the state. The final-call action could be added + * as an optimization to release the instance more promptly. + */ +public class ExprContextImpl extends LifespanImpl +{ + /** + * Map from integer key to an instance of this class. + *

    + * A non-concurrent map suffices, as the uses are only on the PG thread + * (in known() within a doInPG(), and in callback() invoked from PG). + */ + private static ExprContextImpl[] s_map = new ExprContextImpl [ 2 ]; + private static final BitSet s_used = new BitSet(s_map.length); + + /** + * Returns a new instance, given the address of the underlying native + * structure and the per-query memory context that bounds its lifespan. + */ + static ExprContextImpl newInstance(long address, MemoryContext querycxt) + { + assert threadMayEnterPG() : "ExprContextImpl.newInstance thread"; + + int key = s_used.nextClearBit(0); + if ( s_map.length <= key ) + s_map = copyOf(s_map, key << 1); + ExprContextImpl inst = new ExprContextImpl(querycxt, key); + s_used.set(key); + s_map [ key ] = inst; + _registerCallback(address, key); + return inst; + } + + /* + * Called either by a native ExprContext callback or by nativeStateReleased + * on demise of the ExprContext's containing memory context. + * + * Those are two times it is surely safe to decache an ExprContext, as no + * callback with that key can be forthcoming. + * + * It would be conceivable as an optimization to arrange to unregister the + * callback and decache when a set-returning function has been successfully + * read to completion, rather than waiting for one of the two events above. + */ + private static void releaseAndDecache(int key) + { + assert threadMayEnterPG() : "ExprContextImpl.releaseAndDecache thread"; + + ExprContextImpl inst = s_map [ key ]; + assert null != inst && s_used.get(key) : + "ExprContextImpl.releaseAndDecache bad key"; + s_map [ key ] = null; + s_used.clear(key); + inst.lifespanRelease(); + } + + private final State m_state; + + private ExprContextImpl(MemoryContext querycxt, int key) + { + m_state = new State(this, querycxt, key); + } + + private static final class State extends DualState + { + private final int m_key; + + private State(ExprContextImpl referent, MemoryContext lifespan, int key) + { + super(referent, lifespan); + m_key = key; + } + + @Override + protected void nativeStateReleased(boolean javaStateLive) + { + if ( ! javaStateLive ) + return; + releaseAndDecache(m_key); + } + } + + private static native void _registerCallback(long address, int key); +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ExtensionImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ExtensionImpl.java new file mode 100644 index 000000000..1d894432c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ExtensionImpl.java @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.Checked; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; +import static org.postgresql.pljava.model.MemoryContext.JavaMemoryContext; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.MemoryContextImpl.allocatingIn; +import static org.postgresql.pljava.pg.ModelConstants.Anum_pg_extension_oid; +import static org.postgresql.pljava.pg.ModelConstants.ExtensionOidIndexId; +import static org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlot; + +import static org.postgresql.pljava.pg.adt.ArrayAdapter + .FLAT_STRING_LIST_INSTANCE; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link Extension Extension} interface. + */ +class ExtensionImpl extends Addressed +implements Nonshared, Named, Owned, Extension +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + private static TupleTableSlot cacheTuple(ExtensionImpl o) + throws SQLException + { + ByteBuffer heapTuple; + TupleDescImpl td = (TupleDescImpl)o.cacheDescriptor(); + + /* + * See this method in CatalogObjectImpl.Addressed for more on the choice + * of memory context and lifespan. + */ + try ( Checked.AutoCloseable ac = + allocatingIn(JavaMemoryContext()) ) + { + heapTuple = _sysTableGetByOid( + o.classId().oid(), o.oid(), Anum_pg_extension_oid, + ExtensionOidIndexId, td.address()); + if ( null == heapTuple ) + return null; + } + return heapTupleGetLightSlot(td, heapTuple, null); + } + + /* Implementation of Named, Owned */ + + private static Simple name(ExtensionImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.EXTNAME, SIMPLE_INSTANCE); + } + + private static RegRole owner(ExtensionImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.EXTOWNER, REGROLE_INSTANCE); + } + + /* Implementation of Extension */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
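The API methods later in this class (and in the other catalog-object implementations in this patch) all follow one calling convention: fetch a MethodHandle from the slots array and invokeExact it, passing both the receiver and the handle itself. Below is a minimal, hypothetical sketch of just that convention; the real SwitchPointCache machinery additionally allows the computation method to replace the slot's handle so later calls return a cached value under SwitchPoint invalidation, which this sketch does not attempt to show.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import static java.lang.invoke.MethodType.methodType;

public class SlotConventionDemo
{
	private final MethodHandle[] slots = new MethodHandle[1];

	// computation method: receives the instance and the handle that reached it
	private static String compute(SlotConventionDemo o, MethodHandle self)
	{
		return "computed for " + o;
	}

	public String value() throws Throwable
	{
		MethodHandle h = slots[0];
		// exact call descriptor: (SlotConventionDemo, MethodHandle)String
		return (String)h.invokeExact(this, h);
	}

	public static void main(String[] args) throws Throwable
	{
		SlotConventionDemo d = new SlotConventionDemo();
		d.slots[0] = MethodHandles.lookup().findStatic(
			SlotConventionDemo.class, "compute",
			methodType(String.class, SlotConventionDemo.class, MethodHandle.class));
		System.out.println(d.value());
	}
}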
+ */ + ExtensionImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_TARGETNAMESPACE; + static final int SLOT_RELOCATABLE; + static final int SLOT_VERSION; + static final int SLOT_CONFIG; + static final int SLOT_CONDITION; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(ExtensionImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(ExtensionImpl.class.getDeclaredMethods()) + + /* + * First declare some slots whose consuming API methods are found + * on inherited interfaces. This requires some adjustment of method + * types so that run-time adaptation isn't needed. + */ + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + + .withReceiverType(CatalogObjectImpl.Owned.class) + .withReturnType(null) // cancel adjustment from above + .withDependent( "owner", SLOT_OWNER) + + /* + * Next come slots where the compute and API methods are here. + */ + .withReceiverType(null) + + .withDependent( "namespace", SLOT_TARGETNAMESPACE = i++) + .withDependent("relocatable", SLOT_RELOCATABLE = i++) + .withDependent( "version", SLOT_VERSION = i++) + .withDependent( "config", SLOT_CONFIG = i++) + .withDependent( "condition", SLOT_CONDITION = i++) + + .build(); + NSLOTS = i; + } + + static class Att + { + static final Attribute EXTNAME; + static final Attribute EXTOWNER; + static final Attribute EXTNAMESPACE; + static final Attribute EXTRELOCATABLE; + static final Attribute EXTVERSION; + static final Attribute EXTCONFIG; + static final Attribute EXTCONDITION; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "extname", + "extowner", + "extnamespace", + "extrelocatable", + "extversion", + "extconfig", + "extcondition" + ).iterator(); + + EXTNAME = itr.next(); + EXTOWNER = itr.next(); + EXTNAMESPACE = itr.next(); + EXTRELOCATABLE = itr.next(); + EXTVERSION = itr.next(); + EXTCONFIG = itr.next(); + EXTCONDITION = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static RegNamespace namespace(ExtensionImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.EXTNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static boolean relocatable(ExtensionImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.EXTRELOCATABLE, BOOLEAN_INSTANCE); + } + + private static String version(ExtensionImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.EXTVERSION, TextAdapter.INSTANCE); + } + + private static List config(ExtensionImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.EXTCONFIG, + ArrayAdapters.REGCLASS_LIST_INSTANCE); + } + + private static List condition(ExtensionImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.EXTCONDITION, + FLAT_STRING_LIST_INSTANCE); + } + + /* API methods */ + + @Override + public RegNamespace namespace() + { + try + { + MethodHandle h = m_slots[SLOT_TARGETNAMESPACE]; + return (RegNamespace)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean relocatable() + { + try + { + MethodHandle h = m_slots[SLOT_RELOCATABLE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String version() + { + try + { + MethodHandle h = m_slots[SLOT_VERSION]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List config() + { + try + { + MethodHandle h = m_slots[SLOT_CONFIG]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List condition() + { + try + { + MethodHandle h = m_slots[SLOT_CONDITION]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ForeignDataWrapperImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ForeignDataWrapperImpl.java new file mode 100644 index 000000000..06898a516 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ForeignDataWrapperImpl.java @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.FOREIGNDATAWRAPPEROID; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Implementation of the {@link ForeignDataWrapper ForeignDataWrapper} + * interface. + */ +class ForeignDataWrapperImpl extends Addressed +implements + Nonshared, Named, Owned, + AccessControlled, ForeignDataWrapper +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return FOREIGNDATAWRAPPEROID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(ForeignDataWrapperImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.FDWNAME, SIMPLE_INSTANCE); + } + + private static RegRole owner(ForeignDataWrapperImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.FDWOWNER, REGROLE_INSTANCE); + } + + private static List grants(ForeignDataWrapperImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.FDWACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of ForeignDataWrapper */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + ForeignDataWrapperImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_HANDLER; + static final int SLOT_VALIDATOR; + static final int SLOT_OPTIONS; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(ForeignDataWrapperImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(ForeignDataWrapperImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "handler", SLOT_HANDLER = i++) + .withDependent("validator", SLOT_VALIDATOR = i++) + .withDependent( "options", SLOT_OPTIONS = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute FDWNAME; + static final Attribute FDWOWNER; + static final Attribute FDWACL; + static final Attribute FDWHANDLER; + static final Attribute FDWVALIDATOR; + static final Attribute FDWOPTIONS; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "fdwname", + "fdwowner", + "fdwacl", + "fdwhandler", + "fdwvalidator", + "fdwoptions" + ).iterator(); + + FDWNAME = itr.next(); + FDWOWNER = itr.next(); + FDWACL = itr.next(); + FDWHANDLER = itr.next(); + FDWVALIDATOR = itr.next(); + FDWOPTIONS = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static RegProcedure handler(ForeignDataWrapperImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") + RegProcedure p = (RegProcedure) + s.get(Att.FDWHANDLER, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure validator( + ForeignDataWrapperImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") + RegProcedure p = (RegProcedure) + s.get(Att.FDWVALIDATOR, REGPROCEDURE_INSTANCE); + return p; + } + + private static Map options(ForeignDataWrapperImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.FDWOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + /* API methods */ + + @Override + public RegProcedure handler() + { + try + { + MethodHandle h = m_slots[SLOT_HANDLER]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure validator() + { + try + { + MethodHandle h = m_slots[SLOT_VALIDATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Map options() + { + try + { + MethodHandle h = m_slots[SLOT_OPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ForeignServerImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ForeignServerImpl.java new file mode 100644 index 000000000..9115a8191 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ForeignServerImpl.java @@ -0,0 +1,260 @@ +/* + * Copyright (c) 
2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.FOREIGNSERVEROID; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.FDW_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Implementation of the {@link ForeignServer ForeignServer} interface. + */ +class ForeignServerImpl extends Addressed +implements + Nonshared, Named, Owned, + AccessControlled, ForeignServer +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return FOREIGNSERVEROID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(ForeignServerImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SRVNAME, SIMPLE_INSTANCE); + } + + private static RegRole owner(ForeignServerImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SRVOWNER, REGROLE_INSTANCE); + } + + private static List grants(ForeignServerImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SRVACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of ForeignServer */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + ForeignServerImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_FDW; + static final int SLOT_TYPE; + static final int SLOT_VERSION; + static final int SLOT_OPTIONS; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(ForeignServerImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(ForeignServerImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "fdw", SLOT_FDW = i++) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent("version", SLOT_VERSION = i++) + .withDependent("options", SLOT_OPTIONS = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute SRVNAME; + static final Attribute SRVOWNER; + static final Attribute SRVACL; + static final Attribute SRVFDW; + static final Attribute SRVTYPE; + static final Attribute SRVVERSION; + static final Attribute SRVOPTIONS; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "srvname", + "srvowner", + "srvacl", + "srvfdw", + "srvtype", + "srvversion", + "srvoptions" + ).iterator(); + + SRVNAME = itr.next(); + SRVOWNER = itr.next(); + SRVACL = itr.next(); + SRVFDW = itr.next(); + SRVTYPE = itr.next(); + SRVVERSION = itr.next(); + SRVOPTIONS = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static ForeignDataWrapper fdw(ForeignServerImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.SRVFDW, FDW_INSTANCE); + } + + private static String type(ForeignServerImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.SRVTYPE, TextAdapter.INSTANCE); + } + + private static String version(ForeignServerImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.SRVVERSION, TextAdapter.INSTANCE); + } + + private static Map options(ForeignServerImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.SRVOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + /* API methods */ + + @Override + public ForeignDataWrapper fdw() + { + try + { + MethodHandle h = m_slots[SLOT_FDW]; + return (ForeignDataWrapper)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String version() + { + try + { + MethodHandle h = m_slots[SLOT_VERSION]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Map options() + { + try + { + MethodHandle h = m_slots[SLOT_OPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/LookupImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/LookupImpl.java new file mode 100644 index 000000000..d3d64e927 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/LookupImpl.java @@ -0,0 +1,2274 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.annotation.Native; + +import java.lang.reflect.Constructor; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; +import java.nio.IntBuffer; + +import java.nio.charset.CharacterCodingException; + +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; + +import java.util.Arrays; +import java.util.BitSet; +import java.util.List; +import static java.util.Objects.requireNonNull; + +import java.util.regex.Pattern; + +import static java.util.stream.Collectors.counting; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toList; +import java.util.stream.Stream; + +import org.postgresql.pljava.PLJavaBasedLanguage; +import org.postgresql.pljava.PLJavaBasedLanguage.InlineBlocks; +import org.postgresql.pljava.PLJavaBasedLanguage.ReturningSets; +import org.postgresql.pljava.PLJavaBasedLanguage.Routine; +import org.postgresql.pljava.PLJavaBasedLanguage.Routines; +import org.postgresql.pljava.PLJavaBasedLanguage.SRFFirst; +import org.postgresql.pljava.PLJavaBasedLanguage.SRFNext; +import org.postgresql.pljava.PLJavaBasedLanguage.SRFTemplate; +import org.postgresql.pljava.PLJavaBasedLanguage.Template; +import org.postgresql.pljava.PLJavaBasedLanguage.TriggerFunction; +import org.postgresql.pljava.PLJavaBasedLanguage.Triggers; +import org.postgresql.pljava.PLJavaBasedLanguage.TriggerTemplate; +import org.postgresql.pljava.PLJavaBasedLanguage.UsingTransforms; + +import org.postgresql.pljava.TargetList.Projection; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import static org.postgresql.pljava.internal.Backend.validateBodies; +import org.postgresql.pljava.internal.Checked; +import org.postgresql.pljava.internal.DualState; +import org.postgresql.pljava.internal.Invocation; +import org.postgresql.pljava.internal.UncheckedException; + +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import org.postgresql.pljava.model.MemoryContext; +import org.postgresql.pljava.model.ProceduralLanguage; +import static org.postgresql.pljava.model.ProceduralLanguage.C; +import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased; +import org.postgresql.pljava.model.ProceduralLanguage.Validator; +import org.postgresql.pljava.model.RegCollation; +import org.postgresql.pljava.model.RegProcedure; +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Call.Context; +import org.postgresql.pljava.model.RegProcedure.Call.ResultInfo; +import static org.postgresql.pljava.model.RegProcedure.Kind.PROCEDURE; +import org.postgresql.pljava.model.RegProcedure.Lookup; // for javadoc +import org.postgresql.pljava.model.RegType; +import static org.postgresql.pljava.model.RegType.TRIGGER; +import org.postgresql.pljava.model.Trigger.ForTrigger; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import static 
org.postgresql.pljava.pg.CatalogObjectImpl.notyet; +import static org.postgresql.pljava.pg.CatalogObjectImpl.of; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.ANYOID; +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; +import static org.postgresql.pljava.pg.DatumUtils.fromBitmapset; +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; +import static org.postgresql.pljava.pg.DatumUtils.toBitmapset; +import static org.postgresql.pljava.pg.ModelConstants.ALIGNOF_INT; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_fcinfo_args; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_fcinfo_fncollation; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_fcinfo_isnull; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_fcinfo_nargs; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_fcinfo_fncollation; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_fcinfo_isnull; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_fcinfo_nargs; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_NodeTag; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_Oid; +import static org.postgresql.pljava.pg.ModelConstants.T_Invalid; +import static org.postgresql.pljava.pg.ModelConstants.T_AggState; +import static org.postgresql.pljava.pg.ModelConstants.T_CallContext; +import static org.postgresql.pljava.pg.ModelConstants.T_ErrorSaveContext; +import static org.postgresql.pljava.pg.ModelConstants.T_EventTriggerData; +import static org.postgresql.pljava.pg.ModelConstants.T_ReturnSetInfo; +import static org.postgresql.pljava.pg.ModelConstants.T_TriggerData; +import static org.postgresql.pljava.pg.ModelConstants.T_WindowAggState; +import static org.postgresql.pljava.pg.ModelConstants.T_WindowObjectData; + +import org.postgresql.pljava.pg.ProceduralLanguageImpl.PLJavaMemo; + +import org.postgresql.pljava.pg.TupleDescImpl.OfType; +import static org.postgresql.pljava.pg.TupleDescImpl.synthesizeDescriptor; +import static org.postgresql.pljava.pg.TupleTableSlotImpl.newNullableDatum; + +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; + +import static org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +import static org.postgresql.pljava.sqlj.Loader.getSchemaLoader; + +/* Imports for TriggerData implementation */ + +import org.postgresql.pljava.annotation.Trigger.Called; +import org.postgresql.pljava.annotation.Trigger.Event; +import org.postgresql.pljava.annotation.Trigger.Scope; +import org.postgresql.pljava.model.RegClass; +import org.postgresql.pljava.model.Trigger; + +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_Relation_rd_id; + +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRGD_tg_event; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRGD_tg_relation; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRGD_tg_trigtuple; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRGD_tg_newtuple; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRGD_tg_trigger; +import static + org.postgresql.pljava.pg.ModelConstants.OFFSET_TRGD_tg_updatedcols; + +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_Trigger; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgoid; + +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_RSI_allowedModes; +import static 
org.postgresql.pljava.pg.ModelConstants.OFFSET_RSI_isDone; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_RSI_returnMode; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_INT; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_RSI_isDone; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_RSI_returnMode; + +import static + org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlotNoFree; + +/** + * The implementation of {@link RegProcedure.Lookup Lookup}, serving as + * the dispatcher for routines, validators, and inline code blocks + * in PL/Java-based languages. + */ +class LookupImpl implements RegProcedure.Lookup +{ + static + { + assert Integer.BYTES == SIZEOF_Oid : "sizeof oid"; + } + + private static final Routine s_placeholderRoutine = fcinfo -> + { + throw new IllegalStateException(String.format( + "reentrant resolution of Routine for %s", + fcinfo.lookup().target())); + }; + + private static final Template s_placeholderTemplate = flinfo -> + { + throw new IllegalStateException(String.format( + "reentrant resolution of Template for %s", + flinfo.target())); + }; + + private static final Routines s_placeholderInstance = new Routines() + { + @Override + public void essentialChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + { + throw squawk(subject.language()); + } + + @Override + public Template prepare(RegProcedure target, PLJavaBased memo) + { + throw squawk(target.language()); + } + + private IllegalStateException squawk(ProceduralLanguage pl) + { + return new IllegalStateException(String.format( + "reentrant construction of implementing class for %s", pl)); + } + }; + + private final RegProcedureImpl m_target; + private final State m_state; + private final boolean m_forValidator; + + /* + * Mutable fields to be mutated only on the PG thread. + * + * m_outputsDescriptor is a 'notional' descriptor that will be supplied to + * the language handler, so the target routine always delivers any result + * by storing something in a TupleTableSlot, and doesn't need to get into + * the PostgreSQL weeds of "a scalar gets returned, but OUT parameters make + * a composite result, unless it's just one OUT parameter and that's treated + * just like a scalar, but a polymorphic type later resolved to a one-column + * composite isn't" and so on. + * + * But here, once the target routine returns, we need to get into the weeds + * to know the right thing to do with the result, so we need m_returnType + * too. + */ + private boolean m_hasExpr; // PROBABLY could be final + private TupleDescriptor m_inputsDescriptor; + private TupleDescriptor m_outputsDescriptor; + private RegType m_returnType; + /* + * The most recent Call instance that referred to this Lookup. Strictly + * considered, one should be treated as referring to valid memory only while + * the invocation using it is on the PG thread's stack (in contrast to a + * Lookup instance, which can be longer-lived). A Call instance can be saved + * here, though (just not read or written through!) after such an invocation + * returns. If another invocation is found to have the same C structs in the + * same places, the ByteBuffers it encapsulates are still usable, and it can + * be treated as alive again for the duration of that call. + * + * Implicit in the way the PostgreSQL call convention works is that whenever + * control arrives here (at dispatchNew or dispatch), there is an fcinfo + * that is live. 
By the time preDispatch has returned, the Call instance in + * this field (be it newly constructed or just revivified) corresponds to + * that live fcinfo, and may, through the remaining dispatch action here, be + * used as such. + */ + private CallImpl m_savedCall; + + private Routine m_routine; + + private LookupImpl( + MemoryContext cxt, long extra, RegProcedure target, + boolean forValidator) + { + assert threadMayEnterPG() : "LookupImpl. thread"; + + m_target = (RegProcedureImpl)target; + m_state = new State(this, cxt, extra); + m_forValidator = forValidator; + } + + /** + * Invoked from native code when no existing instance was on hand. + *

    + * Creates fresh {@code Lookup} and {@code Call} instances, caches the + * {@code Lookup} reference in our 'extra' struct attached to the C + * {@code FmgrInfo}, calls {@link #preDispatch preDispatch} to ensure the + * fresh instances reflect the passed arguments, and finally proceeds + * through resolution and dispatch of the target routine. + * @param mcxt {@code fn_mcxt} from the underlying C {@code FmgrInfo} + * struct, used to bound the {@code Lifespan} of this instance + * @param extra address of the cache struct that has been installed in + * the C struct's {@code fn_extra} field, and will be used to cache a JNI + * global reference to this instance. Our {@link State State} will take care + * of reclaiming the JNI reference when the memory context is to be deleted. + * @param targetOid oid of the routine to be invoked + * @param forValidator true if control arrived through the validator entry + * point. For that case, targetOid (and therefore this {@code Lookup} + * instance) belongs to a validator function, and the oid of the user + * routine to be validated (call it the 'subject') is passed to it + * as argument 0. + * @param hasExpr whether information on the calling expression is available + * in the C struct. If false, no resolution of polymorphic parameter types + * will be possible. + * @param fcinfo {@code ByteBuffer} windowing the C function call info + * struct + * @param context {@code ByteBuffer} windowing one of the {@code Node} types + * that might appear in C at {@code fcinfo->context}, if present and of a + * node type we recognize, otherwise null. + * @param resultinfo {@code ByteBuffer} windowing a {@code Node} type that + * might appear in C at {@code fcinfo->resultinfo}, if present and of a + * node type we recognize, otherwise null. + */ + private static void dispatchNew( + long mcxt, long extra, int targetOid, + boolean forValidator, boolean hasExpr, + ByteBuffer fcinfo, ByteBuffer context, ByteBuffer resultinfo, + long exprContextAddress, long perQueryCxtAddress) + throws SQLException + { + /* + * This method is only called from native code and is already on the PG + * thread, so a simple assert threadMayEnterPG() would be tempting here. + * However, in java_thread_pg_entry=allow mode, threadMayEnterPG can be + * false here if the JNI upcall was not one of the ...Locked variants. + * That flavor isn't used for this upcall, because the intent is that + * we'll ultimately dispatch to arbitrary user code and not want the + * monitor held for that. Hence this otherwise-redundant doInPG. + */ + Checked.Runnable r = doInPG(() -> + { + MemoryContext cxt = MemoryContextImpl.fromAddress(mcxt); + RegProcedure target = of(RegProcedure.CLASSID, targetOid); + + LookupImpl flinfo = + new LookupImpl(cxt, extra, target, forValidator); + + _cacheReference(flinfo, extra); + + CallImpl cImpl = flinfo.preDispatch( + targetOid, true, hasExpr, fcinfo, context, resultinfo, + exprContextAddress, perQueryCxtAddress); + + Routine routine = flinfo.selectRoutine(); + + return () -> cImpl.dispatch(routine); + }); + + r.run(); + } + + /** + * Invoked from native code when this existing instance has been found + * in {@code fn_extra}. + *
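dispatchNew above does its resolution work inside doInPG but returns the actual call into user code as a Runnable to be run after the guarded section is left, so the monitor is not held while arbitrary user code executes. A generic, hypothetical sketch of that shape (doWhileGuarded stands in for doInPG; none of these names are from the patch):

import java.util.function.Supplier;

final class PrepareThenDispatch
{
	private static final Object GUARD = new Object();

	static <T> T doWhileGuarded(Supplier<T> work)
	{
		synchronized ( GUARD )
		{
			return work.get();   // setup that must happen under the guard
		}
	}

	static void dispatch(Supplier<Runnable> resolveTarget)
	{
		Runnable userCode = doWhileGuarded(resolveTarget);
		userCode.run();          // user code runs with the guard released
	}

	public static void main(String[] args)
	{
		dispatch(() -> () -> System.out.println("user code runs unguarded"));
	}
}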

    + * Calls {@link #preDispatch preDispatch} to ensure the {@code Lookup} + * instance (this instance, reused) and the {@code Call} instance + * (conditionally reused) reflect the current passed arguments, and finally + * proceeds through resolution / dispatch of the target routine. + * @param targetOid asserted the same as what's stored in this instance; + * many assumptions here go out the window if that could ever change + * @param newExpr whether the calling-expression information (or its + * presence/absence) appears to have changed since the last dispatch through + * this instance; I suspect that's not possible, but there has been no + * response to the query on -hackers. + * @param hasExpr if newExpr is true, indicates whether + * expression information is now present at all. + * @param fcinfo {@code ByteBuffer} windowing the C function call info + * struct for this call; null if the address and size are unchanged since + * the last call, meaning the existing {@code ByteBuffer} can be reused. + * @param context {@code ByteBuffer} windowing the C {@code context} node + * for this call; null to reuse an existing {@code ByteBuffer}; + * a zero-length {@code ByteBuffer} to indicate that there no longer is + * a node there. + * @param resultinfo {@code ByteBuffer} windowing the C {@code resultinfo} + * node for this call; null to reuse an existing {@code ByteBuffer}; + * a zero-length {@code ByteBuffer} to indicate that there no longer is + * a node there. + */ + private void dispatch( + int targetOid, boolean newExpr, boolean hasExpr, + ByteBuffer fcinfo, ByteBuffer context, ByteBuffer resultinfo, + long exprContextAddress, long perQueryCxtAddress) + throws SQLException + { + /* + * For the same reason as in dispatchNew, this is a doInPG and not just + * an assert, even though the only caller is native code and always + * on the PG thread. + */ + Checked.Runnable r = doInPG(() -> + { + CallImpl cImpl = preDispatch( + targetOid, newExpr, hasExpr, fcinfo, context, resultinfo, + exprContextAddress, perQueryCxtAddress); + + Routine routine = selectRoutine(); + + return () -> cImpl.dispatch(routine); + }); + + r.run(); + } + + /** + * Invoked from native code to handle an inline code block. + *
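An inline block's handling ultimately reaches a language implementation's {@code execute} method. For orientation only, a hypothetical implementing class is sketched below; the class name is invented, the import locations are assumptions, and the exact SPI signatures (the public {@code ProceduralLanguage} constructor and an {@code execute(String,boolean)} method on {@code InlineBlocks}) are inferred from this patch rather than from published documentation.

import java.sql.SQLException;
import org.postgresql.pljava.PLJavaBasedLanguage.InlineBlocks; // assumed location
import org.postgresql.pljava.model.ProceduralLanguage;         // assumed location

public class ExampleInlineLanguage implements InlineBlocks
{
    /* The dispatcher resolves this public constructor reflectively. */
    public ExampleInlineLanguage(ProceduralLanguage pl)
    {
    }

    @Override
    public void execute(String sourceText, boolean atomic) throws SQLException
    {
        /* interpret the DO block's source text here */
    }
}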

    + * This method is nearly independent of most of this class, which is + * otherwise geared toward the validation, caching, and execution of + * functions and procedures. Other than resolving the implementing class + * of the inline block's declared language, very little of that is + * necessary here. + */ + private static void dispatchInline( + int langOid, boolean atomic, ByteBuffer source_text) + throws SQLException + { + /* + * For the same reason as in dispatchNew, this is a doInPG and not just + * an assert, even though the only caller is native code and always + * on the PG thread. + */ + Checked.Runnable r = doInPG(() -> + { + int placeholders = 0; + ProceduralLanguageImpl pl_outer = null; + try + { + ProceduralLanguageImpl pl = pl_outer = + (ProceduralLanguageImpl)of( + ProceduralLanguage.CLASSID, langOid); + + if ( ! pl.isPLJavaBased() ) + throw new SQLSyntaxErrorException(String.format( + "%s not recognized as a PL/Java-based language", pl), + "42883"); + + String source_string; + try + { + source_string = + SERVER_ENCODING.decode(source_text).toString(); + } + catch ( CharacterCodingException e ) // shouldn't really happen + { + throw new SQLException("in text of DO block: " + e, e); + } + + /* + * True return from isPLJavaBased => safe to call + * implementingClass. + */ + PLJavaBasedLanguage pbl = pl.implementingClass(); + + if ( pbl instanceof InlineBlocks ) + return () -> + ((InlineBlocks)pbl).execute(source_string, atomic); + + Checked.Function + + ctor = validatorToCtor(pl.validator(), InlineBlocks.class); + + /* + * This is explained where it happens in selectRoutine below. + */ + pl.memoizeImplementingClass(null, s_placeholderInstance); + ++ placeholders; + + return () -> + { + InlineBlocks impl = ctor.apply(pl); + + doInPG(() -> + pl.memoizeImplementingClass(s_placeholderInstance, impl)); + + impl.execute(source_string, atomic); + }; + } + catch ( Throwable t ) + { + if ( 1 == placeholders && null != pl_outer ) + pl_outer.memoizeImplementingClass( + s_placeholderInstance, null); + throw t; + } + }); + + r.run(); + } + + /** + * Logic shared between {@link #dispatchNew dispatchNew} (after freshly + * constructing this instance and a {@code Call} instance) and + * {@link #dispatch dispatch} (having found and reused this instance) to + * update this instance and/or construct a new {@code Call} instance, as + * needed, to reflect the latest values passed from the native code. + *
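The reuse convention applied to the windowing buffers can be restated compactly: a null argument means the corresponding C datum is unchanged and the saved {@code ByteBuffer} can be reused, while a zero-capacity buffer means the node is no longer present at all. The helper below is purely illustrative, with a hypothetical name, and is not part of the patch.

import java.nio.ByteBuffer;

final class WindowReuse
{
    /*
     * passed == null          : unchanged since last call; keep cached window
     * passed.capacity() == 0  : the C node went away; no window at all
     * otherwise               : a fresh window over the new C struct
     */
    static ByteBuffer choose(ByteBuffer passed, ByteBuffer cached)
    {
        if ( null == passed )
            return cached;
        if ( 0 == passed.capacity() )
            return null;
        return passed;
    }
}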

    + * At return, {@code m_savedCall} is known to be a {@code Call} instance + * reflecting the current values, and can be used for dispatch. + * @return the up-to-date {@code Call} instance, usable for dispatch (also + * saved in m_savedCall) + */ + private CallImpl preDispatch( + int targetOid, boolean newExpr, boolean hasExpr, + ByteBuffer fcinfo, ByteBuffer context, ByteBuffer resultinfo, + long exprContextAddress, long perQueryCxtAddress) + throws SQLException + { + assert threadMayEnterPG() : "LookupImpl.dispatchNew thread"; + assert targetOid == m_target.oid() : "flinfo target oid changed"; + + if ( newExpr ) + { + m_hasExpr = hasExpr; + m_inputsDescriptor = m_outputsDescriptor = null; + m_returnType = null; + m_routine = null; + } + + boolean newCallNeeded = false; + if ( null == m_savedCall ) + newCallNeeded = true; + else + { + if ( null == fcinfo ) + fcinfo = m_savedCall.m_fcinfo; + else + newCallNeeded = true; + + if ( null == context ) + context = m_savedCall.m_context; + else + { + newCallNeeded = true; + if ( 0 == context.capacity() ) + context = null; + } + + if ( null == resultinfo ) + resultinfo = m_savedCall.m_resultinfo; + else + { + newCallNeeded = true; + if ( 0 == resultinfo.capacity() ) + resultinfo = null; + } + + if ( null == resultinfo ) + assert 0L == exprContextAddress && 0L == perQueryCxtAddress; + else + { + if ( exprContextAddress != m_savedCall.m_exprContextAddress ) + newCallNeeded = true; + + if ( perQueryCxtAddress != m_savedCall.m_perQueryCxtAddress ) + newCallNeeded = true; + } + } + + if ( newCallNeeded ) + m_savedCall = + new CallImpl(fcinfo, context, resultinfo, + exprContextAddress, perQueryCxtAddress); + + return m_savedCall; + } + + /** + * Returns the {@link Routine Routine} to be dispatched to for handling + * this call. + *

    + * When a suitable {@code Routine} has already been cached, specialized to + * this call site (if required), it is returned directly. Otherwise, + * depending on what else has been cached to this point, when this + * {@code Lookup} instance does not represent a validator call, a temporary + * {@code Routine} is returned that will have one of these behaviors: + *

      + *
    1. Starting with a {@link Template Template} object already cached on + * the target {@code RegProcedure}, calls its {@code specialize} method to + * obtain a {@code Routine}, caches that on this {@code Lookup} instance, + * and then proceeds to execute it.
    2. Starting with the {@link Routines Routines} instance already cached + * on the target's {@code ProceduralLanguage}, calls its + * {@code essentialChecks} and {@code prepare} methods to obtain + * a {@code Template}, caches that on the {@code RegProcedure} instance, and + * then proceeds with the actions of (1).
    3. Using the class name in {@code target.language().validator().src()}, + * constructs an instance (loading and initializing the class if need be), + * caches that on the {@code ProceduralLanguage} instance, and then proceeds + * with the actions of (2) (the caching discipline shared by these steps + * is sketched just after this list).
    + *
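The caching discipline shared by the three steps above can be reduced to a small, self-contained sketch: before running handler code that will produce the cached value, store a placeholder so an unexpected reentrant arrival is detectable, and roll the placeholder back if the attempt fails. The names below are hypothetical; the real code keeps each level on a different object (the {@code ProceduralLanguage}, the {@code RegProcedure}, and this {@code Lookup}).

import java.util.function.Supplier;

final class MemoSlot<T>
{
    private static final Object PLACEHOLDER = new Object();
    private Object value;                    // null, PLACEHOLDER, or the cached T

    @SuppressWarnings("unchecked")
    T get(Supplier<T> producer)
    {
        if ( null != value  &&  PLACEHOLDER != value )
            return (T)value;                 // already cached
        assert PLACEHOLDER != value : "reentrant arrival while computing";
        value = PLACEHOLDER;                 // make noise if reentered
        try
        {
            T produced = producer.get();
            value = produced;
            return produced;
        }
        catch ( RuntimeException e )
        {
            value = null;                    // roll the placeholder back
            throw e;
        }
    }
}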

    + * When this {@code Lookup} instance does represent a validator call, + * caches and returns a {@code Routine} that handles that. + */ + private Routine selectRoutine() throws SQLException + { + assert threadMayEnterPG() : "selectRoutine thread"; + + int placeholders = 0; + ProceduralLanguageImpl pl = null; + PLJavaMemo memo = null; + try + { + /* + * If the Routine to execute is already cached, we're done + * immediately. + */ + if ( null != m_routine ) + return m_routine; + + /* + * If this is a validator call, do that. Otherwise, we want a + * Template that can be passed to specializingRoutine (to return a + * Routine that, when executed, will invoke Template.specialize to + * obtain the real Routine, memoize that on this Lookup instance, + * then execute it). + */ + pl = (ProceduralLanguageImpl)m_target.language(); + + if ( m_forValidator ) + { + if ( pl.isPLJavaHandler(null) ) + return m_routine = LookupImpl::routineValidator; + + if ( C == pl ) + return m_routine = LookupImpl::validatorValidator; + + throw new SQLSyntaxErrorException(String.format( + "%s of %s not recognized as PL/Java handler language", + pl, m_target), "42883"); + } + + /* + * Not a validator. So, confirm the PL is something PL/Java-based, + * to justify expecting to find a PLJavaBased memo there. That's + * where we will find the Template if one has already been cached. + */ + if ( ! pl.isPLJavaBased() ) + throw new SQLSyntaxErrorException(String.format( + "%s of %s not recognized as a PL/Java-based language", + pl, m_target), "42883"); + + memo = m_target.m_how; + + /* + * We are getting ready to return a Routine that, when executed, + * will generate the real Routine that should be cached here. Before + * doing so, cache s_placeholderRoutine for now, to make noise in + * case we arrive here reentrantly; that's not expected, but we are + * about to execute user language-handler code whose behavior isn't + * known. The user specialize() code won't be running in doInPG(), + * but the code to cache the result will be, and will make sure this + * placeholder is still here. + */ + m_routine = s_placeholderRoutine; + ++ placeholders; + + if ( null != memo ) + { + Template template = memo.m_routineTemplate; + assert null != template : "PLJavaBased memo with null template"; + return specializingRoutine(template); + } + + /* + * No memo with a Template was already cached. So, we want to + * find the implementing class instance for this PL, to pass to + * preparingRoutine(), to return a Routine that, when executed, will + * invoke that instance's prepare method to obtain a Template, cache + * that in the RegProcedure's memo, and then proceed as for + * specializingRoutine. + */ + PLJavaBasedLanguage pbl = pl.implementingClass(); + + /* + * We are getting ready to return a Routine that, when executed, + * will generate the real Template that should be cached here. + * Before doing so, cache s_placeholderTemplate for now, all + * analogously to the use of s_placeholderRoutine above. + */ + memo = pl.addDependentRoutine(m_target); + memo.m_routineTemplate = s_placeholderTemplate; + ++ placeholders; + + if ( pbl instanceof Routines ) + return preparingRoutine(m_target, memo, (Routines)pbl); + + assert null == pbl + : "PL with wrong type of implementing class cached"; + + /* + * No implementing class instance was already cached for this PL. 
+ * So, we want to resolve the class providing the implementation, + * to pass to instantiatingRoutine, to return a Routine that, when + * executed, will (if necessary, initialize, and) instantiate that + * class, memoize the instance on this PL, and then proceed as for + * preparingRoutine. + */ + Checked.Function + ctor = validatorToCtor(pl.validator(), Routines.class); + + /* + * We are getting ready to return a Routine that, when executed, + * will generate the implementing class instance that should be + * cached on this PL. Before doing so, cache s_placeholderInstance + * for now, all analogously to the use of s_placeholderRoutine + * above. + */ + pl.memoizeImplementingClass(null, s_placeholderInstance); + ++ placeholders; + + return instantiatingRoutine(m_target, memo, ctor, pl); + } + catch ( Throwable t ) + { + switch ( placeholders ) + { + case 3: + pl.memoizeImplementingClass(s_placeholderInstance, null); + /* FALLTHROUGH */ + case 2: + memo.m_routineTemplate = null; + memo.discardIncomplete(); + /* FALLTHROUGH */ + case 1: + m_routine = null; + } + throw t; + } + } + + /** + * Returns a {@code Routine} that first generates, from a {@code Template}, + * a {@code Routine} specialized to the current call site, then caches that + * on the call site and executes it. + */ + private static Routine specializingRoutine(Template template) + { + return fcinfo -> + { + LookupImpl flinfo = (LookupImpl)fcinfo.lookup(); + Routine outer_r = null; + + try + { + outer_r = requireNonNull(template.specialize(flinfo)); + } + finally + { + Routine r = outer_r; + doInPG(() -> + { + assert s_placeholderRoutine == flinfo.m_routine + : "routine updated by reentrant call?"; + flinfo.m_routine = r; + }); + } + + outer_r.call(fcinfo); + }; + } + + /** + * Returns a {@code Routine} that first generates, from a target routine and + * its language's implementing class, a {@code Template}, caches that on the + * target {@code RegProcedure}, and then proceeds as for + * {@code specializingRoutine}. + */ + private static Routine preparingRoutine( + RegProcedureImpl target, PLJavaMemo memo, Routines impl) + throws SQLException + { + return fcinfo -> + { + Template outer_template = null; + + try + { + validate(impl, target, true/*checkBody*/, false/*additional*/); + outer_template = prepare(impl, (RegProcedureImpl)target); + } + finally + { + Template template = outer_template; + doInPG(() -> + { + assert s_placeholderTemplate == memo.m_routineTemplate + : "template updated by reentrant call?"; + memo.m_routineTemplate = template; + if ( null == template ) + memo.discardIncomplete(); + }); + } + + specializingRoutine(outer_template).call(fcinfo); + }; + } + + /** + * Returns a {@code Routine} that first instantiates the implementing class + * for a PL/Java-based language, caches that on the + * {@code ProceduralLanguage} instance, and then proceeds as for + * {@code preparingRoutine}. + */ + private static Routine instantiatingRoutine( + RegProcedureImpl target, PLJavaMemo memo, + Checked.Function + ctor, + ProceduralLanguageImpl pl) + { + return fcinfo -> + { + Routines impl = ctor.apply(pl); + + doInPG(() -> + pl.memoizeImplementingClass(s_placeholderInstance, impl)); + + preparingRoutine(target, memo, impl).call(fcinfo); + }; + } + + /** + * Given a {@link RegProcedure RegProcedure} understood to represent the + * validator of a PL/Java-based language, returns the proper constructor + * for instances of the language's implementing class. 
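The resolution step that validatorToCtor performs can be illustrated with plain JDK reflection. The helper below is a reduced, hypothetical version of the same pattern (no SQLSTATEs, no subinterface checks): load the named class without initializing it, confirm it implements the wanted interface, and capture its single-argument public constructor.

import java.lang.reflect.Constructor;

final class CtorResolver
{
    static <T> Constructor<? extends T> resolve(
        String className, Class<T> wanted, Class<?> ctorArg, ClassLoader loader)
        throws ReflectiveOperationException
    {
        Class<?> c = Class.forName(className, /* initialize */ false, loader);
        if ( ! wanted.isAssignableFrom(c) )
            throw new ClassCastException(
                className + " does not implement " + wanted.getName());
        return c.asSubclass(wanted).getConstructor(ctorArg);
    }
}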
+ */ + private static + Checked.Function + validatorToCtor(RegProcedure vp, Class wanted) + throws SQLException + { + String spec = vp.src(); + ClassLoader loader = getSchemaLoader(vp.namespace().name()); + Class c; + try + { + c = Class.forName(spec, false, loader); + + if ( ! wanted.isAssignableFrom(c) ) + throw new SQLSyntaxErrorException(String.format( + "%s 'AS' class, %s, does not implement %s", + vp, c.getCanonicalName(), wanted.getCanonicalName()), + "42883"); + + /* + * If the caller has passed only the PLJavaBasedLanguage interface, + * the caller is the language-handler validator, and this is a good + * place to insist that the class implement at least one of the + * specific subinterfaces. + */ + if ( PLJavaBasedLanguage.class == wanted + && ! InlineBlocks.class.isAssignableFrom(c) + && ! Routines.class.isAssignableFrom(c) ) + throw new SQLSyntaxErrorException(String.format( + "%s 'AS' class, %s, does not implement at least one of " + + "%s, %s", + vp, c.getCanonicalName(), + InlineBlocks.class.getCanonicalName(), + Routines.class.getCanonicalName()), + "42883"); + + Constructor ctor = + c.asSubclass(wanted).getConstructor(ProceduralLanguage.class); + + return pl -> + { + try + { + return ctor.newInstance(pl); + } + catch ( ReflectiveOperationException e ) + { + throw new SQLNonTransientException( + "instantiating class " + spec + ": " + e, "46103", e); + } + }; + } + catch ( ReflectiveOperationException e ) + { + throw new SQLNonTransientException( + "resolving class " + spec + ": " + e, "46103", e); + } + } + + /** + * The dispatching validator for a proposed routine in a PL/Java-based + * language. + *

    + * Obtains the subject routine from {@code fcinfo.arguments().get(0)}, + * identifies that routine's declared language, obtains that language's + * implementing class (instantiating it if necessary, with the aid of + * {@link #validatorToCtor validatorToCtor} on that language's declared + * validator and memoizing that instance on the {@code ProceduralLanguage}), + * provisionally records the proposed new routine as a dependent routine on + * the implementing language, and invokes {@link #validate validate} to + * apply the validation logic, which includes applying the language + * implementation's {@code essentialChecks} and {@code additionalChecks} + * on the subject routine. + */ + private static void routineValidator(Call fcinfo) throws SQLException + { + RegProcedure subject = + fcinfo.arguments().get(0, REGPROCEDURE_INSTANCE); + + boolean checkBody = validateBodies.getAsBoolean(); + + Checked.Runnable r = doInPG(() -> + { + int placeholders = 0; + ProceduralLanguageImpl pl_outer = null; + boolean reportable = true; + try + { + ProceduralLanguageImpl pl = pl_outer = + (ProceduralLanguageImpl)subject.language(); + + if ( ! pl.isPLJavaBased() ) + throw new SQLException(String.format( + "the language %s of %s does not appear to be " + + "PL/Java-based", pl, subject)); + + /* + * True return from isPLJavaBased => safe to call + * implementingClass. + */ + + PLJavaBasedLanguage pbl = pl.implementingClass(); + + if ( pbl instanceof Routines ) + { + /* + * This is done here so the references all exist as expected + * while executing the validator, but it won't stick. + * Not only will there be a selective-invalidation message + * undoing it if the validator routines reject the routine + * and cause rollback, the successful creation of the + * routine will also generate such a message, undoing the + * link. We'll just recreate it on first actual use: + */ + pl.addDependentRoutine(subject); + return () -> validate((Routines)pbl, subject, checkBody); + } + else if ( null != pbl ) + throw new SQLSyntaxErrorException(String.format( + "%s of %s does not support functions / procedures", + pl, subject)); + + if ( ! checkBody ) + reportable = false; + + Checked.Function + + ctor = validatorToCtor(pl.validator(), Routines.class); + + pl.memoizeImplementingClass(null, s_placeholderInstance); + ++ placeholders; + + return () -> + { + Routines impl = null; + try + { + impl = ctor.apply(pl); + } + catch ( Throwable t ) + { + if ( checkBody ) + throw t; + } + finally + { + Routines f_impl = impl; + doInPG(() -> + { + pl.memoizeImplementingClass( + s_placeholderInstance, f_impl); + + /* + * See note above where this is also done: + */ + if ( null != f_impl ) + pl.addDependentRoutine(subject); + }); + } + + if ( null != impl ) + validate(impl, subject, checkBody); + }; + } + catch ( Throwable t ) + { + if ( 1 == placeholders && null != pl_outer ) + pl_outer.memoizeImplementingClass( + s_placeholderInstance, null); + if ( reportable ) + throw t; + return () -> {}; + } + }); + + r.run(); + } + + /** + * Validates a proposed routine, given the language implementing instance, + * which has already been looked up and confirmed to implement + * {@code Routines}. 
+ */ + private static void validate( + Routines impl, RegProcedure subject, boolean checkBody) + throws SQLException + { + RegProcedureImpl rpi = (RegProcedureImpl)subject; + validate(impl, rpi, checkBody, true); + + /* + * On the way to invoking this method, nearly all of the linkages + * between the subject RegProcedure and its language and validator have + * been created, except of course for a null in the RegProcedure memo + * where a Template ought to be. + * + * Often, the linkages are torn down by a shared-invalidation event + * should validate() fail, or even if it succeeds and the affected + * pg_proc row is committed. But it can happen, if creation / validation + * and use occur in the same transaction, that the teardown by a SINVAL + * event can't be relied on, and the attempt to use the now-validated + * routine could stumble on a null template reference in the memo. + * + * There are two ways to prevent that. An obvious choice is to simulate + * what a SINVAL event would do and discard the incomplete memo. We'll + * do that here if checkBody was false, as some necessary validation + * could have been skipped. + * + * On the other hand, if checkBody was true, we have here a fully + * validated RegProcedure and the Routines instance needed to prepare + * it, and may as well replace the null with a preparingTemplate which, + * if it is still there (no SINVAL) on the first attempt at use, will + * directly call impl.prepare and replace itself in the memo. + */ + doInPG(() -> + { + PLJavaMemo m = rpi.m_how; + if ( null == m || null != m.m_routineTemplate ) + return; + if ( checkBody ) + m.m_routineTemplate = preparingTemplate(rpi, impl); + else + m.discardIncomplete(); + }); + } + + /** + * Calls the implementation's essential (and additional, if requested) + * checks, following general checks common to all languages. + *

    + * If subject's {@link RegProcedure#returnsSet returnsSet()} + * is true, delegates to {@link #validateSRF validateSRF}. Otherwise: + *

    + * If subject's return type is {@link RegType#TRIGGER TRIGGER}, + * this method will also check that impl implements + * {@link Triggers Triggers} and that subject declares no + * parameters, throwing appropriate + * exceptions if those conditions do not hold, and will then call + * impl's {@code essentialTriggerChecks} (and, if requested, + * {@code additionalTriggerChecks}) methods. + *

    + * Otherwise, calls impl's {@code essentialChecks} (and, if + * requested, {@code additionalChecks}) methods. + */ + private static void validate( + Routines impl, RegProcedureImpl subject, + boolean checkBody, boolean additionalChecks) + throws SQLException + { + checkForTransforms(subject, impl, additionalChecks); + + PLJavaMemo memo = subject.m_how; + + if ( subject.returnsSet() ) + { + validateSRF(impl, subject, memo, checkBody, additionalChecks); + return; + } + + if ( TRIGGER == subject.returnType() ) + { + if ( ! (impl instanceof Triggers) ) + throw new SQLSyntaxErrorException(String.format( + "%s of %s does not support triggers", + subject.language(), subject), "42P13"); + + if ( 0 < subject.argTypes().size() + || null != subject.allArgTypes() ) + throw new SQLSyntaxErrorException(String.format( + "%s declares arguments, but a trigger function may not", + subject), "42P13"); + + Triggers timpl = (Triggers)impl; + + @SuppressWarnings("unchecked") + RegProcedure tsubj = (RegProcedure)subject; + + timpl.essentialTriggerChecks(tsubj, memo, checkBody); + + if ( ! additionalChecks ) + return; + + timpl.additionalTriggerChecks(tsubj, memo, checkBody); + return; + } + + impl.essentialChecks(subject, memo, checkBody); + + if ( ! additionalChecks ) + return; + + impl.additionalChecks(subject, memo, checkBody); + } + + /** + * Calls the implementation's essential (and additional, if requested) + * set-returning function checks, following general checks common to + * all languages. + *

    + * If impl does not implement + * {@link ReturningSets ReturningSets}, throws an immediate exception + * reporting that the language does not support returning sets. + *

    + * Ultimately, calls impl's {@code essentialSRFChecks} (and, if + * requested, {@code additionalSRFChecks}) methods. + */ + private static void validateSRF( + Routines impl, RegProcedureImpl subject, PLJavaMemo memo, + boolean checkBody, boolean additionalChecks) + throws SQLException + { + if ( ! (impl instanceof ReturningSets) ) + throw new SQLSyntaxErrorException(String.format( + "%s of %s does not support RETURNS SETOF / RETURNS TABLE", + subject.language(), subject), "42P13"); + + if ( TRIGGER == subject.returnType() ) + throw new SQLSyntaxErrorException(String.format( + "%s returns SETOF, but a trigger function may not", + subject), "42P13"); + + ReturningSets rsImpl = (ReturningSets)impl; + rsImpl.essentialSRFChecks(subject, memo, checkBody); + + if ( ! additionalChecks ) + return; + + rsImpl.additionalSRFChecks(subject, memo, checkBody); + } + + /** + * Calls the appropriate method on impl to prepare and return + * a {@code Template}. + *

    + * If target's return type is not + * {@link RegType#TRIGGER TRIGGER}, simply calls {@code impl.prepare}. + *

    + * Otherwise, validation has already established that impl + * implements {@link Triggers Triggers} and target has proper + * form. This method calls {@code prepareTrigger} and wraps the returned + * {@code TriggerTemplate} in a {@code Template} whose {@code specialize} + * method will pass the appropriate {@link Trigger Trigger} instance to + * the {@code TriggerTemplate}'s {@code specialize} method, and wrap the + * returned {@code TriggerFunction} in a {@code Routine} that will pass + * a caller's {@link Context.TriggerData TriggerData} to the + * {@code TriggerFunction}'s {@code apply} method. + */ + private static Template prepare(Routines impl, RegProcedureImpl target) + throws SQLException + { + if ( target.returnsSet() ) + return prepareSRF(impl, target); + + if ( TRIGGER != target.returnType() ) + return requireNonNull(impl.prepare(target, target.m_how)); + + /* + * It's a trigger (and validated before we got here). + */ + + @SuppressWarnings("unchecked") + RegProcedure ttgt = (RegProcedure)target; + + TriggerTemplate ttpl = + requireNonNull(((Triggers)impl).prepareTrigger(ttgt, target.m_how)); + + return flinfo -> + { + TriggerFunction tf; + + // block, so these variable names don't collide with lambda below + { + Context c = ((LookupImpl)flinfo).m_savedCall.context(); + + if ( ! ( c instanceof Context.TriggerData ) ) + throw new SQLNonTransientException(String.format( + "%s was not called by trigger manager", target), + "39P01"); + + CallImpl.TriggerDataImpl tdi = (CallImpl.TriggerDataImpl)c; + TriggerImpl trgi = (TriggerImpl)tdi.trigger(); + + tf = trgi.withTriggerData(tdi, () -> ttpl.specialize(trgi)); + } + + TriggerFunction final_tf = requireNonNull(tf); + + return fcinfo -> + { + CallImpl.TriggerDataImpl tdi = + (CallImpl.TriggerDataImpl)fcinfo.context(); + TriggerImpl trgi = (TriggerImpl)tdi.trigger(); + TupleTableSlot tts = + trgi.withTriggerData(tdi, () -> final_tf.apply(tdi)); + // XXX when result-returning implemented, do the right stuff + }; + }; + } + + private static Template prepareSRF( + Routines impl, RegProcedureImpl target) throws SQLException + { + ReturningSets rsImpl = (ReturningSets)impl; + SRFTemplate rsTpl = + requireNonNull(rsImpl.prepareSRF(target, target.m_how)); + + return flinfo -> + { + ResultInfo ri = ((LookupImpl)flinfo).m_savedCall.resultInfo(); + + if ( ! ( ri instanceof ResultInfo.ReturnSetInfo ) ) + throw new SQLFeatureNotSupportedException(String.format( + "set-valued %s " + + "called in context that cannot accept a set", target), + "0A000"); + + CallImpl.ReturnSetInfoImpl rsii = + (CallImpl.ReturnSetInfoImpl)ri; + + List> callerSupported = + rsii.allowedModes(); + + Class negotiated; + + try + { + int index = rsTpl.negotiate(callerSupported); + negotiated = callerSupported.get(index); + negotiated.cast(rsTpl); // for check + } + catch ( IndexOutOfBoundsException | ClassCastException e ) + { + List> plSupported = + Stream.iterate( + (new Class[] { rsTpl.getClass() }), + (a -> 0 < a.length), + a -> + ( + Arrays.stream(a) + .flatMap(c -> + Stream.concat( + ( + c.isInterface() + ? 
Stream.of() + : Stream.of(c.getSuperclass()) + ), + Arrays.stream(c.getInterfaces()) + ) + ) + .filter(SRFTemplate.class::isAssignableFrom) + .toArray(Class[]::new) + ) + ) + .flatMap(Arrays::stream) + .filter(c -> SRFTemplate.class == c.getDeclaringClass()) + .distinct() + .map(c -> c.asSubclass(SRFTemplate.class)) + .collect(toList()); + + String plString = plSupported.stream() + .map(Class::getSimpleName) + .collect(joining(",","{","}")); + + String callerString = callerSupported.stream() + .map(Class::getSimpleName) + .collect(joining(",","{","}")); + + boolean noneCommon = plSupported + .stream().noneMatch(callerSupported::contains); + + if ( noneCommon ) + throw new SQLFeatureNotSupportedException(String.format( + "%s of %s can return set result using %s, none " + + "matching caller-expected %s", + target.language(), target, plString, callerString), + "0A000"); + else + throw new SQLException(String.format( + "%s of %s can return set result using %s, caller " + + "accepts %s, but language handler did not " + + "negotiate a match", + target.language(), target, plString, callerString), + "39P02"); + } + + + if ( SRFTemplate.Materialize.class == negotiated ) + return fcinfo -> + { + throw notyet("set-return by Materialize"); + }; + else if ( SRFTemplate.ValuePerCall.class == negotiated ) + { + LookupImpl flinfo_i = (LookupImpl)flinfo; + SRFTemplate.ValuePerCall vpcTpl = + (SRFTemplate.ValuePerCall)rsTpl; + SRFFirst srfFirst = vpcTpl.specializeValuePerCall(flinfo); + + return new SRFRoutine(flinfo_i, srfFirst); + } + else + throw notyet("set-return by " + negotiated.getSimpleName()); + }; + } + + /** + * Returns a placeholder {@code Template} whose {@code specialize} method + * will first generate and memoize the real {@code Template} and then call + * its {@code specialize} method and return the result. + */ + private static Template preparingTemplate( + RegProcedureImpl subject, Routines impl) + { + return flinfo -> + { + assert flinfo.target() == subject: "preparingTemplate wrong target"; + + /* + * A call of prepare is normally preceded by a call of + * essentialChecks in case the routine was not fully validated. + * This Template, however, is only present after successful full + * validation, so a repeated call of essentialChecks isn't needed. + */ + Template t = prepare(impl, subject); + + doInPG(() -> + { + PLJavaMemo m = subject.m_how; + if ( null == m ) + return; + m.m_routineTemplate = t; + }); + + return t.specialize(flinfo); + }; + } + + /** + * Pattern to recognize a Java type name, possibly qualified, + * without array brackets. + */ + private static final Pattern javaTypeName = Pattern.compile(String.format( + "(?:%1$s\\.)*+%1$s", + String.format("\\p{%1$sStart}\\p{%1sPart}*+", "javaJavaIdentifier")) + ); + + /** + * The validator of a PL/Java-based language implementation's validator. + *
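As an aside on the error path in prepareSRF above: the dense stream pipeline there walks the template class's supertypes to report which {@code SRFTemplate} flavors the handler supports. The loop below is an equivalent, standalone restatement of what that walk computes, using only JDK types and a hypothetical method name.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

final class TemplateFlavors
{
    /* Collect every interface nested in 'marker' that 'templateClass'
     * transitively extends or implements, breadth-first, without duplicates. */
    static List<Class<?>> flavorsOf(Class<?> templateClass, Class<?> marker)
    {
        List<Class<?>> found = new ArrayList<>();
        Deque<Class<?>> work = new ArrayDeque<>();
        work.add(templateClass);
        while ( ! work.isEmpty() )
        {
            Class<?> c = work.remove();
            if ( marker == c.getDeclaringClass()  &&  ! found.contains(c) )
                found.add(c);
            Class<?> sup = c.getSuperclass();
            if ( null != sup  &&  marker.isAssignableFrom(sup) )
                work.add(sup);
            for ( Class<?> i : c.getInterfaces() )
                if ( marker.isAssignableFrom(i) )
                    work.add(i);
        }
        return found;
    }
}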

    + * The subject {@code RegProcedure} (argument 0) needs to be a function + * with one parameter, typed oid, and we'll say void return, for + * consistency. Its language (of implementation; the language it validates + * may not be declared yet) must return true for {@code isPLJavaHandler}. + *

    + * Its {@code src} needs to be a class name, and the class needs to + * implement {@code PLJavaBasedLanguage.Routines}. + */ + private static void validatorValidator(Call fcinfo) throws SQLException + { + RegProcedure subject = + fcinfo.arguments().get(0, REGPROCEDURE_INSTANCE); + + boolean checkBody = validateBodies.getAsBoolean(); + + switch ( subject.kind() ) + { + case FUNCTION: break; + default: + throw new SQLException(String.format( + "%s must have kind FUNCTION; has %s", subject, subject.kind())); + } + + List argtypes = subject.argTypes(); + if ( 1 != argtypes.size() || RegType.OID != argtypes.get(0) ) + throw new SQLException(String.format( + "%s must have one parameter of type pg_catalog.oid; has %s", + subject, argtypes)); + + if ( RegType.VOID != subject.returnType() ) + throw new SQLException(String.format( + "%s must have return type pg_catalog.void; has %s", + subject, subject.returnType())); + + /* + * This check probably cannot fail, given that PostgreSQL dispatched the + * validation here. But in the interest of thoroughness and sanity .... + */ + if ( ! doInPG(() -> ((ProceduralLanguageImpl)subject.language()) + .isPLJavaHandler(null)) ) + throw new SQLException(String.format( + "the language %s of %s does not appear " + + "to be PL/Java's dispatcher", + subject.language(), subject)); + + String src = subject.src(); + + if ( null == src || ! javaTypeName.matcher(src).matches() ) + throw new SQLException(String.format( + "%s AS must be a Java class name; found: %s", + subject, src)); + + if ( ! checkBody ) + return; + + @SuppressWarnings("unchecked") + RegProcedure rpv = (RegProcedure)subject; + + /* + * This will verify that the class can be found, implements the right + * interface, and has the expected public constructor, throwing + * appropriate exceptions if not. + */ + validatorToCtor(rpv, PLJavaBasedLanguage.class); + } + + @Override + public RegProcedure target() + { + return m_target; + } + + @Override + public TupleDescriptor inputsDescriptor() throws SQLException + { + return doInPG(() -> + { + if ( null != m_inputsDescriptor ) + return m_inputsDescriptor; + + TupleDescriptor tpl = m_target.inputsTemplate(); + BitSet unres = m_target.unresolvedInputs(); + if ( unres.isEmpty() ) // nothing to resolve, use template as-is + return m_inputsDescriptor = tpl; + + int tplSize = tpl.size(); + int argSize = m_savedCall.nargs(); + ByteBuffer bb = // assert Integer.BYTES == SIZEOF_Oid + ByteBuffer.allocateDirect( + argSize * Integer.BYTES + ALIGNOF_INT - 1) + .alignedSlice(ALIGNOF_INT).order(nativeOrder()); + tpl.stream() + .map(Attribute::type) + .mapToInt(RegType::oid) + .forEachOrdered(bb::putInt); + /* + * The buffer bb is argSize Oids long, with maybe fewer + * (just tplSize) written. See inputsAreSpread(). + * + * Even though the C code won't be modifying this Bitmapset, + * OR in a guard bit anyway because, if empty, we would be + * violating PG's assumption that the only empty Bitmapset + * is a null one. + */ + int guardPos = unres.length(); + unres.set(guardPos); + ByteBuffer unres_b = toBitmapset(unres); + if ( ! 
_resolveArgTypes( + m_savedCall.m_fcinfo, bb, unres_b, tplSize, argSize) ) + throw new SQLException( + "failure resolving polymorphic argument types"); + unres.clear(guardPos); + + Identifier.Simple[] names = new Identifier.Simple [argSize]; + RegType[] types = new RegType[argSize]; + + IntBuffer ib = bb.rewind().asIntBuffer(); + + for ( int i = 0 ; i < argSize ; ++ i ) + { + if ( i >= tplSize ) + names[i] = Identifier.None.INSTANCE; + else + { + names[i] = tpl.get(i).name(); + if ( ! unres.get(i) ) + { + types[i] = tpl.get(i).type(); + continue; + } + } + types[i] = of(RegType.CLASSID, ib.get(i)); + } + + return m_inputsDescriptor = + synthesizeDescriptor( + List.of(types), List.of(names), null); + }); + } + + @Override + public TupleDescriptor outputsDescriptor() throws SQLException + { + return doInPG(() -> + { + if ( null != m_outputsDescriptor ) + return m_outputsDescriptor; + + BitSet unres = m_target.unresolvedOutputs(); + if ( null != unres && unres.isEmpty() ) + { + m_returnType = m_target.returnType(); + return m_outputsDescriptor = m_target.outputsTemplate(); + } + + /* + * Having a template TupleDescriptor already cached with the + * RegProcedure itself, and a BitSet identifying exactly + * which of its types call for resolution, evokes pleasing + * visions of somehow using the PostgreSQL API routines to + * resolve only the types needing it, and reusing the rest. + * But funcapi.h just really doesn't offer anything easy to + * use that way. It is ultimately simplest to just let + * get_call_result_type re-do the whole job from scratch + * and use that descriptor in place of the cached one. + * Still, we're able to skip doing any of that when the + * cached one requires no resolution, so there's that. + */ + int[] retOid = new int[1]; + TupleDescriptor td = + _notionalCallResultType(m_savedCall.m_fcinfo, retOid); + m_returnType = of(RegType.CLASSID, retOid[0]); + + /* + * The native method will have returned null for these cases: + * - Type resolved to VOID. No problem. Return null here. + * - Resolved type is not composite. Make an OfType descriptor here. + * - Type resolved to RECORD, no columns at call site. Bad. + */ + if ( null == td && RegType.VOID != m_returnType ) + { + if ( RegType.RECORD != m_returnType ) + td = new OfType(m_returnType); + else + { + throw new SQLSyntaxErrorException(String.format( + "RECORD-returning function %s called without the " + + "required column-definition list following " + + "the call", m_target), "42P18"); + /* + * ^^^ that can also happen if a polymorphic return type + * gets resolved to match an input for which a row type + * was passed whose tuple descriptor is not cataloged. + * (Interned is no good, because the polymorphic + * resolution does not keep track of the typmods.) + * For now, I'm just going to leave that gobbledygook + * out of the message. And maybe forever. + */ + } + } + return m_outputsDescriptor = td; + }); + } + + @Override + public boolean inputsAreSpread() + { + if ( ANYOID != m_target.variadicType().oid() ) + return false; + return ! doInPG(() -> _get_fn_expr_variadic(m_savedCall.m_fcinfo)); + } + + @Override + public BitSet stableInputs(BitSet ofInterest) + { + BitSet s = (BitSet)ofInterest.clone(); + + /* + * Add one bit above the highest one of interest, which the C code + * will not touch. That ensures the C code will not be trying to grow + * or shrink the memory region, which isn't palloc'd, so that wouldn't + * go well. 
+ */ + int guardPos = s.length(); + s.set(guardPos); + ByteBuffer b = toBitmapset(s); + doInPG(() -> _stableInputs(m_savedCall.m_fcinfo, b)); + s = fromBitmapset(b); + s.clear(guardPos); + return s; + } + + /** + * Wrapper that presents {@link SRFFirst SRFFirst} as an ordinary + * {@link Routine Routine}. + *

    + * This wrapper is what will be cached in the {@link Lookup Lookup} + * as the routine specialization, except during the actual collection of + * rows from a {@code ValuePerCall} set-returning function. During the + * collection of rows, this wrapper is replaced in the cache by a lambda + * that collects the next row. When the row collection completes normally or + * abnormally, this wrapper is reestablished in the cache. + *
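The protocol this wrapper manages can be pictured with an ordinary iterator, leaving out every PostgreSQL detail. The sketch below is hypothetical and uses only JDK types: a first call creates per-row state, each later call either emits one row or signals the end, and at the end the state is dropped so the original entry point can take over again.

import java.util.Iterator;
import java.util.function.Consumer;

final class ValuePerCallDriver<T>
{
    private Iterator<T> rows;                 // per-row state from the first call

    /* Returns true while a row was emitted, false once the set is exhausted. */
    boolean call(Iterable<T> source, Consumer<T> sink)
    {
        if ( null == rows )
            rows = source.iterator();         // the "first call" step
        if ( rows.hasNext() )
        {
            sink.accept(rows.next());         // one row per call (MULTIPLE)
            return true;
        }
        rows = null;                          // END: release state, reset
        return false;
    }
}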

    + * This is a full-fledged class, and not merely a lambda itself, as it must + * refer to its own {@code this} to reestablish itself in the cache, and + * hold a {@link SRFRoutine.State State} object to know when to do so in + * the abnormal-completion case. + */ + private static class SRFRoutine implements Routine + { + private final LookupImpl m_flinfo; + private final SRFFirst m_srfFirst; + + /* + * A mutable field that holds a State only while m_routine holds + * a row-collecting lambda instead of this object. Only to be mutated + * on the PG thread. The reestablish() method below clears this field + * and stores this SRFRoutine back into m_routine. + */ + private State m_state; + + private SRFRoutine(LookupImpl flinfo, SRFFirst srfFirst) + { + m_flinfo = flinfo; + m_srfFirst = srfFirst; + } + + private final void reestablish(boolean release) + { + doInPG(() -> + { + if ( release ) + m_state.release(); + m_state = null; + m_flinfo.m_routine = this; + }); + } + + @Override + public void call(Call fcinfo) throws SQLException + { + SRFNext next = m_srfFirst.firstCall(fcinfo); + SRFNext.Result rslt = next.nextResult(fcinfo); + CallImpl.ReturnSetInfoImpl rsi = + (CallImpl.ReturnSetInfoImpl)fcinfo.resultInfo(); + /* + * Logically, rsi.setMode(SFRM_ValuePerCall) belongs here, but + * in practice can be elided because PG initializes the mode to + * ValuePerCall. + */ + rsi.setDone(rslt); + switch ( rslt ) + { + case END: + next.close(); + /* FALLTHROUGH */ + case SINGLE: + return; + case MULTIPLE: + } + + Routine r = fci2 -> + { + SRFNext.Result rslt2 = next.nextResult(fci2); + CallImpl.ReturnSetInfoImpl rsi2 = + (CallImpl.ReturnSetInfoImpl)fci2.resultInfo(); + /* + * If ValuePerCall were not the default mode, + * rsi2.setMode(SFRM_ValuePerCall) would be needed here too; + * PG checks it on every ValuePerCall iteration, weirdly enough. + */ + rsi2.setDone(rslt2); + if ( SRFNext.Result.MULTIPLE == rslt2 ) + return; + /* + * Assume it is DONE. SINGLE would be a protocol + * violation, and PG will report that for us. + */ + try + { + next.close(); + } + finally + { + reestablish(true); + } + }; + + doInPG(() -> + { + m_state = new State(this, rsi.exprContext(), next); + m_flinfo.m_routine = r; + }); + } + + private static final class State extends DualState + { + private final SRFNext m_next; + + private State( + SRFRoutine referent, ExprContextImpl lifespan, SRFNext next) + { + super(referent, lifespan); + m_next = next; + } + + private void release() + { + releaseFromJava(); + } + + @Override + protected void nativeStateReleased(boolean javaStateLive) + { + if ( ! javaStateLive ) + return; + SRFRoutine r = referent(); + try + { + m_next.close(); + } + finally + { + if ( null != r ) + r.reestablish(false); + } + } + } + } + + /** + * The implementation of {@link Call}, encapsulating the information + * supplied by PostgreSQL in the per-call C struct. 
+ */ + class CallImpl implements RegProcedure.Call + { + private final ByteBuffer m_fcinfo; + private final ByteBuffer m_context; + private final ByteBuffer m_resultinfo; + /* + * Only relevant when m_resultinfo is a ReturnSetInfo: + */ + private final long m_exprContextAddress; + private final long m_perQueryCxtAddress; + + /* mutable, accessed on the PG thread */ + private TupleTableSlot m_arguments; + private Context m_contextImpl; + private ResultInfo m_resultinfoImpl; + + private CallImpl( + ByteBuffer fcinfo, ByteBuffer context, ByteBuffer resultinfo, + long exprContextAddress, long perQueryCxtAddress) + { + m_fcinfo = fcinfo.order(nativeOrder()); + m_context = asReadOnlyNativeOrder(context); + m_resultinfo = + null == resultinfo ? null : resultinfo.order(nativeOrder()); + m_exprContextAddress = exprContextAddress; + m_perQueryCxtAddress = perQueryCxtAddress; + } + + private void dispatch(Routine r) throws SQLException + { + r.call(this); + + /* + * For now, set isNull if the result would be interpreted as a + * reference. For a function, that means outputsDescriptor is other + * than one column wide (one column is treated as scalar by the + * function caller) or the one attribute isn't byValue. Otherwise, + * the temporary PG_RETURN_VOID in the C wrapper will be returning + * a non-null, zero result. That could be bogus for some arbitrary + * by-value type, but at least isn't an immediate bad dereference. + * The condition for procedures is simpler; they can only return + * VOID or RECORD, so if it isn't void, null it must be. + */ + TupleDescriptor td = outputsDescriptor(); + + if ( null == td ) // declared as returning void; nothing to do + return; + + if ( 1 != td.size() // it can't be a wrapper + || td.get(0).type() != m_returnType // or it isn't a wrapper + || ! m_returnType.byValue() ) // or wraps a by-ref type + isNull(true); // avoid a dereference + } + + private short nargs() + { + assert Short.BYTES == SIZEOF_fcinfo_nargs : "sizeof fcinfo nargs"; + return m_fcinfo.getShort(OFFSET_fcinfo_nargs); + } + + @Override + public RegProcedure.Lookup lookup() + { + return LookupImpl.this; + } + + @Override + public TupleTableSlot arguments() throws SQLException + { + return doInPG(() -> + { + if ( null == m_arguments ) + { + TupleDescriptor td = inputsDescriptor(); + m_arguments = newNullableDatum(td, m_fcinfo + /* + * Java 13: .slice(OFFSET_fcinfo_args, + * m_fcinfo.capacity() - OFFSET_fcinfo_args).order(m_fcinfo.order()) + */ + .duplicate().position(OFFSET_fcinfo_args) + .slice().order(m_fcinfo.order()) + ); + } + return m_arguments; + }); + } + + @Override + public TupleTableSlot result() + { + throw notyet(); + } + + @Override + public void isNull(boolean nullness) + { + assert 1 == SIZEOF_fcinfo_isnull : "sizeof fcinfo isnull"; + doInPG(() -> + m_fcinfo.put( + OFFSET_fcinfo_isnull, nullness ? 
(byte)1 : (byte)0)); + } + + @Override + public RegCollation collation() + { + assert Integer.BYTES == SIZEOF_fcinfo_fncollation + : "sizeof fcinfo fncollation"; + int oid = m_fcinfo.getInt(OFFSET_fcinfo_fncollation); + return of(RegCollation.CLASSID, oid); + } + + @Override + public Context context() + { + assert Integer.BYTES == SIZEOF_NodeTag : "sizeof NodeTag"; + return doInPG(() -> + { + if ( null != m_contextImpl ) + return m_contextImpl; + + if ( null == m_context ) + return null; + + int tag = m_context.getInt(0); + + if ( T_Invalid == tag ) + return null; + if ( T_TriggerData == tag ) + return m_contextImpl = new TriggerDataImpl(); + if ( T_EventTriggerData == tag ) + return m_contextImpl = new EventTriggerDataImpl(); + if ( T_AggState == tag ) + return m_contextImpl = new AggStateImpl(); + if ( T_WindowAggState == tag ) + return m_contextImpl = new WindowAggStateImpl(); + if ( T_WindowObjectData == tag ) + return m_contextImpl = new WindowObjectImpl(); + if ( T_CallContext == tag ) + return m_contextImpl = new CallContextImpl(); + if ( T_ErrorSaveContext == tag ) + return m_contextImpl = new ErrorSaveContextImpl(); + return null; + }); + } + + @Override + public ResultInfo resultInfo() + { + return doInPG(() -> + { + if ( null != m_resultinfoImpl ) + return m_resultinfoImpl; + + if ( null == m_resultinfo ) + return null; + + int tag = m_resultinfo.getInt(0); + + if ( T_ReturnSetInfo == tag ) + return m_resultinfoImpl = new ReturnSetInfoImpl(); + return null; + }); + } + + class TriggerDataImpl implements Context.TriggerData + { + /** + * When non-null, holds a {@code ByteBuffer} that windows the + * structurs at {@code *td_trigger}. + *

    + * In a bit of an incestuous relationship, this will be set + * by the {@link #trigger trigger()} method below, which returns + * the corresponding instance of {@link TriggerImpl}. That + * class has a package-visible {@code withTriggerData} method that + * the caller can use to run client code in a scope throughout which + * that {@code Trigger} instance will be associated with this + * instance and can read from this {@code ByteBuffer}. + *

    + * On exit of that scope, this field will be reset to null and the + * association between the {@code TriggerImpl} instance and this + * one broken. + *
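That scope can be reduced to a familiar try/finally shape. The class below is a hypothetical, simplified stand-in for the withTriggerData arrangement, not the patch's actual API.

import java.util.concurrent.Callable;

final class ScopedAssociation<A>
{
    private A current;                 // readable by code running inside the scope

    A current()
    {
        return current;
    }

    <R> R with(A value, Callable<R> task) throws Exception
    {
        current = value;               // establish the association
        try
        {
            return task.call();
        }
        finally
        {
            current = null;            // on exit of the scope, break it again
        }
    }
}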

    + * The dispatcher will execute client {@code specialize} and + * {@code apply} methods within such a scope, so those methods will + * see a {@code Trigger} instance that works, while avoiding thorny + * questions here about cache validity, since every call will use + * the structure freshly passed by PostgreSQL. + */ + ByteBuffer m_trigger; + + @Override + public Called called() + { + assert Integer.BYTES == SIZEOF_TRGD_tg_event; + int event = m_context.getInt(OFFSET_TRGD_tg_event); + switch ( event & TRIGGER_EVENT_TIMINGMASK ) + { + case TRIGGER_EVENT_BEFORE: + return Called.BEFORE; + case TRIGGER_EVENT_AFTER: + return Called.AFTER; + case TRIGGER_EVENT_INSTEAD: + return Called.INSTEAD_OF; + default: + throw new + AssertionError("unexpected TriggerData.called"); + } + } + + @Override + public Event event() + { + int event = m_context.getInt(OFFSET_TRGD_tg_event); + switch ( event & TRIGGER_EVENT_OPMASK ) + { + case TRIGGER_EVENT_INSERT: + return Event.INSERT; + case TRIGGER_EVENT_DELETE: + return Event.DELETE; + case TRIGGER_EVENT_UPDATE: + return Event.UPDATE; + case TRIGGER_EVENT_TRUNCATE: + return Event.TRUNCATE; + default: + throw new + AssertionError("unexpected TriggerData.event"); + } + } + + @Override + public Scope scope() + { + int event = m_context.getInt(OFFSET_TRGD_tg_event); + if ( 0 != (event & TRIGGER_EVENT_ROW) ) + return Scope.ROW; + return Scope.STATEMENT; + } + + @Override + public RegClass relation() + { + long r = fetchPointer(m_context, OFFSET_TRGD_tg_relation); + ByteBuffer bb = + mapFixedLength(r + OFFSET_Relation_rd_id, SIZEOF_Oid); + int oid = bb.getInt(0); + return of(RegClass.CLASSID, oid); + } + + @Override + public TupleTableSlot triggerTuple() + { + return doInPG(() -> + { + long t = fetchPointer(m_context, OFFSET_TRGD_tg_trigtuple); + + if ( 0 == t ) + return null; + + TupleDescriptor td = relation().tupleDescriptor(); + return + heapTupleGetLightSlotNoFree( + td, t, Invocation.current()); + }); + } + + @Override + public TupleTableSlot newTuple() + { + return doInPG(() -> + { + long t = fetchPointer(m_context, OFFSET_TRGD_tg_newtuple); + + if ( 0 == t ) + return null; + + TupleDescriptor td = relation().tupleDescriptor(); + return + heapTupleGetLightSlotNoFree( + td, t, Invocation.current()); + }); + } + + @Override + public Trigger trigger() + { + long t = fetchPointer(m_context, OFFSET_TRGD_tg_trigger); + ByteBuffer bb = mapFixedLength(t, SIZEOF_Trigger); + int oid = bb.getInt(OFFSET_TRG_tgoid); + m_trigger = bb; + return of(Trigger.CLASSID, oid); + } + + @Override + public Projection updatedColumns() + { + long t = fetchPointer(m_context, OFFSET_TRGD_tg_updatedcols); + BitSet s = fromBitmapset(t); + /* + * The PostgreSQL bitmapset representation for a zero-length set + * is null, so we think we got a zero-length set even for + * non-update trigger events. But an empty set of update columns + * is most likely a non-update event, so return null for that + * case rather than constructing an empty Projection. + */ + if ( 0 == s.length() ) + return null; + /* + * Shifting out -FirstLowInvalidHeapAttributeNumber would + * yield a BitSet with SQLish one-based numbering. Shift out + * 1 - FirstLowInvalidHeapAttributeNumber to get a 0-based set. 
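/*
 * Illustration only, not part of the patch (the attribute numbers are made
 * up): the renumbering performed just below, shown with plain
 * java.util.BitSet. PostgreSQL stores attribute number a at Bitmapset
 * position a - FirstLowInvalidHeapAttributeNumber.
 */
import java.util.BitSet;

class UpdatedColumnsDemo
{
    static final int FirstLowInvalidHeapAttributeNumber = -7;

    public static void main(String[] args)
    {
        BitSet fromPG = new BitSet();
        fromPG.set(1 - FirstLowInvalidHeapAttributeNumber); // attribute 1 -> bit 8
        fromPG.set(3 - FirstLowInvalidHeapAttributeNumber); // attribute 3 -> bit 10
        BitSet zeroBased = fromPG.get(
            1 - FirstLowInvalidHeapAttributeNumber, fromPG.length());
        System.out.println(zeroBased); // {0, 2}: attributes 1 and 3, zero-based
    }
}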
+ */ + s = s.get(1 - FirstLowInvalidHeapAttributeNumber, s.length()); + return relation().tupleDescriptor().project(s); + } + } + + class EventTriggerDataImpl implements Context.EventTriggerData + { + } + + class AggStateImpl implements Context.AggState + { + } + + class WindowAggStateImpl implements Context.WindowAggState + { + } + + class WindowObjectImpl implements Context.WindowObject + { + } + + class CallContextImpl implements Context.CallContext + { + @Override + public boolean atomic() + { + assert 1 == SIZEOF_CallContext_atomic; + return 0 != m_context.get(OFFSET_CallContext_atomic); + } + } + + class ErrorSaveContextImpl implements Context.ErrorSaveContext + { + } + + class ReturnSetInfoImpl implements ResultInfo.ReturnSetInfo + { + private final MemoryContext m_perQueryCxt; + + { + assert threadMayEnterPG() : "ReturnSetInfoImpl new thread"; + m_perQueryCxt = + MemoryContextImpl.fromAddress(m_perQueryCxtAddress); + } + + /* + * The per-query memory context referenced from the ExprContext. + * + * In strict fidelity to PostgreSQL, this would be a method on + * ExprContextImpl. But there is not much else about an ExprContext + * interesting enough to be worth putting accessor methods on it, + * and this context will be needed for allocating even the first + * row from an SRF, before it is known whether a Java mirror + * of ExprContext will even be needed. So to simplify the API and + * reduce indirection, the method is here. + */ + MemoryContext perQueryMemoryContext() + { + return m_perQueryCxt; + } + + ExprContextImpl exprContext() + { + assert threadMayEnterPG() : + "ReturnSetInfoImpl.exprContext thread"; + + return ExprContextImpl.newInstance( + m_exprContextAddress, m_perQueryCxt); + } + + @Override + public List> allowedModes() + { + assert Integer.BYTES == SIZEOF_INT; + int modes = m_resultinfo.getInt(OFFSET_RSI_allowedModes); + switch ( modes & ( SFRM_ValuePerCall | SFRM_Materialize ) ) + { + case SFRM_ValuePerCall: return s_vpc; + case SFRM_Materialize : return s_mat; + case SFRM_ValuePerCall | SFRM_Materialize : + if ( 0 == (modes & SFRM_Materialize_Preferred) ) + return s_vpcmat; + return s_matvpc; + default: + throw new AssertionError( + "unhandled ReturnSetInfo.allowedModes"); + } + } + + void setMode(int returnMode) + { + assert Integer.BYTES == SIZEOF_RSI_returnMode:"returnMode size"; + m_resultinfo.putInt(OFFSET_RSI_returnMode, returnMode); + } + + void setDone(SRFNext.Result result) + { + int isDone = 0; // Java >= 21: exhaustive switch is enough + + switch ( result ) + { + case SINGLE: isDone = ExprSingleResult; break; + case MULTIPLE: isDone = ExprMultipleResult; break; + case END: isDone = ExprEndResult; break; + } + + assert Integer.BYTES == SIZEOF_RSI_isDone:"isDone size"; + m_resultinfo.putInt(OFFSET_RSI_isDone, isDone); + } + } + } + + private static class State + extends DualState.SingleDeleteGlobalRefP + { + /** + * Constructs a {@code State} given the memory context that will bound + * its lifespan, and a pointer to the JNI global ref that must be freed + * at end of life. The caller passes the address of the {@code extra} + * struct, which therefore needs to have the JNI global ref as its first + * member. + */ + private State(LookupImpl referent, MemoryContext cxt, long globalRefP) + { + super(referent, cxt, globalRefP); + } + } + + private static void checkForTransforms( + RegProcedure p, Routines impl, boolean addlChecks) + throws SQLException + { + List types = p.transformTypes(); + if ( null == types ) + return; + + if ( ! 
( impl instanceof UsingTransforms ) ) + throw new SQLSyntaxErrorException(String.format( + "%s of %s does not implement TRANSFORM FOR TYPE", + p.language(), p), "42P13"); + + /* + * Check (as PostgreSQL doesn't) for multiple mentions of a type + * to transform, but only as an additionalCheck, at validation time. + * If a routine so declared sneaks past validation, we'll just do + * duplicative work at call time rather than checking for it then. + */ + if ( addlChecks ) + { + List dups = types.stream() + .collect(groupingBy(t -> t, counting())) + .entrySet().stream() + .filter(e -> 1 < e.getValue()) + .map(e -> e.getKey()) + .collect(toList()); + if ( ! dups.isEmpty() ) + throw new SQLSyntaxErrorException(String.format( + "%s mentions redundantly in TRANSFORM FOR TYPE: %s", + p, dups), "42P13"); + } + + PLJavaMemo m = ((RegProcedureImpl)p).m_how; + + /* + * m.transforms() will construct a list (and cache it, so it doesn't + * hurt much to do it here in advance) of transforms corresponding to + * the transformTypes. It can throw an SQLException if any needed + * transform can't be found, or if impl's essentialTransformChecks + * method throws one. Like most CatalogObject methods, transforms() + * will have wrapped that in an UncheckedException, but we unwrap it + * here if it happens. When the language handler calls transforms(), it + * should be getting a cached value so an exception would be unexpected. + */ + try + { + m.transforms(); + } + catch ( UncheckedException e ) + { + Throwable t = e.unwrap(); + if ( t instanceof SQLException ) + throw (SQLException)t; + throw e; + } + } + + private static final List> s_vpc = + List.of(SRFTemplate.ValuePerCall.class); + private static final List> s_mat = + List.of(SRFTemplate.Materialize.class); + private static final List> s_vpcmat = + List.of(SRFTemplate.ValuePerCall.class, SRFTemplate.Materialize.class); + private static final List> s_matvpc = + List.of(SRFTemplate.Materialize.class, SRFTemplate.ValuePerCall.class); + + private static native void _cacheReference(LookupImpl instance, long extra); + + private static native boolean _get_fn_expr_variadic(ByteBuffer fcinfo); + + private static native void _stableInputs( + ByteBuffer fcinfo, ByteBuffer bits); + + private static native TupleDescriptor _notionalCallResultType( + ByteBuffer fcinfo, int[] returnTypeOid); + + private static native boolean _resolveArgTypes( + ByteBuffer fcinfo, ByteBuffer types, ByteBuffer unresolvedBitmap, + int tplSz, int argSz); + + @Native private static final int OFFSET_CallContext_atomic = 4; + @Native private static final int SIZEOF_CallContext_atomic = 1; + + @Native private static final int SIZEOF_TRGD_tg_event = 4; + + @Native private static final int TRIGGER_EVENT_INSERT = 0; + @Native private static final int TRIGGER_EVENT_DELETE = 1; + @Native private static final int TRIGGER_EVENT_UPDATE = 2; + @Native private static final int TRIGGER_EVENT_TRUNCATE = 3; + @Native private static final int TRIGGER_EVENT_OPMASK = 3; + @Native private static final int TRIGGER_EVENT_ROW = 4; + @Native private static final int TRIGGER_EVENT_BEFORE = 8; + @Native private static final int TRIGGER_EVENT_AFTER = 0; + @Native private static final int TRIGGER_EVENT_INSTEAD = 0x10; + @Native private static final int TRIGGER_EVENT_TIMINGMASK = 0x18; + + @Native private static final int FirstLowInvalidHeapAttributeNumber = -7; + + @Native private static final int SFRM_ValuePerCall = 0x01; + @Native private static final int SFRM_Materialize = 0x02; + @Native private static final int 
SFRM_Materialize_Random = 0x04; + @Native private static final int SFRM_Materialize_Preferred = 0x08; + + @Native private static final int ExprSingleResult = 0; + @Native private static final int ExprMultipleResult = 1; + @Native private static final int ExprEndResult = 2; +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/MemoryContextImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/MemoryContextImpl.java new file mode 100644 index 000000000..854fa4605 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/MemoryContextImpl.java @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import static java.lang.Integer.toUnsignedLong; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.nio.charset.CharacterCodingException; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; + +import org.postgresql.pljava.internal.CacheMap; +import org.postgresql.pljava.internal.Checked; +import static org.postgresql.pljava.internal.DualState.m; +import org.postgresql.pljava.internal.LifespanImpl; + +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import org.postgresql.pljava.model.MemoryContext; + +import static org.postgresql.pljava.pg.DatumUtils.addressOf; +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; +import static org.postgresql.pljava.pg.DatumUtils.mapCString; +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; +import static org.postgresql.pljava.pg.DatumUtils.storePointer; + +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_MCTX; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_MCTX_name; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_MCTX_ident; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_MCTX_firstchild; + +/* + * CurrentMemoryContext is declared in utils/palloc.h and defined in + * utils/mmgr/mcxt.c along with the rest of these, which are puzzlingly + * declared in utils/memutils.h instead. + * + * TopMemoryContext // can be made a static + * ErrorContext + * PostmasterContext + * CacheMemoryContext + * MessageContext + * TopTransactionContext + * CurTransactionContext + * PortalContext // transient; for active portal + * + * The structure of a context is in nodes/memnodes.h + */ + +/** + * A lazily-created mirror of a PostgreSQL MemoryContext. + *

    + * PostgreSQL is creating, resetting, and deleting memory contexts all the time, + * and most of them will never be visible in PL/Java; one of these objects only + * gets created when Java code specifically requests a reference to a particular + * context, generally to make it the {@code Lifespan} of some PL/Java object + * that should be invalidated when the context goes away. + *

+ * Once an instance of this class has been instantiated, and before it escapes
+ * to calling Java code, it must be registered for a reset/delete callback on
+ * the underlying PostgreSQL context, so it can track that context's life
+ * cycle and invalidate, when the time comes, any objects it has been used as
+ * the "owner" of. (Instances that might be transiently created here, say in
+ * traversing the context tree, and won't escape, don't need the full
+ * registration treatment.) All creation, traversal, and mutation must happen
+ * on the PG thread. Once published and while valid, an instance can be
+ * observed by other threads.

+ * Events that can occur in the life of a memory context:
+ *<dl>
+ *<dt>SetParent</dt><dd>It can be made a child of a context other than its
+ * original parent. (It can also be given the null parent, immediately before
+ * being deleted; this happens after invocation of the callback, though, so
+ * gives the callback routine no help in determining what is happening.)</dd>
+ *<dt>Reset</dt><dd>It can have all of its descendant contexts deleted and its
+ * own allocations freed, but remain in existence itself.</dd>
+ *<dt>ResetOnly</dt><dd>It can have its own allocations freed, with no effect
+ * on descendant contexts.</dd>
+ *<dt>ResetChildren</dt><dd>All of its children can recursively get the
+ * ResetChildren treatment and in addition be ResetOnly themselves, but with
+ * no effect on this context itself.</dd>
+ *<dt>Delete</dt><dd>All of its descendants, and last this context itself, go
+ * away.</dd>
+ *<dt>DeleteChildren</dt><dd>All of its descendants go away, with no other
+ * effect on this context.</dd>
+ *</dl>

    + * Complicating the lifecycle tracking, PostgreSQL will invoke exactly the same + * callback, with exactly the same parameter, whether the context in question + * is being deleted or reset. In the reset case, the context is still valid + * after the callback; in the delete case, it is not. The difference is not + * important for the objects "owned" by this context; they're to be invalidated + * in either case. But it leaves the callback with a puzzle to solve regarding + * what to do with this object itself. + *

+ * A few related observations:
+ *<ul>
+ *<li>Within the callback itself, the context is still valid; its native
+ * struct may still be accessed safely, and its parent, child, and sibling
+ * links are sane.</li>
+ *<li>If the {@code firstchild} link is non-null, this is definitely a reset
+ * and not a delete. In any delete case, all children will already be
+ * gone.</li>
+ *<li>Conversely, though, absence of children does not prove this is
+ * deletion.</li>
+ *<li>Hence, the callback will leave this mirror in either a definitely-valid
+ * or a maybe-deleted state.</li>
+ *<li>In either state, its callback will have been deregistered. It must
+ * re-register the callback in the definitely-valid state. In the
+ * maybe-deleted state, it will receive no further callbacks, unless it can
+ * later be found revivifiable and the callback is re-registered.</li>
+ *<li>Because the callback can only proceed when none of this ResourceOwner's
+ * owned objects are pinned, and they all will be invalidated and delinked
+ * from it, it will always be the owner of no objects when the callback
+ * completes. A possible approach then is to treat maybe-deleted as
+ * definitely-deleted always, invalidate and unpublish this object, and
+ * require a Java caller to obtain a new mirror of the same context if indeed
+ * it still exists and is wanted. Efforts to retain and possibly revivify the
+ * mirror could be viewed as optimizations. (They could have API consequences,
+ * though; without revivification, the object would have to be made invalid
+ * and throw an exception if used by Java code that had held on to a
+ * reference, even if only a reset was intended. Revivification could allow
+ * the retained reference to remain usable.)</li>
+ *<li>Once the callback completes, the maybe-deleted state must be treated as
+ * completely forbidding any access to the mapped memory. If there is any
+ * information that could be useful in a later revivification decision, it
+ * must be collected by the callback and saved in the Java object state.</li>
+ *<li>If the callback for a maybe-deleted mirror saves a reference to (a
+ * published Java mirror of) its parent at callback time and, at a later
+ * attempt to use the object, the parent is found to be valid and have this
+ * object as a child, revivification is supported.</li>
+ *<li>That child-of-valid-parent test can be applied recursively if the
+ * parent is also found to be maybe-deleted. But the test can spuriously fail
+ * if a (reset-but-still-valid) context was reparented after the callback
+ * saved its parent reference.</li>
+ *<li>Obtaining the reference again from one of the PostgreSQL globals or
+ * from a valid PostgreSQL data structure clearly re-establishes that it is
+ * valid. (Whether it is "the same" context is more a philosophical point;
+ * whether reset or deleted, it was left with no allocations and no owned
+ * objects at that point, so questions of its "identity" may not be critical.
+ * Its name and ident may have changed. Its operations (the 'type' of context)
+ * may also have changed, but may be a lower-level detail than needs attention
+ * here.)</li>
+ *</ul>
    + */ +public class MemoryContextImpl extends LifespanImpl +implements MemoryContext, LifespanImpl.Addressed +{ + static final ByteBuffer[] s_knownContexts; + + /** + * Map from native address of a PostgreSQL MemoryContext to an instance + * of this class. + *

    + * A non-concurrent map suffices, as the uses are only on the PG thread + * (in known() within a doInPG(), and in callback() invoked from PG). + */ + static final CacheMap s_map = + CacheMap.newThreadConfined( + () -> ByteBuffer.allocate(SIZEOF_DATUM).order(nativeOrder())); + + static + { + ByteBuffer[] bs = EarlyNatives._window(ByteBuffer.class); + /* + * The first one windows CurrentMemoryContext. Set the correct byte + * order but do not make it read-only; operations may be provided + * for setting it. + */ + bs[0] = bs[0].order(nativeOrder()); + /* + * The rest are made native-ordered and read-only. + */ + for ( int i = 1; i < bs.length; ++ i ) + bs[i] = asReadOnlyNativeOrder(bs[i]); + s_knownContexts = bs; + } + + static MemoryContext known(int which) + { + ByteBuffer global = s_knownContexts[which]; + return doInPG(() -> + { + long ctx = fetchPointer(global, 0); + if ( 0 == ctx ) + return null; + return fromAddress(ctx); + }); + } + + public static MemoryContext fromAddress(long address) + { + assert threadMayEnterPG() : m("MemoryContext thread"); + + /* + * Cache strongly; see LifespanImpl javadoc. + */ + return s_map.stronglyCache( + b -> + { + if ( 4 == SIZEOF_DATUM ) + b.putInt((int)address); + else + b.putLong(address); + }, + b -> + { + MemoryContextImpl c = new MemoryContextImpl(address); + EarlyNatives._registerCallback(address); + return c; + } + ); + } + + /** + * Specialized method intended, so far, only for {@code PgSavepoint}'s use. + *

    + * Only to be called on the PG thread. + */ + public static long getCurrentRaw() + { + assert threadMayEnterPG() : m("MemoryContext thread"); + return fetchPointer(s_knownContexts[0], 0); + } + + /** + * Even more specialized method intended, so far, only for + * {@code PgSavepoint}'s use. + *

    + * Only to be called on the PG thread. + */ + public static void setCurrentRaw(long context) + { + assert threadMayEnterPG() : m("MemoryContext thread"); + storePointer(s_knownContexts[0], 0, context); + } + + /** + * Change the current memory context to c, for use in + * a {@code try}-with-resources to restore the prior context on exit + * of the block. + */ + public static Checked.AutoCloseable + allocatingIn(MemoryContext c) + { + assert threadMayEnterPG() : m("MemoryContext thread"); + MemoryContextImpl ci = (MemoryContextImpl)c; + long prior = getCurrentRaw(); + Checked.AutoCloseable ac = () -> setCurrentRaw(prior); + setCurrentRaw(ci.m_address); + return ac; + } + + /* + * Called only from JNI. + * + * See EarlyNatives._registerCallback below for discussion of why the native + * context address is used as the callback argument. + * + * Deregistering the callback is a non-issue: that has already happened + * when this call is made. + */ + private static void callback(long ctx) + { + CacheMap.Entry e = s_map.find( + b -> + { + if ( 4 == SIZEOF_DATUM ) + b.putInt((int)ctx); + else + b.putLong(ctx); + } + ); + + if ( null == e ) + return; + + MemoryContextImpl c = e.get(); + if ( null == c ) + return; + + /* + * invalidate() has to make a (conservative) judgment whether this + * callback reflects a 'reset' or 'delete' operation, and return true + * if the mapping should be removed from the cache. It should return + * false only if the case is provably a reset only, or (possible future + * work) if it can be placed in a maybe-deleted state and possibly + * revivified later. Otherwise, the instance must be conservatively + * marked invalid, and dropped from the cache. + */ + if ( c.invalidate() ) + e.remove(); + } + + private final ByteBuffer m_context; + /** + * The address of the context, even though technically redundant. + *
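The allocatingIn method above is intended for try-with-resources. A minimal usage sketch (hypothetical caller code, not part of this patch; someContext stands in for a MemoryContext obtained elsewhere, and the block is assumed to run on the PG thread):

```java
import org.postgresql.pljava.model.MemoryContext;
import org.postgresql.pljava.pg.MemoryContextImpl;

public class AllocatingInSketch
{
    // Hypothetical helper: do some work with CurrentMemoryContext switched to
    // someContext; the prior context is restored when the try block exits,
    // even if the work throws.
    static void doWorkIn(MemoryContext someContext) throws Exception
    {
        try ( var ac = MemoryContextImpl.allocatingIn(someContext) )
        {
            // native allocations requested here are charged to someContext
        }
        // the previously current memory context has been restored here
    }
}
```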

    + * A JNI function can easily retrieve it from the {@code ByteBuffer}, but + * by keeping the value here, sometimes a JNI call can be avoided. + */ + private final long m_address; + private String m_ident; + private final String m_name; + + private MemoryContextImpl(long context) + { + m_address = context; + m_context = mapFixedLength(context, SIZEOF_MCTX); + String s; + + long p = fetchPointer(m_context, OFFSET_MCTX_ident); + + try + { + if ( 0 == p ) + s = null; + else + s = SERVER_ENCODING.decode(mapCString(p)).toString(); + } + catch ( CharacterCodingException e ) + { + s = "[unexpected encoding]"; + } + m_ident = s; + + p = fetchPointer(m_context, OFFSET_MCTX_name); + + try + { + if ( 0 == p ) + s = null; + else + s = SERVER_ENCODING.decode(mapCString(p)).toString(); + } + catch ( CharacterCodingException e ) + { + s = "[unexpected encoding]"; + } + m_name = s; + } + + @Override + public long address() + { + if ( 0 == m_context.limit() ) + throw new IllegalStateException( + "address may not be taken of invalidated MemoryContext"); + return m_address; + } + + @Override + public String toString() + { + return String.format("MemoryContext[%s,%s]", m_name, m_ident); + } + + /* + * Determine (or conservatively guess) whether this context is being deleted + * or merely reset, and perform (in either case) the nativeRelease() actions + * for dependent objects. + * + * Return false only if this is provably a reset only, or (possible future + * work) if it can be placed in a maybe-deleted state and possibly + * revivified later. Otherwise, the instance must be conservatively + * marked invalid, and true returned to drop it from the cache. + */ + private boolean invalidate() + { + lifespanRelease(); + + /* + * The one easy rule is that if there is any child, the case can only be + * 'reset'. + */ + if ( 0 != fetchPointer(m_context, OFFSET_MCTX_firstchild) ) + return false; + + /* + * Rather than a separate field to record invalidation status, set the + * windowing ByteBuffer's limit to zero. This will ensure an + * IndexOutOfBoundsException on future attempts to read through it, + * without cluttering the code with additional tests. + */ + m_context.limit(0); + return true; + } + + private static class EarlyNatives + { + /** + * Returns an array of ByteBuffer, one covering each PostgreSQL known + * memory context global, in the same order as the arbitrary indices + * defined in the API class CatalogObject.Factory, which are what will + * be passed to the known() method. + *
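The invalidate method above relies on a small java.nio idiom worth spelling out: setting a ByteBuffer's limit to zero makes any later read through it throw, so no separate validity flag needs checking on every access. A standalone illustration of just that behavior (plain JDK code, not part of this patch):

```java
import java.nio.ByteBuffer;

public class ZeroLimitSketch
{
    public static void main(String[] args)
    {
        ByteBuffer window = ByteBuffer.allocate(64); // stand-in for a mapped native struct
        System.out.println(window.getInt(0));        // reads fine while "valid"

        window.limit(0);                             // the invalidation trick
        try
        {
            window.getInt(0);                        // any further access...
        }
        catch ( IndexOutOfBoundsException e )
        {
            System.out.println("...is rejected after invalidation");
        }
    }
}
```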

    + * Takes a {@code Class} argument, to save the native code + * a lookup. + */ + private static native ByteBuffer[] _window(Class component); + + /** + * Register a memory context callback for the context with the given + * native address in PostgreSQL. + *

    + * A callback is allowed one {@code void *}-sized argument to receive + * when called back. If that were a JNI global reference, for example, + * we could arrange for {@link #callback callback} to be invoked with + * the affected Java instance directly. But {@code callback} will be + * wanting the native address anyway in order to look it up and remove + * it from the CacheMap, and JNI's global references surely involve + * their own layer of mapping under the JVM's hood. So we may as well + * keep it simple and use our one allowed arg to hold the context + * address itself, which is necessary anyway, and sufficient. + */ + private static native void _registerCallback(long nativeAddress); + } + + //possibly useful operations: + //MemoryContext parent(); + // B palloc(Class api, long size); // ByteBuffer.class for now + // flags HUGE 1 NO_OOM 2 ZERO 4 + // B repalloc(B chunk, long size); + // others from palloc.h ? + // AutoCloseable switchedTo(); + // reset/delete/resetonly/resetchildren/deletechildren/setparent + // from utils/memutils.h? only if PL/Java created the context? + // require intent to delete/reset to be declared on creation, and prevent + // such a context being used in switchedTo()? + // AllocSetContextCreate/SlabContextCreate/GenerationContextCreate + // protect some operations as protected methods of Adapter? +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ModelConstants.java b/pljava/src/main/java/org/postgresql/pljava/pg/ModelConstants.java new file mode 100644 index 000000000..0ee645c0d --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ModelConstants.java @@ -0,0 +1,851 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import java.lang.annotation.Native; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; +import java.nio.IntBuffer; + +import java.sql.SQLException; + +/** + * Supply static values that can vary between PostgreSQL versions/builds. + */ +public abstract class ModelConstants +{ + /* + * C code will contain a static array of int initialized to the values + * that are needed. The native method in this class obtains a ByteBuffer + * windowing that static array. + * + * To detect fat-finger mistakes, the array will include alternating indices + * and values { IDX_SIZEOF_DATUM, SIZEOF_DATUM, ... }, so when windowed as + * an IntBuffer, get(2*IDX_FOO) should equal IDX_FOO and get(1 + 2*IDX_FOO) + * is then the value; this can be done without hairy preprocessor logic on + * the C side, and checked here (not statically, but still cheaply). C99's + * designated array initializers would offer a simpler, all-static approach, + * but PostgreSQL strives for C89 compatibility before PostgreSQL 12. + * + * As a practical matter, the sequence of IDX_... values is allowed to have + * gaps, so that new constants can be added as needed, coherently grouped, + * without requiring extensive renumbering of otherwise unaffected lines. 
+ * The array cells remain consecutive, and this class simply tracks a gap + * between the IDX_... value and the physical position. This, of course, + * would complicate any move to C99 designated initializers. + * + * Starting with PostgreSQL 11, LLVM bitcode for the server might be found + * in $pkglibdir/bitcode/postgres, and that could one day pose opportunities + * for a PL/Java using an LLVM library, or depending on GraalVM, to access + * these values (and do much more) without this tedious hand coding. But for + * now, the goal is to support earlier versions and not require LLVM or + * GraalVM, and hope that the bootstrapping needed here does not become too + * burdensome. + */ + private static class Natives implements AutoCloseable + { + private IntBuffer b = _statics() + .asReadOnlyBuffer().order(nativeOrder()).asIntBuffer(); + private int gap = 0; + + /** + * Returns the next constant from the windowed array. + *

    + * The next constant is determined by the buffer's current position, + * not by index, which is only used for sanity checking: + *

      + *
    1. adjusted by the current "gap", it should be half the buffer's + * current position, and + *
    2. unadjusted, it should equal the {@code int} read at + * that position. + *
    + * The {@code int} read at the next consecutive position is returned. + * @param index the expected index of the next constant to be read + * @return the next constant + * @throws ConstantsError if any sanity check fails + */ + int get(int index) + { + try + { + if ( b.position() != (index - gap) << 1 || index != b.get() ) + throw new ConstantsError(); + return b.get(); + } + catch ( Exception e ) + { + throw (ConstantsError)new ConstantsError().initCause(e); + } + } + + /** + * Conforms the internal sanity checking to a gap in assigned indices. + *
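To make the { index, value, index, value, ... } layout described above concrete, here is a standalone sketch of the same decoding and sanity check, simplified (no gap handling) and with the buffer fabricated in Java rather than windowed over the C side's static array:

```java
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
import static java.nio.ByteOrder.nativeOrder;

public class AlternatingConstantsSketch
{
    // Hypothetical indices for two constants of interest.
    static final int IDX_SIZEOF_DATUM = 0;
    static final int IDX_NAMEDATALEN  = 1;

    public static void main(String[] args)
    {
        // Pretend this buffer was supplied by native code.
        IntBuffer b = ByteBuffer.allocate(4 * Integer.BYTES)
            .order(nativeOrder()).asIntBuffer();
        b.put(new int[] { IDX_SIZEOF_DATUM, 8, IDX_NAMEDATALEN, 64 });
        b.rewind();

        System.out.println(get(b, IDX_SIZEOF_DATUM)); // 8
        System.out.println(get(b, IDX_NAMEDATALEN));  // 64
    }

    // Same check as in Natives.get(), minus the gap bookkeeping: the buffer
    // position must be twice the index, the int found there must equal the
    // index, and the int after it is the wanted value.
    static int get(IntBuffer b, int index)
    {
        if ( b.position() != index << 1  ||  index != b.get() )
            throw new IllegalStateException("constants jumbled");
        return b.get();
    }
}
```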

    + * The supplied index must be greater than that implied by + * the buffer's current position (and previously recorded gap, if any), + * and the difference is remembered as the new gap. + * @param index the assigned index of the next constant to be read + * @throws ConstantsError if the newly-computed gap is not larger than + * the remembered value + */ + void gap(int index) + { + int pos = b.position(); + assert 0 == (pos & 1); // expected as get() always advances by two + index -= pos >>> 1; + if ( index <= gap ) + throw new ConstantsError(); + gap = index; + } + + @Override + public void close() + { + if ( 0 < b.remaining() ) + throw new ConstantsError(); + } + + private static native ByteBuffer _statics(); + } + + /* + * These constants (which will be included in a generated header available + * to the C code) have historically stable values that aren't expected to + * change. The C code simply asserts statically at build time that they + * are right. If a new PG version conflicts with the assertion, move the + * constant from here to the list further below of constants that get their + * values *from* the C code at class initialization time. (When doing that, + * also check uses of the constant for any assumptions that might no longer + * hold.) + */ + + @Native public static final int PG_SQL_ASCII = 0; + @Native public static final int PG_UTF8 = 6; + @Native public static final int PG_LATIN1 = 8; + @Native public static final int PG_ENCODING_BE_LAST = 34; + + @Native public static final int VARHDRSZ = 4; + @Native public static final int VARHDRSZ_EXTERNAL = 2; + @Native public static final byte VARTAG_INDIRECT = 1; + @Native public static final byte VARTAG_EXPANDED_RO = 2; + @Native public static final byte VARTAG_EXPANDED_RW = 3; + @Native public static final byte VARTAG_ONDISK = 18; + + @Native public static final int Anum_pg_attribute_attname = 2; + + @Native public static final int SIZEOF_pg_attribute_atttypid = 4; + @Native public static final int SIZEOF_pg_attribute_attlen = 2; + @Native public static final int SIZEOF_pg_attribute_atttypmod = 4; + @Native public static final int SIZEOF_pg_attribute_attbyval = 1; + @Native public static final int SIZEOF_pg_attribute_attalign = 1; + @Native public static final int SIZEOF_pg_attribute_attnotnull = 1; + @Native public static final int SIZEOF_pg_attribute_attisdropped = 1; + + @Native public static final int SIZEOF_CompactAttribute = 16; + + @Native public static final int Anum_pg_extension_oid = 1; + @Native public static final int ExtensionOidIndexId = 3080; + + @Native public static final int Anum_pg_trigger_oid = 1; + @Native public static final int TriggerOidIndexId = 2702; + + @Native public static final int SIZEOF_ArrayType_ndim = 4; + @Native public static final int SIZEOF_ArrayType_dataoffset = 4; + @Native public static final int SIZEOF_ArrayType_elemtype = 4; + + @Native public static final int OFFSET_ArrayType_ndim = 0; + @Native public static final int OFFSET_ArrayType_dataoffset = 4; + @Native public static final int OFFSET_ArrayType_elemtype = 8; + + @Native public static final int OFFSET_ArrayType_DIMS = 12; + @Native public static final int SIZEOF_ArrayType_DIM = 4; + + @Native public static final int SIZEOF_NodeTag = 4; + @Native public static final int SIZEOF_Oid = 4; + + @Native public static final int SIZEOF_fcinfo_fncollation = 4; + @Native public static final int SIZEOF_fcinfo_isnull = 1; + @Native public static final int SIZEOF_fcinfo_nargs = 2; + + /* + * These constants (which will be included in a 
generated header available + * to the C code) are (almost) indices into the 'statics' array where the + * various wanted values should be placed. Edits should keep them distinct + * consecutive small array indices within related groups; gaps are allowed + * (and encouraged) between groups, so additions can be made without mass + * renumbering. The get() method of Natives, used in the static initializer, + * will be checking for gaps or repeats; the gap() method must be called + * where each gap occurs, to advise what the next expected IDX_... value + * is to be. + */ + @Native private static final int IDX_PG_VERSION_NUM = 0; + + @Native private static final int IDX_SIZEOF_DATUM = 1; + @Native private static final int IDX_SIZEOF_INT = 2; + @Native private static final int IDX_SIZEOF_LONG = 3; + @Native private static final int IDX_SIZEOF_SIZE = 4; + + @Native private static final int IDX_ALIGNOF_SHORT = 5; + @Native private static final int IDX_ALIGNOF_INT = 6; + @Native private static final int IDX_ALIGNOF_DOUBLE = 7; + @Native private static final int IDX_MAXIMUM_ALIGNOF = 8; + + @Native private static final int IDX_NAMEDATALEN = 9; + + @Native private static final int IDX_SIZEOF_varatt_indirect = 10; + @Native private static final int IDX_SIZEOF_varatt_expanded = 11; + @Native private static final int IDX_SIZEOF_varatt_external = 12; + + @Native private static final int IDX_OFFSET_Relation_rd_id = 13; + + + @Native private static final int IDX_HEAPTUPLESIZE = 20; + @Native private static final int IDX_OFFSET_TTS_NVALID = 21; + @Native private static final int IDX_SIZEOF_TTS_NVALID = 22; + + @Native private static final int IDX_TTS_FLAG_EMPTY = 23; + @Native private static final int IDX_TTS_FLAG_FIXED = 24; + @Native private static final int IDX_OFFSET_TTS_FLAGS = 25; + + /* + * Before PG 12, TTS had no flags field with bit flags, but instead + * distinct boolean (1-byte) fields. 
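As a purely hypothetical illustration of how the TTS constants above are meant to be consumed (the real consumers live elsewhere in pljava-pg and are not part of this excerpt; the 16-bit width of the flags word and the native byte order of the buffer are assumptions of this sketch):

```java
import java.nio.ByteBuffer;

import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TTS_EMPTY;
import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TTS_FLAGS;
import static org.postgresql.pljava.pg.ModelConstants.PG_VERSION_NUM;
import static org.postgresql.pljava.pg.ModelConstants.TTS_FLAG_EMPTY;

public class SlotEmptySketch
{
    // Given a native-ordered ByteBuffer windowing a TupleTableSlot, report
    // whether the slot is empty: on PG 12 and later, test the EMPTY bit in
    // the flags word (assumed 16 bits wide here); earlier, read the old
    // one-byte boolean field.
    static boolean slotIsEmpty(ByteBuffer tts)
    {
        if ( PG_VERSION_NUM >= 120000 )
            return 0 != ( tts.getShort(OFFSET_TTS_FLAGS) & TTS_FLAG_EMPTY );
        return 0 != tts.get(OFFSET_TTS_EMPTY);
    }
}
```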
+ */ + @Native private static final int IDX_OFFSET_TTS_EMPTY = 26; + @Native private static final int IDX_OFFSET_TTS_FIXED = 27; + @Native private static final int IDX_OFFSET_TTS_TABLEOID = 28; + + @Native private static final int IDX_OFFSET_NullableDatum_isnull = 29; + @Native private static final int IDX_SIZEOF_NullableDatum = 30; + + @Native private static final int IDX_OFFSET_fcinfo_fncollation = 31; + @Native private static final int IDX_OFFSET_fcinfo_isnull = 32; + @Native private static final int IDX_OFFSET_fcinfo_nargs = 33; + @Native private static final int IDX_OFFSET_fcinfo_args = 34; + + @Native private static final int IDX_OFFSET_Bitmapset_words = 35; + + + + @Native private static final int IDX_OFFSET_TUPLEDESC_ATTRS = 40; + @Native private static final int IDX_OFFSET_TUPLEDESC_TDREFCOUNT = 41; + @Native private static final int IDX_SIZEOF_TUPLEDESC_TDREFCOUNT = 42; + @Native private static final int IDX_OFFSET_TUPLEDESC_TDTYPEID = 43; + @Native private static final int IDX_OFFSET_TUPLEDESC_TDTYPMOD = 44; + + + + @Native private static final int IDX_SIZEOF_FORM_PG_ATTRIBUTE = 50; + @Native private static final int IDX_ATTRIBUTE_FIXED_PART_SIZE = 51; + @Native private static final int IDX_OFFSET_pg_attribute_atttypid = 52; + @Native private static final int IDX_OFFSET_pg_attribute_attlen = 53; + // available(1) + @Native private static final int IDX_OFFSET_pg_attribute_atttypmod = 55; + @Native private static final int IDX_OFFSET_pg_attribute_attbyval = 56; + @Native private static final int IDX_OFFSET_pg_attribute_attalign = 57; + @Native private static final int IDX_OFFSET_pg_attribute_attnotnull = 58; + @Native private static final int IDX_OFFSET_pg_attribute_attisdropped = 59; + + + + @Native private static final int IDX_CLASS_TUPLE_SIZE = 70; + @Native private static final int IDX_Anum_pg_class_reltype = 71; + + + + @Native private static final int IDX_SIZEOF_MCTX = 80; + @Native private static final int IDX_OFFSET_MCTX_isReset = 81; + @Native private static final int IDX_OFFSET_MCTX_mem_allocated = 82; + @Native private static final int IDX_OFFSET_MCTX_parent = 83; + @Native private static final int IDX_OFFSET_MCTX_firstchild = 84; + @Native private static final int IDX_OFFSET_MCTX_prevchild = 85; + @Native private static final int IDX_OFFSET_MCTX_nextchild = 86; + @Native private static final int IDX_OFFSET_MCTX_name = 87; + @Native private static final int IDX_OFFSET_MCTX_ident = 88; + + + + /* + * N_ACL_RIGHTS was stable for a long time, but changes in PG 15 and in 16 + */ + @Native private static final int IDX_N_ACL_RIGHTS = 100; + @Native private static final int IDX_BITS_PER_BITMAPWORD = 101; + + + + /* + * Tags of a select few PostgreSQL Node subtypes. + */ + @Native private static final int IDX_T_Invalid = 110; + @Native private static final int IDX_T_AggState = 111; + @Native private static final int IDX_T_CallContext = 112; + @Native private static final int IDX_T_EventTriggerData = 113; + @Native private static final int IDX_T_ReturnSetInfo = 114; + @Native private static final int IDX_T_TriggerData = 115; + @Native private static final int IDX_T_WindowAggState = 116; + @Native private static final int IDX_T_WindowObjectData = 117; + @Native private static final int IDX_T_Bitmapset = 118; + @Native private static final int IDX_T_ErrorSaveContext = 119; + + + + /* + * Layout of the PostgreSQL Trigger structure that is supplied for a call + * on a trigger. 
The content comes from pg_trigger but the layout differs, + * so reading from it must be done without TupleTableSlot's help. + */ + @Native private static final int IDX_OFFSET_TRG_tgoid = 150; + @Native private static final int IDX_OFFSET_TRG_tgname = 151; + @Native private static final int IDX_OFFSET_TRG_tgfoid = 152; + @Native private static final int IDX_OFFSET_TRG_tgtype = 153; + @Native private static final int IDX_OFFSET_TRG_tgenabled = 154; + @Native private static final int IDX_OFFSET_TRG_tgisinternal = 155; + @Native private static final int IDX_OFFSET_TRG_tgisclone = 156; + @Native private static final int IDX_OFFSET_TRG_tgconstrrelid = 157; + @Native private static final int IDX_OFFSET_TRG_tgconstrindid = 158; + @Native private static final int IDX_OFFSET_TRG_tgconstraint = 159; + @Native private static final int IDX_OFFSET_TRG_tgdeferrable = 160; + @Native private static final int IDX_OFFSET_TRG_tginitdeferred = 161; + @Native private static final int IDX_OFFSET_TRG_tgnargs = 162; + @Native private static final int IDX_OFFSET_TRG_tgnattr = 163; + @Native private static final int IDX_OFFSET_TRG_tgattr = 164; + @Native private static final int IDX_OFFSET_TRG_tgargs = 165; + @Native private static final int IDX_OFFSET_TRG_tgqual = 166; + @Native private static final int IDX_OFFSET_TRG_tgoldtable = 167; + @Native private static final int IDX_OFFSET_TRG_tgnewtable = 168; + @Native private static final int IDX_SIZEOF_Trigger = 169; + + + + /* + * Layouts of fcinfo->context / fcinfo->resultinfo structures. + */ + @Native private static final int IDX_OFFSET_TRGD_tg_event = 180; + @Native private static final int IDX_OFFSET_TRGD_tg_relation = 181; + @Native private static final int IDX_OFFSET_TRGD_tg_trigtuple = 182; + @Native private static final int IDX_OFFSET_TRGD_tg_newtuple = 183; + @Native private static final int IDX_OFFSET_TRGD_tg_trigger = 184; + @Native private static final int IDX_OFFSET_TRGD_tg_updatedcols = 185; + + @Native private static final int IDX_OFFSET_RSI_allowedModes = 186; + @Native private static final int IDX_OFFSET_RSI_isDone = 187; + @Native private static final int IDX_OFFSET_RSI_returnMode = 188; + @Native private static final int IDX_SIZEOF_RSI_isDone = 189; + @Native private static final int IDX_SIZEOF_RSI_returnMode = 190; + + + + /* + * Identifiers of different caches in PG's syscache, utils/cache/syscache.c. + * As upstream adds new caches, the enum is kept in alphabetical order, so + * they belong in this section to have their effective values picked up. 
+ */ + @Native private static final int IDX_ATTNUM = 500; + @Native private static final int IDX_AUTHMEMMEMROLE = 501; + @Native private static final int IDX_AUTHMEMROLEMEM = 502; + @Native private static final int IDX_AUTHOID = 503; + @Native private static final int IDX_COLLOID = 504; + @Native private static final int IDX_DATABASEOID = 505; + @Native private static final int IDX_LANGOID = 506; + @Native private static final int IDX_NAMESPACEOID = 507; + @Native private static final int IDX_OPEROID = 508; + @Native private static final int IDX_PROCOID = 509; + @Native private static final int IDX_RELOID = 510; + @Native private static final int IDX_TSCONFIGOID = 511; + @Native private static final int IDX_TSDICTOID = 512; + @Native private static final int IDX_TYPEOID = 513; + @Native private static final int IDX_CONSTROID = 514; + @Native private static final int IDX_TRFOID = 515; + @Native private static final int IDX_TRFTYPELANG = 516; + @Native private static final int IDX_AMOID = 517; + @Native private static final int IDX_TABLESPACEOID = 518; + + @Native private static final int IDX_FOREIGNDATAWRAPPEROID = 519; + @Native private static final int IDX_FOREIGNSERVEROID = 520; + @Native private static final int IDX_FOREIGNTABLEREL = 521; + + + + /* + * These public statics are the values of interest, set at class + * initialization time by reading them from the buffer managed by Natives. + */ + + /** + * Numeric PostgreSQL version compiled in at build time. + */ + public static final int PG_VERSION_NUM; + + public static final int SIZEOF_DATUM; + /* + * In backporting, can be useful when the git history shows something was + * always of 'int' type, so it doesn't need a dedicated SIZEOF_FOO, but does + * need to notice if a platform has an unexpected 'int' width. Same for + * 'long', widely used in SPI and having an unexpected width on Windows. + */ + public static final int SIZEOF_INT; + public static final int SIZEOF_LONG; + public static final int SIZEOF_SIZE; + + public static final int ALIGNOF_SHORT; + public static final int ALIGNOF_INT; + public static final int ALIGNOF_DOUBLE; + public static final int MAXIMUM_ALIGNOF; + + public static final short NAMEDATALEN; + + public static final int SIZEOF_varatt_indirect; + public static final int SIZEOF_varatt_expanded; + public static final int SIZEOF_varatt_external; + + public static final int OFFSET_Relation_rd_id; + + + public static final int HEAPTUPLESIZE; + public static final int OFFSET_TTS_NVALID; + public static final int SIZEOF_TTS_NVALID; // int or int16 per pg version + + public static final int TTS_FLAG_EMPTY; + public static final int TTS_FLAG_FIXED; + public static final int OFFSET_TTS_FLAGS; + + public static final int OFFSET_TTS_EMPTY; + public static final int OFFSET_TTS_FIXED; + + public static final int OFFSET_TTS_TABLEOID; // NOCONSTANT unless PG >= 12 + + public static final int OFFSET_NullableDatum_isnull; + public static final int SIZEOF_NullableDatum; + + public static final int OFFSET_fcinfo_fncollation; + public static final int OFFSET_fcinfo_isnull; + public static final int OFFSET_fcinfo_nargs; + public static final int OFFSET_fcinfo_args; + + public static final int OFFSET_Bitmapset_words; + + + + /** + * Offset to where per-attribute data in a {@code TupleDesc} begins. + *

    + * Prior to PostgreSQL 18, this was the offset of a member actually named + * {@code attrs}, and was exactly where the first {@code Form_pg_attribute} + * would be found. As of PostgreSQL 18, this will actually be the offset of + * a member named {@code compact_attrs}, and the first + * {@code Form_pg_attribute} must be found at this offset plus the number of + * attributes times {@code SIZEOF_CompactAttribute}. + */ + public static final int OFFSET_TUPLEDESC_ATTRS; + public static final int OFFSET_TUPLEDESC_TDREFCOUNT; + public static final int SIZEOF_TUPLEDESC_TDREFCOUNT; + public static final int OFFSET_TUPLEDESC_TDTYPEID; + public static final int OFFSET_TUPLEDESC_TDTYPMOD; + + + + public static final int SIZEOF_FORM_PG_ATTRIBUTE; + public static final int ATTRIBUTE_FIXED_PART_SIZE; + public static final int OFFSET_pg_attribute_atttypid; + public static final int OFFSET_pg_attribute_attlen; + // available(1) + public static final int OFFSET_pg_attribute_atttypmod; + public static final int OFFSET_pg_attribute_attbyval; + public static final int OFFSET_pg_attribute_attalign; + public static final int OFFSET_pg_attribute_attnotnull; + public static final int OFFSET_pg_attribute_attisdropped; + + + + public static final int CLASS_TUPLE_SIZE; + public static final int Anum_pg_class_reltype; + + + + public static final int SIZEOF_MCTX; + public static final int OFFSET_MCTX_isReset; + public static final int OFFSET_MCTX_mem_allocated; // since PG 13 + public static final int OFFSET_MCTX_parent; + public static final int OFFSET_MCTX_firstchild; + public static final int OFFSET_MCTX_prevchild; // since PG 9.6 + public static final int OFFSET_MCTX_nextchild; + public static final int OFFSET_MCTX_name; + public static final int OFFSET_MCTX_ident; // since PG 11 + + + + /* + * The number of meaningful rights bits in an ACL bitmask, imported by + * AclItem. + */ + public static final int N_ACL_RIGHTS; + public static final int BITS_PER_BITMAPWORD; // DatumUtils + + + + /* + * Tags of a select few PostgreSQL Node subtypes. + */ + public static final int T_Invalid; + public static final int T_AggState; + public static final int T_CallContext; + public static final int T_EventTriggerData; + public static final int T_ReturnSetInfo; + public static final int T_TriggerData; + public static final int T_WindowAggState; + public static final int T_WindowObjectData; + public static final int T_Bitmapset; + public static final int T_ErrorSaveContext; + + + + /* + * Layout of the PostgreSQL Trigger structure that is supplied for a call + * on a trigger. 
+ */ + public static final int OFFSET_TRG_tgoid; + public static final int OFFSET_TRG_tgname; + public static final int OFFSET_TRG_tgfoid; + public static final int OFFSET_TRG_tgtype; + public static final int OFFSET_TRG_tgenabled; + public static final int OFFSET_TRG_tgisinternal; + public static final int OFFSET_TRG_tgisclone; + public static final int OFFSET_TRG_tgconstrrelid; + public static final int OFFSET_TRG_tgconstrindid; + public static final int OFFSET_TRG_tgconstraint; + public static final int OFFSET_TRG_tgdeferrable; + public static final int OFFSET_TRG_tginitdeferred; + public static final int OFFSET_TRG_tgnargs; + public static final int OFFSET_TRG_tgnattr; + public static final int OFFSET_TRG_tgattr; + public static final int OFFSET_TRG_tgargs; + public static final int OFFSET_TRG_tgqual; + public static final int OFFSET_TRG_tgoldtable; + public static final int OFFSET_TRG_tgnewtable; + public static final int SIZEOF_Trigger; + + + + /* + * Layouts of fcinfo->context structures. + */ + public static final int OFFSET_TRGD_tg_event; + public static final int OFFSET_TRGD_tg_relation; + public static final int OFFSET_TRGD_tg_trigtuple; + public static final int OFFSET_TRGD_tg_newtuple; + public static final int OFFSET_TRGD_tg_trigger; + public static final int OFFSET_TRGD_tg_updatedcols; + + public static final int OFFSET_RSI_allowedModes; + public static final int OFFSET_RSI_isDone; + public static final int OFFSET_RSI_returnMode; + public static final int SIZEOF_RSI_isDone; + public static final int SIZEOF_RSI_returnMode; + + + + /* + * These identify different caches in the PostgreSQL syscache. + * The indicated classes import them. + */ + public static final int ATTNUM; // AttributeImpl + public static final int AUTHMEMMEMROLE; // RegRoleImpl + public static final int AUTHMEMROLEMEM; // " + public static final int AUTHOID; // " + public static final int COLLOID; // RegCollationImpl + public static final int DATABASEOID; // DatabaseImpl + public static final int LANGOID; // ProceduralLanguageImpl + public static final int NAMESPACEOID; // RegNamespaceImpl + public static final int OPEROID; // RegOperatorImpl + public static final int PROCOID; // RegProcedureImpl + public static final int RELOID; // RegClassImpl + public static final int TSCONFIGOID; // RegConfigImpl + public static final int TSDICTOID; // RegDictionaryImpl + public static final int TYPEOID; // RegTypeImpl + public static final int CONSTROID; // ConstraintImpl + public static final int TRFOID; // TransformImpl + public static final int TRFTYPELANG; // " + public static final int AMOID; // AccessMethodImpl + public static final int TABLESPACEOID; // TablespaceImpl + + public static final int FOREIGNDATAWRAPPEROID; // ForeignDataWrapperImpl + public static final int FOREIGNSERVEROID; // ForeignServerImpl + public static final int FOREIGNTABLEREL; // RegClassImpl + + + + /** + * Value supplied for one of these constants when built in a version of PG + * that does not define it. + *

    + * Clearly not useful if the value could be valid for the constant + * in question. + */ + @Native public static final int NOCONSTANT = -1; + + static + { + try ( Natives n = new Natives() ) + { + PG_VERSION_NUM = n.get(IDX_PG_VERSION_NUM); + + SIZEOF_DATUM = n.get(IDX_SIZEOF_DATUM); + SIZEOF_INT = n.get(IDX_SIZEOF_INT); + SIZEOF_LONG = n.get(IDX_SIZEOF_LONG); + SIZEOF_SIZE = n.get(IDX_SIZEOF_SIZE); + + ALIGNOF_SHORT = n.get(IDX_ALIGNOF_SHORT); + ALIGNOF_INT = n.get(IDX_ALIGNOF_INT); + ALIGNOF_DOUBLE = n.get(IDX_ALIGNOF_DOUBLE); + MAXIMUM_ALIGNOF = n.get(IDX_MAXIMUM_ALIGNOF); + + int c = n.get(IDX_NAMEDATALEN); + NAMEDATALEN = (short)c; + assert c == NAMEDATALEN; + + SIZEOF_varatt_indirect = n.get(IDX_SIZEOF_varatt_indirect); + SIZEOF_varatt_expanded = n.get(IDX_SIZEOF_varatt_expanded); + SIZEOF_varatt_external = n.get(IDX_SIZEOF_varatt_external); + + OFFSET_Relation_rd_id = n.get(IDX_OFFSET_Relation_rd_id); + + + + n.gap(IDX_HEAPTUPLESIZE); + HEAPTUPLESIZE = n.get(IDX_HEAPTUPLESIZE); + OFFSET_TTS_NVALID = n.get(IDX_OFFSET_TTS_NVALID); + SIZEOF_TTS_NVALID = n.get(IDX_SIZEOF_TTS_NVALID); + + TTS_FLAG_EMPTY = n.get(IDX_TTS_FLAG_EMPTY); + TTS_FLAG_FIXED = n.get(IDX_TTS_FLAG_FIXED); + OFFSET_TTS_FLAGS = n.get(IDX_OFFSET_TTS_FLAGS); + + OFFSET_TTS_EMPTY = n.get(IDX_OFFSET_TTS_EMPTY); + OFFSET_TTS_FIXED = n.get(IDX_OFFSET_TTS_FIXED); + + OFFSET_TTS_TABLEOID = n.get(IDX_OFFSET_TTS_TABLEOID); + + OFFSET_NullableDatum_isnull= n.get(IDX_OFFSET_NullableDatum_isnull); + SIZEOF_NullableDatum = n.get(IDX_SIZEOF_NullableDatum); + + OFFSET_fcinfo_fncollation = n.get(IDX_OFFSET_fcinfo_fncollation); + OFFSET_fcinfo_isnull = n.get(IDX_OFFSET_fcinfo_isnull); + OFFSET_fcinfo_nargs = n.get(IDX_OFFSET_fcinfo_nargs); + OFFSET_fcinfo_args = n.get(IDX_OFFSET_fcinfo_args); + + OFFSET_Bitmapset_words = n.get(IDX_OFFSET_Bitmapset_words); + + + + n.gap(IDX_OFFSET_TUPLEDESC_ATTRS); + OFFSET_TUPLEDESC_ATTRS = n.get(IDX_OFFSET_TUPLEDESC_ATTRS); + OFFSET_TUPLEDESC_TDREFCOUNT= n.get(IDX_OFFSET_TUPLEDESC_TDREFCOUNT); + SIZEOF_TUPLEDESC_TDREFCOUNT= n.get(IDX_SIZEOF_TUPLEDESC_TDREFCOUNT); + OFFSET_TUPLEDESC_TDTYPEID = n.get(IDX_OFFSET_TUPLEDESC_TDTYPEID); + OFFSET_TUPLEDESC_TDTYPMOD = n.get(IDX_OFFSET_TUPLEDESC_TDTYPMOD); + + + + n.gap(IDX_SIZEOF_FORM_PG_ATTRIBUTE); + SIZEOF_FORM_PG_ATTRIBUTE = n.get(IDX_SIZEOF_FORM_PG_ATTRIBUTE); + ATTRIBUTE_FIXED_PART_SIZE = n.get(IDX_ATTRIBUTE_FIXED_PART_SIZE); + OFFSET_pg_attribute_atttypid + = n.get(IDX_OFFSET_pg_attribute_atttypid); + OFFSET_pg_attribute_attlen + = n.get(IDX_OFFSET_pg_attribute_attlen); + n.gap(IDX_OFFSET_pg_attribute_atttypmod); // available(1) + OFFSET_pg_attribute_atttypmod + = n.get(IDX_OFFSET_pg_attribute_atttypmod); + OFFSET_pg_attribute_attbyval + = n.get(IDX_OFFSET_pg_attribute_attbyval); + OFFSET_pg_attribute_attalign + = n.get(IDX_OFFSET_pg_attribute_attalign); + OFFSET_pg_attribute_attnotnull + = n.get(IDX_OFFSET_pg_attribute_attnotnull); + OFFSET_pg_attribute_attisdropped + = n.get(IDX_OFFSET_pg_attribute_attisdropped); + + + + n.gap(IDX_CLASS_TUPLE_SIZE); + CLASS_TUPLE_SIZE = n.get(IDX_CLASS_TUPLE_SIZE); + Anum_pg_class_reltype = n.get(IDX_Anum_pg_class_reltype); + + + + n.gap(IDX_SIZEOF_MCTX); + SIZEOF_MCTX = n.get(IDX_SIZEOF_MCTX); + OFFSET_MCTX_isReset = n.get(IDX_OFFSET_MCTX_isReset); + OFFSET_MCTX_mem_allocated = n.get(IDX_OFFSET_MCTX_mem_allocated); + OFFSET_MCTX_parent = n.get(IDX_OFFSET_MCTX_parent); + OFFSET_MCTX_firstchild = n.get(IDX_OFFSET_MCTX_firstchild); + OFFSET_MCTX_prevchild = n.get(IDX_OFFSET_MCTX_prevchild); + OFFSET_MCTX_nextchild = 
n.get(IDX_OFFSET_MCTX_nextchild); + OFFSET_MCTX_name = n.get(IDX_OFFSET_MCTX_name); + OFFSET_MCTX_ident = n.get(IDX_OFFSET_MCTX_ident); + + + + n.gap(IDX_N_ACL_RIGHTS); + N_ACL_RIGHTS = n.get(IDX_N_ACL_RIGHTS); + BITS_PER_BITMAPWORD = n.get(IDX_BITS_PER_BITMAPWORD); + + + + n.gap(IDX_T_Invalid); + T_Invalid = n.get(IDX_T_Invalid); + T_AggState = n.get(IDX_T_AggState); + T_CallContext = n.get(IDX_T_CallContext); + T_EventTriggerData = n.get(IDX_T_EventTriggerData); + T_ReturnSetInfo = n.get(IDX_T_ReturnSetInfo); + T_TriggerData = n.get(IDX_T_TriggerData); + T_WindowAggState = n.get(IDX_T_WindowAggState); + T_WindowObjectData = n.get(IDX_T_WindowObjectData); + T_Bitmapset = n.get(IDX_T_Bitmapset); + T_ErrorSaveContext = n.get(IDX_T_ErrorSaveContext); + + + + n.gap(IDX_OFFSET_TRG_tgoid); + OFFSET_TRG_tgoid = n.get(IDX_OFFSET_TRG_tgoid); + OFFSET_TRG_tgname = n.get(IDX_OFFSET_TRG_tgname); + OFFSET_TRG_tgfoid = n.get(IDX_OFFSET_TRG_tgfoid); + OFFSET_TRG_tgtype = n.get(IDX_OFFSET_TRG_tgtype); + OFFSET_TRG_tgenabled = n.get(IDX_OFFSET_TRG_tgenabled); + OFFSET_TRG_tgisinternal = n.get(IDX_OFFSET_TRG_tgisinternal); + OFFSET_TRG_tgisclone = n.get(IDX_OFFSET_TRG_tgisclone); + OFFSET_TRG_tgconstrrelid = n.get(IDX_OFFSET_TRG_tgconstrrelid); + OFFSET_TRG_tgconstrindid = n.get(IDX_OFFSET_TRG_tgconstrindid); + OFFSET_TRG_tgconstraint = n.get(IDX_OFFSET_TRG_tgconstraint); + OFFSET_TRG_tgdeferrable = n.get(IDX_OFFSET_TRG_tgdeferrable); + OFFSET_TRG_tginitdeferred = n.get(IDX_OFFSET_TRG_tginitdeferred); + OFFSET_TRG_tgnargs = n.get(IDX_OFFSET_TRG_tgnargs); + OFFSET_TRG_tgnattr = n.get(IDX_OFFSET_TRG_tgnattr); + OFFSET_TRG_tgattr = n.get(IDX_OFFSET_TRG_tgattr); + OFFSET_TRG_tgargs = n.get(IDX_OFFSET_TRG_tgargs); + OFFSET_TRG_tgqual = n.get(IDX_OFFSET_TRG_tgqual); + OFFSET_TRG_tgoldtable = n.get(IDX_OFFSET_TRG_tgoldtable); + OFFSET_TRG_tgnewtable = n.get(IDX_OFFSET_TRG_tgnewtable); + SIZEOF_Trigger = n.get(IDX_SIZEOF_Trigger); + + + + n.gap(IDX_OFFSET_TRGD_tg_event); + OFFSET_TRGD_tg_event = n.get(IDX_OFFSET_TRGD_tg_event); + OFFSET_TRGD_tg_relation = n.get(IDX_OFFSET_TRGD_tg_relation); + OFFSET_TRGD_tg_trigtuple = n.get(IDX_OFFSET_TRGD_tg_trigtuple); + OFFSET_TRGD_tg_newtuple = n.get(IDX_OFFSET_TRGD_tg_newtuple); + OFFSET_TRGD_tg_trigger = n.get(IDX_OFFSET_TRGD_tg_trigger); + OFFSET_TRGD_tg_updatedcols = n.get(IDX_OFFSET_TRGD_tg_updatedcols); + + OFFSET_RSI_allowedModes = n.get(IDX_OFFSET_RSI_allowedModes); + OFFSET_RSI_isDone = n.get(IDX_OFFSET_RSI_isDone); + OFFSET_RSI_returnMode = n.get(IDX_OFFSET_RSI_returnMode); + SIZEOF_RSI_isDone = n.get(IDX_SIZEOF_RSI_isDone); + SIZEOF_RSI_returnMode = n.get(IDX_SIZEOF_RSI_returnMode); + + + + n.gap(IDX_ATTNUM); + ATTNUM = n.get(IDX_ATTNUM); + AUTHMEMMEMROLE = n.get(IDX_AUTHMEMMEMROLE); + AUTHMEMROLEMEM = n.get(IDX_AUTHMEMROLEMEM); + AUTHOID = n.get(IDX_AUTHOID); + COLLOID = n.get(IDX_COLLOID); + DATABASEOID = n.get(IDX_DATABASEOID); + LANGOID = n.get(IDX_LANGOID); + NAMESPACEOID = n.get(IDX_NAMESPACEOID); + OPEROID = n.get(IDX_OPEROID); + PROCOID = n.get(IDX_PROCOID); + RELOID = n.get(IDX_RELOID); + TSCONFIGOID = n.get(IDX_TSCONFIGOID); + TSDICTOID = n.get(IDX_TSDICTOID); + TYPEOID = n.get(IDX_TYPEOID); + CONSTROID = n.get(IDX_CONSTROID); + TRFOID = n.get(IDX_TRFOID); + TRFTYPELANG = n.get(IDX_TRFTYPELANG); + AMOID = n.get(IDX_AMOID); + TABLESPACEOID = n.get(IDX_TABLESPACEOID); + + FOREIGNDATAWRAPPEROID = n.get(IDX_FOREIGNDATAWRAPPEROID); + FOREIGNSERVEROID = n.get(IDX_FOREIGNSERVEROID); + FOREIGNTABLEREL = n.get(IDX_FOREIGNTABLEREL); + + + + } + } + + /** + * 
Distinguished subclass of {@link ExceptionInInitializerError} thrown + * if the build-time constants cannot be loaded. + */ + static class ConstantsError extends ExceptionInInitializerError + { + ConstantsError() + { + super("PL/Java native constants jumbled; " + + "are jar and shared object same version?"); + } + } + + /* + * Some static methods used by more than one model class, here because they + * are sort of related to constants. For example, Alignment appears both in + * RegType and in Attribute. + */ + + static Alignment alignmentFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'c': return Alignment.CHAR; + case (byte)'s': return Alignment.INT2; + case (byte)'i': return Alignment.INT4; + case (byte)'d': return Alignment.DOUBLE; + } + throw unchecked(new SQLException( + "unrecognized alignment '" + (char)b + "' in catalog", "XX000")); + } + + static int alignmentModulus(Alignment a) + { + switch ( a ) + { + case CHAR: return 1; + case INT2: return ALIGNOF_SHORT; + case INT4: return ALIGNOF_INT; + case DOUBLE: return ALIGNOF_DOUBLE; + } + throw unchecked(new SQLException( + "expected alignment, got " + a, "XX000")); + } + + static Storage storageFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'x': return Storage.EXTENDED; + case (byte)'e': return Storage.EXTERNAL; + case (byte)'m': return Storage.MAIN; + case (byte)'p': return Storage.PLAIN; + } + throw unchecked(new SQLException( + "unrecognized storage '" + (char)b + "' in catalog", "XX000")); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ProceduralLanguageImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ProceduralLanguageImpl.java new file mode 100644 index 000000000..b62457576 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ProceduralLanguageImpl.java @@ -0,0 +1,950 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import java.util.ArrayList; +import static java.util.Arrays.copyOfRange; +import java.util.BitSet; +import static java.util.Collections.unmodifiableSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import static java.util.Objects.requireNonNull; +import java.util.Set; + +import java.util.concurrent.CopyOnWriteArraySet; + +import java.util.function.Supplier; +import java.util.function.Function; + +import static java.util.stream.Collectors.toList; +import java.util.stream.Stream; + +import org.postgresql.pljava.PLJavaBasedLanguage; +import org.postgresql.pljava.PLJavaBasedLanguage.Routine; +import org.postgresql.pljava.PLJavaBasedLanguage.Template; +import org.postgresql.pljava.PLJavaBasedLanguage.UsingTransforms; +import org.postgresql.pljava.PLPrincipal; + +import org.postgresql.pljava.model.*; +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Memo; + +import org.postgresql.pljava.annotation.Function.Trust; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.Checked; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.SwitchPointCache.doNotCache; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.LANGOID_CB; +import static org.postgresql.pljava.pg.ModelConstants.LANGOID; // syscache +import org.postgresql.pljava.pg.RegProcedureImpl.AbstractMemo.How; +import org.postgresql.pljava.pg.RegProcedureImpl.SupportMemo; +import org.postgresql.pljava.pg.TransformImpl.FromSQLMemo; +import org.postgresql.pljava.pg.TransformImpl.ToSQLMemo; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link ProceduralLanguage ProceduralLanguage} + * interface. + */ +class ProceduralLanguageImpl extends Addressed +implements + Nonshared, Named, Owned, + AccessControlled, ProceduralLanguage +{ + private static final Function s_initializer; + + /** + * Count of instances subject to invalidation. + *

    + * Only accessed in invalidate and SP.onFirstUse, both on the PG thread. + */ + private static int s_instances; + + private static class SP extends SwitchPoint + { + @Override + protected void onFirstUse() + { + if ( 1 == ++ s_instances ) + sysCacheInvalArmed(LANGOID_CB, true); + } + } + + private final SwitchPoint[] m_sp; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return LANGOID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(ProceduralLanguageImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.LANNAME, SIMPLE_INSTANCE); + } + + private static RegRole owner(ProceduralLanguageImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.LANOWNER, REGROLE_INSTANCE); + } + + private static List grants(ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.LANACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of ProceduralLanguage */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + ProceduralLanguageImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_sp = new SwitchPoint[] { new SP() }; + } + + @Override + void invalidate(List sps, List postOps) + { + SwitchPoint oldSP = m_sp[0]; + if ( null == oldSP || oldSP.unused() ) + return; // reentrant call + + try + { + /* + * Assigning null here ensures quick return from a reentrant call, + * and also serves as an assertion that validator() and language() + * have current cached values, as assumed below; with null here, + * they'll fail if they don't. + */ + m_sp[0] = null; + sps.add(oldSP); + if ( 0 == -- s_instances ) + sysCacheInvalArmed(LANGOID_CB, false); + + Set deps = m_dependents; + m_dependents = null; + + if ( deps instanceof RoutineSet ) + { + /* + * If I have a RoutineSet for my dependencies, I am a + * user-defined procedural language. My validator is written in + * a system-defined "pljavahandler" language (one that has a + * LanguageSet for its dependencies), and I no longer belong in + * its LanguageSet. + * + * To find that language, I can cheaply follow validator() and + * language(), as those must still hold cached values until the + * SwitchPoint gets invalidated after this method returns. + */ + RegProcedure vp = validator(); + ProceduralLanguage vl = vp.language(); + Set ls = ((ProceduralLanguageImpl)vl).m_dependents; + if ( null != ls ) + { + assert ls instanceof LanguageSet : "not a LanguageSet"; + ls.remove(this); + } + + /* + * My validator needn't necessarily be invalidated + * *as a routine*; it simply isn't my validator routine + * anymore, so I should be removed from the dependent-languages + * set in its memo. + */ + ValidatorMemo.removeDependent(vp, this); + + /* + * Likewise for handler and inline-handler dependencies. + */ + HandlerMemo.removeDependent(handler(), this); + InlineHandlerMemo.removeDependent(inlineHandler(), this); + + /* + * Routines for which I am the language of implementation + * get invalidated. + */ + ((RoutineSet)deps).forEach(r -> + ((RegProcedureImpl)r).invalidate(sps, postOps)); + } + else if ( deps instanceof LanguageSet ) + { + /* + * If I have a LanguageSet for my dependencies, I am (was) + * a pljavahandler language and no longer belong in that static + * set. 
My support functions are no longer my support functions. + * My dependent languages get invalidated. + */ + s_plJavaHandlers.remove(this); + + ValidatorMemo.removeDependent(validator(), this); + HandlerMemo.removeDependent(handler(), this); + /* a pljavahandler language has no inline handler */ + + ((LanguageSet)deps).forEach(l -> + ((ProceduralLanguageImpl)l).invalidate(sps, postOps)); + } + } + finally + { + m_sp[0] = new SP(); + } + } + + static final int SLOT_PRINCIPAL; + static final int SLOT_HANDLER; + static final int SLOT_INLINEHANDLER; + static final int SLOT_VALIDATOR; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(ProceduralLanguageImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> o.m_sp[0]) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(ProceduralLanguageImpl.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "principal", SLOT_PRINCIPAL = i++) + .withDependent( "handler", SLOT_HANDLER = i++) + .withDependent("inlineHandler", SLOT_INLINEHANDLER = i++) + .withDependent( "validator", SLOT_VALIDATOR = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute LANNAME; + static final Attribute LANOWNER; + static final Attribute LANACL; + static final Attribute LANPLTRUSTED; + static final Attribute LANPLCALLFOID; + static final Attribute LANINLINE; + static final Attribute LANVALIDATOR; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "lanname", + "lanowner", + "lanacl", + "lanpltrusted", + "lanplcallfoid", + "laninline", + "lanvalidator" + ).iterator(); + + LANNAME = itr.next(); + LANOWNER = itr.next(); + LANACL = itr.next(); + LANPLTRUSTED = itr.next(); + LANPLCALLFOID = itr.next(); + LANINLINE = itr.next(); + LANVALIDATOR = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + static final Set NOT_PLJAVA_BASED = + unmodifiableSet(new CopyOnWriteArraySet<>()); + + private static class LanguageSet + extends CopyOnWriteArraySet + { + } + + private static class RoutineSet extends HashSet> + { + /* only accessed on the PG thread */ + private PLJavaBasedLanguage m_implementingClass; + + /* likewise */ + private Map m_typeTransforms; + } + + /* mutable non-API data used only on the PG thread */ + + /** + * Remembers instances that represent the {@code pljavahandler} 'language' + * as they are discovered, keeping them live. + *
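invalidate() above swaps in a fresh SwitchPoint and hands the old one back to the caller for invalidation, after which the cached slot values are recomputed on next use. Purely as background, the guard-and-invalidate pattern that java.lang.invoke.SwitchPoint provides in the JDK looks like this (a minimal sketch, not PL/Java code; PL/Java's SwitchPointCache appears to add its own bookkeeping, such as the onFirstUse hook seen above, on top of this idea):

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.SwitchPoint;

public class SwitchPointSketch
{
    public static void main(String[] args) throws Throwable
    {
        MethodHandle cached    = MethodHandles.constant(String.class, "cached");
        MethodHandle recompute = MethodHandles.constant(String.class, "recomputed");

        SwitchPoint sp = new SwitchPoint();
        // guarded behaves like 'cached' until sp is invalidated, after which
        // it falls back to 'recompute' from then on.
        MethodHandle guarded = sp.guardWithTest(cached, recompute);

        System.out.println((String) guarded.invokeExact()); // prints cached

        SwitchPoint.invalidateAll(new SwitchPoint[] { sp });
        System.out.println((String) guarded.invokeExact()); // prints recomputed
    }
}
```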

    + * While no reason for multiple aliases is foreseen, a {@code Set} is used + * instead of a single field, so they do not have to be forbidden. + */ + private static final Set + s_plJavaHandlers = new CopyOnWriteArraySet<>(); + + /** + * For an instance determined to be a PL/Java-based language, a + * {@code RoutineSet} keeping live any known dependent routines; for an + * instance representing the "PL/Java handler language", a + * {@code LanguageSet} keeping live any known dependent languages; + * {@code NOT_PLJAVA_BASED} if known to be neither; null if not yet + * classified. + */ + private Set m_dependents; + + void removeDependentRoutine(RegProcedure r) + { + assert threadMayEnterPG() : "removeDependentRoutine thread"; + + if ( ! (m_dependents instanceof RoutineSet) ) + return; + + ((RoutineSet)m_dependents).remove(r); + } + + PLJavaMemo addDependentRoutine(RegProcedure r) + { + assert threadMayEnterPG() : "addDependentRoutine thread"; + + assert m_dependents instanceof RoutineSet : "not PL/Java-based"; + if ( ! (m_dependents instanceof RoutineSet) ) + return null; + + RegProcedureImpl rpi = (RegProcedureImpl)r; + + if ( ((RoutineSet)m_dependents).add(rpi) ) + new PLJavaMemo(rpi).apply(); + + return rpi.m_how; + } + + void memoizeImplementingClass( + PLJavaBasedLanguage expected, PLJavaBasedLanguage superseding) + { + assert threadMayEnterPG() : "memoizeImplementingClass thread"; + + RoutineSet s = (RoutineSet)m_dependents; + + if ( expected != s.m_implementingClass ) + throw new IllegalStateException( + "expected prior value mismatch in memoizeImplentingClass"); + + s.m_implementingClass = requireNonNull(superseding); + } + + PLJavaBasedLanguage implementingClass() + { + return ((RoutineSet)m_dependents).m_implementingClass; + } + + void removeKnownTransform(Transform tr) + { + assert threadMayEnterPG() : "removeKnownTransform thread"; + if ( ! (m_dependents instanceof RoutineSet) ) + return; + Map map = + ((RoutineSet)m_dependents).m_typeTransforms; + if ( null != map ) + map.values().remove(tr); // avoid assuming type() was cached + } + + /** + * Indicates whether this instance represents a PL/Java-based language. + *

    + * A PL/Java-based language is one that: + *

+ * <ul>
+ * <li>Has one or both of:
+ * <ul>
+ * <li>a {@code handler} whose {@code language} is {@link #C}, whose
+ * {@code src} is {@code pljavaDispatchRoutine}, and whose {@code bin} is
+ * a shared-object file name, or</li>
+ * <li>an {@code inlineHandler} whose {@code language} is {@link #C}, whose
+ * {@code src} is {@code pljavaDispatchInline}, and whose {@code bin} is
+ * a shared-object file name</li>
+ * </ul>
+ * , and</li>
+ * <li>has a {@code validator} whose {@code language} is an instance
+ * representing the {@code pljavahandler} language.</li>
+ * </ul>
+ * and for which the shared-object file names, if more than one, are equal.
+ *

    + * An instance represents the {@code pljavahandler} language if it: + *

+ * <ul>
+ * <li>has no {@code inlineHandler}, and</li>
+ * <li>has both a {@code handler} and a {@code validator} with
+ * {@code language} of {@link #C}, the same {@code src} of
+ * {@code pljavaDispatchValidator}, and whose {@code bin} is
+ * the same shared-object file name.</li>
+ * </ul>
    + * Such an instance is not also considered a PL/Java-based language. + *
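The two classifications just described can be hard to keep straight. The following standalone Java sketch restates their shape over invented stand-in types; LanguageShapeSketch, Proc, Lang and the field names here are hypothetical and are not part of this patch or of PL/Java's API, and the authoritative checks are the isPLJavaBased() and isPLJavaHandler() methods below, which work against catalog-backed RegProcedure and ProceduralLanguage instances and also maintain the dependent sets.

    import java.util.Objects;
    import java.util.stream.Stream;

    final class LanguageShapeSketch
    {
        /** Stand-in for a pg_proc entry: existence, language name, src, bin. */
        record Proc(boolean exists, String language, String src, String bin) {}

        /** Stand-in for a pg_language entry and its support functions. */
        record Lang(Proc handler, Proc inlineHandler, Proc validator,
                    Lang validatorLanguage) {}

        /** p's shared-object name, if p is a C function whose src matches
            the given entry-point symbol; otherwise null. */
        static String binFromCWithSrc(Proc p, String src)
        {
            if ( null != p && p.exists()
                && "c".equals(p.language()) && src.equals(p.src()) )
                return p.bin();
            return null;
        }

        /** Shape of the "PL/Java handler" language check described above. */
        static boolean looksLikePLJavaHandler(Lang l)
        {
            if ( null != l.inlineHandler() && l.inlineHandler().exists() )
                return false;
            String hbin = binFromCWithSrc(l.handler(),   "pljavaDispatchValidator");
            String vbin = binFromCWithSrc(l.validator(), "pljavaDispatchValidator");
            return null != hbin && hbin.equals(vbin);
        }

        /** Shape of the "PL/Java-based language" check described above. */
        static boolean looksPLJavaBased(Lang l)
        {
            var bins = Stream.of(
                    binFromCWithSrc(l.handler(),       "pljavaDispatchRoutine"),
                    binFromCWithSrc(l.inlineHandler(), "pljavaDispatchInline"))
                .filter(Objects::nonNull)
                .distinct()
                .toList();
            if ( 1 != bins.size() || null == l.validator() || ! l.validator().exists() )
                return false;
            Lang vl = l.validatorLanguage();
            /* the real isPLJavaBased() additionally requires vl's shared object
               to equal bins.get(0); omitted here to keep the sketch short */
            return null != vl && looksLikePLJavaHandler(vl);
        }
    }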

    + * A PL/Java-based language has a set of dependent routines + * ({@code RegProcedure} instances), keeping live whatever such routines + * have been discovered. A language representing {@code pljavahandler} has + * a set of dependent languages, keeping live whichever of those have been + * discovered. The validator routines for those languages do not need to be + * treated additionally as dependent routines; it suffices that they are + * cached as the validators of their respective languages. + *
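The shape of that bookkeeping can be pictured with the following invented stand-ins; DependentsSketch and its nested classes are hypothetical and not part of this patch, which instead uses the RoutineSet/LanguageSet members above together with SwitchPoint-based invalidation.

    import java.util.HashSet;
    import java.util.Set;

    final class DependentsSketch
    {
        /** Stand-in for a routine whose cached state can be dropped. */
        static final class Routine
        {
            void invalidate() { /* drop cached template, handles, ... */ }
        }

        /** Stand-in for a PL/Java-based language: keeps discovered routines live. */
        static final class BasedLanguage
        {
            final Set<Routine> routines = new HashSet<>();
            void invalidate()
            {
                routines.forEach(Routine::invalidate);
                routines.clear();
            }
        }

        /** Stand-in for the "pljavahandler" language: keeps dependent languages. */
        static final class HandlerLanguage
        {
            final Set<BasedLanguage> languages = new HashSet<>();
            void invalidate()
            {
                languages.forEach(BasedLanguage::invalidate);
                languages.clear();
            }
        }
    }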

    + * On invalidation, a PL/Java-based language invalidates its dependent + * routines, and removes itself from the dependent-languages set of its + * validator's language. (Finding its validator and the validator's language + * are inexpensive as both references must have been traversed when the + * instance was determined to be PL/Java-based, and their cached values are + * lost only after {@code invalidate} returns.) A language that represents + * {@code pljavahandler} removes itself from the static set of those, and + * invalidates its dependent languages. + */ + boolean isPLJavaBased() + { + assert threadMayEnterPG() : "isPLJavaBased thread"; + + if ( m_dependents instanceof RoutineSet ) + return true; + + if ( null != m_dependents ) + return false; + + do // while ( false ): break to mark notPLJavaBased and return false + { + if ( INTERNAL == this || C == this || SQL == this ) + break; + + RegProcedure hp = handler(); + RegProcedure ip = inlineHandler(); + RegProcedure vp = validator(); + + if ( ! vp.exists() ) + break; + + ProceduralLanguageImpl vl = (ProceduralLanguageImpl)vp.language(); + + if ( INTERNAL == vl || SQL == vl ) + break; + + if ( C == vl ) + { + if ( isPLJavaHandler(null) ) + return false; // will have made m_dependents a LanguageSet + break; + } + + List bins = + Stream.of( + binFromCWithSrc(hp, "pljavaDispatchRoutine"), + binFromCWithSrc(ip, "pljavaDispatchInline")) + .filter(Objects::nonNull) + .distinct() + .collect(toList()); + + if ( 1 != bins.size() ) + break; + + if ( ! vl.isPLJavaHandler(bins.get(0)) ) + break; + + ((LanguageSet)vl.m_dependents).add(this); + + m_dependents = new RoutineSet(); // found to be null above + + HandlerMemo.addDependent(hp, this); + InlineHandlerMemo.addDependent(ip, this); + ValidatorMemo.addDependent(vp, this); + + return true; + } + while ( false ); + + m_dependents = NOT_PLJAVA_BASED; + return false; + } + + /** + * Indicates whether this instance represents the (or possibly a) + * "PL/Java handler" language, and can serve as the language for the + * validator function of a PL/Java-based language. + *

    + * A "PL/Java handler" language is one that: + *

+ * <ul>
+ * <li>has no {@code inlineHandler}, and</li>
+ * <li>has both a {@code handler} and a {@code validator} with
+ * {@code language} of {@link #C}, the same {@code src} of
+ * {@code pljavaDispatchValidator}, and whose {@code bin} is
+ * the same shared-object file name.</li>
+ * </ul>
    + * Such an instance is not also considered a PL/Java-based language. + *

    + * A language representing {@code pljavahandler} is kept live in a static + * set, and has a set of dependent languages, keeping live whichever of + * those have been discovered. The validator routines for those languages do + * not need to be treated additionally as dependent routines; it suffices + * that they are cached as the validators of their respective languages. + *

    + * On invalidation, a language that represents {@code pljavahandler} + * removes itself from the static set of those, and invalidates its + * dependent languages. + * @param expectedBinOrNull if the caller knows the name of the loaded + * PL/Java shared object, it can pass that to ensure this method only + * matches entries using that name. Or, when called from + * {@code isPLJavaBased}, this parameter will be the same shared-object name + * used in the purported PL/Java-based language's entries, to make sure this + * method will only match the same one, whether or not it is independently + * known to be right. If null, the shared-object name is not checked. + */ + boolean isPLJavaHandler(String expectedBinOrNull) + { + assert threadMayEnterPG() : "isPLJavaHandler thread"; + + if ( m_dependents instanceof LanguageSet ) + return true; + + if ( null != m_dependents ) + return false; + + if ( INTERNAL == this || C == this || SQL == this ) + return false; + + RegProcedure hp = handler(); + RegProcedure ip = inlineHandler(); + RegProcedure vp = validator(); + + if ( ip.isValid() || ! hp.exists() || ! vp.exists() ) + return false; + + String hbin = binFromCWithSrc(hp, "pljavaDispatchValidator"); + String vbin = binFromCWithSrc(vp, "pljavaDispatchValidator"); + + if ( ! Objects.equals(hbin, vbin) || null == hbin ) + return false; + + if ( null != expectedBinOrNull && ! expectedBinOrNull.equals(hbin) ) + return false; + + s_plJavaHandlers.add(this); + + m_dependents = new LanguageSet(); + + HandlerMemo.addDependent(hp, this); + ValidatorMemo.addDependent(vp, this); + + return true; + } + + /** + * A quick check that a {@code RegProcedure} p is in language + * {@code C} with {@code src} matching a given known entry point symbol. + * @return p's shared-object name if so, otherwise null + */ + private String binFromCWithSrc(RegProcedure p, String src) + { + if ( p.exists() && C == p.language() && src.equals(p.src()) ) + return p.bin(); + return null; + } + + /** + * Called from a {@code RegProcedureImpl} computation method, returns + * a list of {@code Transform} corresponding to the supplied {@code RegType} + * list. + *

    + * A map is maintained here from types to transforms that have already + * passed the language implementation's {@code essentialTransformChecks}. + * If transforms for all of {@code types} are already in the map, a list + * of those is returned in a trivial constant {@code Supplier} that will be + * cached when the calling computation method returns. + *
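This paragraph and the next describe a two-phase scheme; the following self-contained sketch, with invented names (TwoPhaseSupplierSketch and friends) and without the doInPG/PG-thread split the real code needs, shows the general shape: return a cacheable constant supplier when every type is already known, otherwise return a non-cached supplier that performs the expensive checks and then retries.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Supplier;

    final class TwoPhaseSupplierSketch
    {
        /** Stand-in for the per-language map of already-checked transforms. */
        private final Map<String,String> checked = new HashMap<>();

        /** Stand-in for essentialTransformChecks followed by recording. */
        private String check(String type)
        {
            return "transform(" + type + ")";
        }

        Supplier<List<String>> transformsFor(List<String> types)
        {
            if ( types.stream().allMatch(checked::containsKey) )
            {
                List<String> result = types.stream().map(checked::get).toList();
                return () -> result;           // constant; safe for the caller to cache
            }
            return () ->                       // not yet cacheable
            {
                for ( String t : types )
                    checked.computeIfAbsent(t, this::check);
                return transformsFor(types).get(); // retry; now takes the constant path
            };
        }
    }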

    + * If not all of types can already be found in the map, they must + * be looked up in the syscache. They have not yet been checked by + * {@code essentialTransformChecks}. A {@code Supplier} will be returned, + * without caching, that calls {@code essentialTransformChecks} on those, + * outside of {@code doInPG}, adds them to the map if that succeeds, and + * then returns the value of a recursive call of the + * {@code RegProcedureImpl} method, which will then find all the transforms + * in the map and lead to the constant {@code Supplier} being cached. + */ + Checked.Supplier,SQLException> + transformsFor(List types, RegProcedureImpl p) + throws SQLException + { + assert threadMayEnterPG() : "transformsFor thread"; + assert ! types.isEmpty() : "transformsFor empty types"; + + Map map = + ((RoutineSet)m_dependents).m_typeTransforms; + + if ( null == map ) + ((RoutineSet)m_dependents).m_typeTransforms = map = new HashMap<>(); + + Transform[] ts = new Transform [ types.size() ]; + int nKnown = 0, iNew = ts.length; + + for ( RegType ty : types ) + { + Transform tr = map.get(ty); + if ( null != tr ) + { + ts [ nKnown ++ ] = tr; + continue; + } + + tr = TransformImpl.fromTypeLang(ty, this); + + if ( null == tr ) + throw new SQLSyntaxErrorException(String.format( + "%s of %s has no transform defined for: %s", + this, p, ty), "42P13"); + + ts [ -- iNew ] = tr; + } + + if ( ts.length == nKnown ) + { + List result = List.of(ts); + TransformImpl.addDependentRoutine(p, result); + p.m_dependsOnTransforms = true; // here is the only place that's set + return () -> result; + } + + Transform[] toCheck = copyOfRange(ts, iNew, ts.length); + UsingTransforms utImpl = (UsingTransforms)implementingClass(); + Map final_map = map; + + doNotCache(); + return () -> + { + for ( int i = toCheck.length ; i --> 0 ; ) + { + Transform tr = toCheck [ i ]; + utImpl.essentialTransformChecks(tr); + } + return doInPG(() -> + { + for ( Transform tr : toCheck ) + { + tr.language(); // make sure it's cached for invalidation use + final_map.put(tr.type(), tr); + FromSQLMemo.addDependent(tr.fromSQL(), tr); + ToSQLMemo.addDependent(tr.toSQL(), tr); + } + return p.transforms(); + }); + }; + } + + /* computation methods */ + + private static PLPrincipal principal(ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + if ( s.get(Att.LANPLTRUSTED, BOOLEAN_INSTANCE) ) + return new PLPrincipal.Sandboxed(o.name()); + return new PLPrincipal.Unsandboxed(o.name()); + } + + private static RegProcedure handler(ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.LANPLCALLFOID, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure inlineHandler( + ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.LANINLINE, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure validator(ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.LANVALIDATOR, REGPROCEDURE_INSTANCE); + return p; + } + + /* API methods */ + + @Override + public PLPrincipal principal() + { + try + { + MethodHandle h = m_slots[SLOT_PRINCIPAL]; + return (PLPrincipal)h.invokeExact(this, h); + 
} + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure handler() + { + try + { + MethodHandle h = m_slots[SLOT_HANDLER]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure inlineHandler() + { + try + { + MethodHandle h = m_slots[SLOT_INLINEHANDLER]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure validator() + { + try + { + MethodHandle h = m_slots[SLOT_VALIDATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link ProceduralLanguage ProceduralLanguage} handler. + */ + static class HandlerMemo + extends SupportMemo implements Handler + { + private HandlerMemo( + RegProcedure carrier, ProceduralLanguageImpl lang) + { + super(carrier, lang); + } + + static void addDependent( + RegProcedure proc, + ProceduralLanguageImpl lang) + { + SupportMemo.add(proc, lang, HandlerMemo.class, + () -> new HandlerMemo(proc, lang)); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link ProceduralLanguage ProceduralLanguage} inline handler. + */ + static class InlineHandlerMemo + extends SupportMemo + implements InlineHandler + { + private InlineHandlerMemo( + RegProcedure carrier, + ProceduralLanguageImpl lang) + { + super(carrier, lang); + } + + static void addDependent( + RegProcedure proc, + ProceduralLanguageImpl lang) + { + SupportMemo.add(proc, lang, InlineHandlerMemo.class, + () -> new InlineHandlerMemo(proc, lang)); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link ProceduralLanguage ProceduralLanguage} validator. + */ + static class ValidatorMemo + extends SupportMemo implements Validator + { + private ValidatorMemo( + RegProcedure carrier, + ProceduralLanguageImpl lang) + { + super(carrier, lang); + } + + static void addDependent( + RegProcedure proc, + ProceduralLanguageImpl lang) + { + SupportMemo.add(proc, lang, ValidatorMemo.class, + () -> new ValidatorMemo(proc, lang)); + } + } + + /** + * Implementation of {@link PLJavaBased PLJavaBased} memo for attachment to + * a {@link RegProcedure RegProcedure} whose implementation is + * PL/Java-based. + */ + static class PLJavaMemo extends How implements PLJavaBased + { + Template m_routineTemplate; + + private PLJavaMemo(RegProcedureImpl carrier) + { + super(carrier); + } + + /* + * Discards a PLJavaMemo that has been retained with a null template. + * + * The validator logic in LookupImpl creates the usual linkages between + * a RegProcedure and its language and validator, but never installs a + * template in the RegProcedure's memo as the first actual call would. + * Often, that isn't noticeable, because a shared-invalidation event + * upon rollback if the validator rejected, or even upon successful + * entry of the routine, causes the incomplete memo to be discarded. + * + * It can happen, though, if a routine is created/validated and then + * used in the same transaction, that the incomplete memo with null + * template is still there. Here is a convenient method to get rid of + * it the same way shared-invalidation would. 
+ */ + void discardIncomplete() + { + assert null == m_routineTemplate : "discard memo Template non-null"; + List sps = new ArrayList<>(); + List postOps = new ArrayList<>(); + invalidate(sps, postOps); + assert 0 == sps.size() && 0 == postOps.size(); + } + + @Override + void invalidate(List sps, List postOps) + { + super.invalidate(sps, postOps); + ProceduralLanguageImpl pl = + (ProceduralLanguageImpl)m_carrier.language(); + pl.removeDependentRoutine(m_carrier); + } + + @Override + public TupleDescriptor inputsTemplate() + { + return m_carrier.inputsTemplate(); + } + + @Override + public BitSet unresolvedInputs() + { + return m_carrier.unresolvedInputs(); + } + + @Override + public TupleDescriptor outputsTemplate() + { + return m_carrier.outputsTemplate(); + } + + @Override + public BitSet unresolvedOutputs() + { + return m_carrier.unresolvedOutputs(); + } + + @Override + public List transforms() + { + return m_carrier.transforms(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegClassImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegClassImpl.java new file mode 100644 index 000000000..4895f49f1 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegClassImpl.java @@ -0,0 +1,1079 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; +import static org.postgresql.pljava.model.MemoryContext.CurrentMemoryContext; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static + org.postgresql.pljava.pg.CatalogObjectImpl.Factory.ForeignTableRelationId; + +import static org.postgresql.pljava.pg.ModelConstants.Anum_pg_class_reltype; +import static org.postgresql.pljava.pg.ModelConstants.RELOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.FOREIGNTABLEREL; // " +import static org.postgresql.pljava.pg.ModelConstants.CLASS_TUPLE_SIZE; + +import static org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlot; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import org.postgresql.pljava.pg.adt.NameAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.AM_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCLASS_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.SERVER_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.TABLESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.*; +import static 
org.postgresql.pljava.pg.adt.XMLAdapter.SYNTHETIC_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Qualified; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/* + * Can get lots of information, including Form_pg_class rd_rel and + * TupleDesc rd_att, from the relcache. See CacheRegisterRelcacheCallback(). + * However, the relcache copy of the class tuple is cut off at CLASS_TUPLE_SIZE. + */ + +/** + * Implementation of the {@link RegClass RegClass} interface. + */ +class RegClassImpl extends Addressed +implements + Nonshared, Namespaced, Owned, + AccessControlled, RegClass +{ + /** + * Subclass that additionally implements + * {@link RegClass.Known RegClass.Known}. + */ + static class Known> + extends RegClassImpl implements RegClass.Known + { + } + + private static final Function s_initializer; + + /** + * Per-instance switch point, to be invalidated selectively + * by a relcache callback. + */ + private final SwitchPoint[] m_cacheSwitchPoint; + + final SwitchPoint cacheSwitchPoint() + { + return m_cacheSwitchPoint[0]; + } + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return RELOID; + } + + /* Implementation of Named, Namespaced, Owned, AccessControlled */ + + private static Simple name(RegClassImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.RELNAME, NameAdapter.SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegClassImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.RELNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegClassImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.RELOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegClassImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.RELACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegClass */ + + RegClassImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_cacheSwitchPoint = new SwitchPoint[] { new SwitchPoint() }; + } + + /** + * Called from {@code Factory}'s {@code invalidateRelation} to set up + * the invalidation of this relation's metadata. + *

    + * Adds this relation's {@code SwitchPoint} to the caller's list so that, + * if more than one is to be invalidated, that can be done in bulk. Adds to + * postOps any operations the caller should conclude with + * after invalidating the {@code SwitchPoint}. + */ + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_cacheSwitchPoint[0]; + if ( sp.unused() ) + return; + TupleDescriptor.Interned[] oldTDH = m_tupDescHolder; + sps.add(sp); + + /* + * Before invalidating the SwitchPoint, line up a new one (and a newly + * nulled tupDescHolder) for value-computing methods to find once the + * old SwitchPoint is invalidated. + */ + m_cacheSwitchPoint[0] = new SwitchPoint(); + m_tupDescHolder = null; + + /* + * After the old SwitchPoint gets invalidated, the old tupDescHolder, + * if any, can have its element nulled so the old TupleDescriptor can + * be collected without having to wait for the 'guardWithTest's it is + * bound into to be recomputed. + */ + if ( null != oldTDH ) + { + postOps.add(() -> + { + TupleDescImpl td = (TupleDescImpl)oldTDH[0]; + if ( null == td ) + return; + oldTDH[0] = null; + td.invalidate(); + }); + } + } + + /** + * Associated tuple descriptor, redundantly kept accessible here as well as + * opaquely bound into a {@code SwitchPointCache} method handle. + *

    + * This one-element array containing the descriptor is what gets bound into + * the handle, so the descriptor can be freed for GC at invalidation time + * (rather than waiting for the next tuple-descriptor request to replace + * the handle). Only accessed from {@code SwitchPointCache} computation + * methods or {@code TupleDescImpl} factory methods, all of which execute + * on the PG thread; no synchronization fuss needed. + *
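The one-element-holder idea can be seen in isolation in the following self-contained sketch, which uses only java.lang.invoke and invented names (HolderSketch, the String stand-in for a descriptor): the holder array, not the value itself, is bound into a SwitchPoint-guarded handle, so invalidation can null the element and let the old value be collected immediately rather than waiting for the handle to be recomputed.

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.SwitchPoint;

    public final class HolderSketch
    {
        public static void main(String[] args) throws Throwable
        {
            String[] holder = { "old descriptor (stand-in)" };
            SwitchPoint sp = new SwitchPoint();

            // target reads holder[0]; fallback stands in for recomputation
            MethodHandle readHolder = MethodHandles.insertArguments(
                MethodHandles.arrayElementGetter(String[].class).bindTo(holder),
                0, 0);
            MethodHandle recompute =
                MethodHandles.constant(String.class, "recomputed descriptor");
            MethodHandle guarded = sp.guardWithTest(readHolder, recompute);

            System.out.println((String)guarded.invokeExact()); // old descriptor

            holder[0] = null;  // old value becomes collectable right away ...
            SwitchPoint.invalidateAll(new SwitchPoint[] { sp }); // ... then flip

            System.out.println((String)guarded.invokeExact()); // recomputed
        }
    }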

    + * When null, no computation method has run (or none since invalidation), + * and the state is not known. Otherwise, the single element is the result + * to be returned by the {@code tupleDescriptor()} API method. + */ + TupleDescriptor.Interned[] m_tupDescHolder; + + /** + * Holder for the {@code RegType} corresponding to {@code type()}, + * only non-null during a call of {@code dualHandshake}. + */ + private RegType m_dual = null; + + /** + * Called by the corresponding {@code RegType} instance if it has just + * looked us up. + *

    + * Because the {@code SwitchPointCache} recomputation methods always execute + * on the PG thread, plain access to an instance field suffices here. + */ + void dualHandshake(RegType dual) + { + try + { + m_dual = dual; + dual = type(); + assert dual == m_dual : "RegType/RegClass handshake outcome"; + } + finally + { + m_dual = null; + } + } + + static final int SLOT_TUPLEDESCRIPTOR; + static final int SLOT_TYPE; + static final int SLOT_OFTYPE; + static final int SLOT_AM; + static final int SLOT_TABLESPACE; + static final int SLOT_TOASTRELATION; + static final int SLOT_HASINDEX; + static final int SLOT_ISSHARED; + static final int SLOT_PERSISTENCE; + static final int SLOT_KIND; + static final int SLOT_NATTRIBUTES; + static final int SLOT_CHECKS; + static final int SLOT_HASRULES; + static final int SLOT_HASTRIGGERS; + static final int SLOT_HASSUBCLASS; + static final int SLOT_ROWSECURITY; + static final int SLOT_FORCEROWSECURITY; + static final int SLOT_ISPOPULATED; + static final int SLOT_REPLIDENT; + static final int SLOT_ISPARTITION; + static final int SLOT_OPTIONS; + static final int SLOT_FOREIGN; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegClassImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> o.m_cacheSwitchPoint[0]) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(RegClassImpl.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withReturnType(null) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "tupleDescriptor", SLOT_TUPLEDESCRIPTOR = i++) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent( "ofType", SLOT_OFTYPE = i++) + .withDependent( "accessMethod", SLOT_AM = i++) + .withDependent( "tablespace", SLOT_TABLESPACE = i++) + .withDependent( "toastRelation", SLOT_TOASTRELATION = i++) + .withDependent( "hasIndex", SLOT_HASINDEX = i++) + .withDependent( "isShared", SLOT_ISSHARED = i++) + .withDependent( "persistence", SLOT_PERSISTENCE = i++) + .withDependent( "kind", SLOT_KIND = i++) + .withDependent( "nAttributes", SLOT_NATTRIBUTES = i++) + .withDependent( "checks", SLOT_CHECKS = i++) + .withDependent( "hasRules", SLOT_HASRULES = i++) + .withDependent( "hasTriggers", SLOT_HASTRIGGERS = i++) + .withDependent( "hasSubclass", SLOT_HASSUBCLASS = i++) + .withDependent( "rowSecurity", SLOT_ROWSECURITY = i++) + .withDependent("forceRowSecurity", SLOT_FORCEROWSECURITY = i++) + .withDependent( "isPopulated", SLOT_ISPOPULATED = i++) + .withDependent( "replicaIdentity", SLOT_REPLIDENT = i++) + .withDependent( "isPartition", SLOT_ISPARTITION = i++) + .withDependent( "options", SLOT_OPTIONS = i++) + .withDependent( "foreign", SLOT_FOREIGN = i++) + + .build(); + NSLOTS = i; + } + + static class Att + { + static final Attribute RELNAME; + static final Attribute RELNAMESPACE; + static final Attribute RELOWNER; + static final Attribute RELACL; + static final Attribute RELOFTYPE; + static final Attribute RELAM; + static final Attribute 
RELTABLESPACE; + static final Attribute RELTOASTRELID; + static final Attribute RELHASINDEX; + static final Attribute RELISSHARED; + static final Attribute RELPERSISTENCE; + static final Attribute RELKIND; + static final Attribute RELNATTS; + static final Attribute RELCHECKS; + static final Attribute RELHASRULES; + static final Attribute RELHASTRIGGERS; + static final Attribute RELHASSUBCLASS; + static final Attribute RELROWSECURITY; + static final Attribute RELFORCEROWSECURITY; + static final Attribute RELISPOPULATED; + static final Attribute RELREPLIDENT; + static final Attribute RELISPARTITION; + static final Attribute RELOPTIONS; + static final Attribute RELPARTBOUND; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "relname", + "relnamespace", + "relowner", + "relacl", + "reloftype", + "relam", + "reltablespace", + "reltoastrelid", + "relhasindex", + "relisshared", + "relpersistence", + "relkind", + "relnatts", + "relchecks", + "relhasrules", + "relhastriggers", + "relhassubclass", + "relrowsecurity", + "relforcerowsecurity", + "relispopulated", + "relreplident", + "relispartition", + "reloptions", + "relpartbound" + ).iterator(); + + RELNAME = itr.next(); + RELNAMESPACE = itr.next(); + RELOWNER = itr.next(); + RELACL = itr.next(); + RELOFTYPE = itr.next(); + RELAM = itr.next(); + RELTABLESPACE = itr.next(); + RELTOASTRELID = itr.next(); + RELHASINDEX = itr.next(); + RELISSHARED = itr.next(); + RELPERSISTENCE = itr.next(); + RELKIND = itr.next(); + RELNATTS = itr.next(); + RELCHECKS = itr.next(); + RELHASRULES = itr.next(); + RELHASTRIGGERS = itr.next(); + RELHASSUBCLASS = itr.next(); + RELROWSECURITY = itr.next(); + RELFORCEROWSECURITY = itr.next(); + RELISPOPULATED = itr.next(); + RELREPLIDENT = itr.next(); + RELISPARTITION = itr.next(); + RELOPTIONS = itr.next(); + RELPARTBOUND = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /** + * A tiny class to just encapsulate the couple of extra attributes a foreign + * table has, as an alternative to a full-blown ForeignTable catalog object. + *

    + * This class eagerly populates both {@code server} and {@code options} when + * constructed, so the {@code RegClass} needs just one slot holding this. + */ + static class Foreign + { + private static final RegClass FT; + private static final Attribute FTSERVER; + private static final Attribute FTOPTIONS; + + static + { + FT = of(CLASSID, ForeignTableRelationId); + Iterator itr = FT.tupleDescriptor().project( + "ftserver", + "ftoptions" + ).iterator(); + + FTSERVER = itr.next(); + FTOPTIONS = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + + final ForeignServer server; + final Map options; + + Foreign(int oid) + { + ByteBuffer heapTuple = _searchSysCacheCopy1(FOREIGNTABLEREL, oid); + TupleTableSlot tts = heapTupleGetLightSlot( + FT.tupleDescriptor(), heapTuple, CurrentMemoryContext()); + + server = tts.get(FTSERVER, SERVER_INSTANCE); + options = tts.get(FTOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + } + + /* computation methods */ + + /** + * Return the tuple descriptor for this relation, wrapped in a one-element + * array, which is also stored in {@code m_tupDescHolder}. + *

    + * The tuple descriptor for a relation can be retrieved from the PostgreSQL + * {@code relcache}, or from the {@code typcache} if the relation has an + * associated type; it's the same descriptor, and the + * latter gets it from the former. Going through the {@code relcache} is + * fussier, involving the lock manager every time, while using the + * {@code typcache} can avoid that except in its cache-miss case. + *

    + * Here, for every relation other than {@code pg_class} itself, we will + * rely on the corresponding {@code RegType}, if there is one, to do + * the work. There is a bit + * of incest involved; it will construct the descriptor to rely on our + * {@code SwitchPoint} for invalidation, and will poke the wrapper array + * into our {@code m_tupDescHolder}. + *

    + * It does that last bit so that, even if the first query for a type's + * tuple descriptor is made through the {@code RegType}, we will also return + * it if a later request is made here, and all of the invalidation logic + * lives here; it is relation-cache invalidation that obsoletes a cataloged + * tuple descriptor. + *

    + * However, when the relation is {@code pg_class} itself, or is one + * of the relation kinds without an associated type entry, we rely + * on a bespoke JNI method to get the descriptor from the {@code relcache}. + * The {@code pg_class} case occurs when we are looking up the descriptor to + * interpret our own cache tuples, and the normal case's {@code type()} call + * won't work before that's available. + */ + private static TupleDescriptor.Interned[] tupleDescriptor(RegClassImpl o) + { + TupleDescriptor.Interned[] r = o.m_tupDescHolder; + + /* + * If not null, r is a value placed here by an invocation of + * tupleDescriptor() on the associated RegType, and we have not seen an + * invalidation since that happened (invalidations run on the PG thread, + * as do computation methods like this, so we've not missed anything). + * It is the value to return. + */ + if ( null != r ) + return r; + + /* + * In any case other than looking up our own tuple descriptor, we can + * use type() to find the associated RegType and let it, if valid, + * do the work. + */ + if ( CLASSID != o ) + { + RegType t = o.type(); + if ( t.isValid() ) + { + t.tupleDescriptor(); // side effect: writes o.m_tupDescHolder + return o.m_tupDescHolder; + } + } + + /* + * May be the bootstrap case, looking up the pg_class tuple descriptor, + * or just a relation kind that does not have an associate type entry. + * If we got here we need it, so we can call the Cataloged constructor + * directly, rather than fromByteBuffer (which would first check whether + * we need it, and bump its reference count only if so). Called + * directly, the constructor expects the count already bumped, which + * the _tupDescBootstrap method will have done for us. + */ + ByteBuffer bb = _tupDescBootstrap(o.oid()); + bb.order(nativeOrder()); + r = new TupleDescriptor.Interned[] {new TupleDescImpl.Cataloged(bb, o)}; + return o.m_tupDescHolder = r; + } + + private static RegType type(RegClassImpl o) throws SQLException + { + /* + * If this is a handshake occurring when the corresponding RegType + * has just looked *us* up, we are done. + */ + if ( null != o.m_dual ) + return o.m_dual; + + /* + * Otherwise, look up the corresponding RegType, and do the same + * handshake in reverse. Either way, the connection is set up + * bidirectionally with one cache lookup starting from either. That + * can avoid extra work in operations (like TupleDescriptor caching) + * that may touch both objects, without complicating their code. + * + * Because the fetching of pg_attribute's tuple descriptor + * necessarily passes through this point, and attributes don't know + * what their names are until it has, use the attribute number here. + */ + TupleTableSlot s = o.cacheTuple(); + RegType t = s.get( + s.descriptor().sqlGet(Anum_pg_class_reltype), REGTYPE_INSTANCE); + + /* + * Regular relations have a valid reltype, but other kinds of RegClass + * (index, toast table) do not. 
+ */ + if ( t.isValid() ) + ((RegTypeImpl)t).dualHandshake(o); + + return t; + } + + private static RegType ofType(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELOFTYPE, REGTYPE_INSTANCE); + } + + private static AccessMethod accessMethod(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELAM, AM_INSTANCE); + } + + private static Tablespace tablespace(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELTABLESPACE, TABLESPACE_INSTANCE); + } + + private static RegClass toastRelation(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELTOASTRELID, REGCLASS_INSTANCE); + } + + private static boolean hasIndex(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASINDEX, BOOLEAN_INSTANCE); + } + + private static boolean isShared(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELISSHARED, BOOLEAN_INSTANCE); + } + + private static Persistence persistence(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return persistenceFromCatalog( + s.get(Att.RELPERSISTENCE, INT1_INSTANCE)); + } + + private static Kind kind(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return kindFromCatalog( + s.get(Att.RELKIND, INT1_INSTANCE)); + } + + private static short nAttributes(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELNATTS, INT2_INSTANCE); + } + + private static short checks(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELCHECKS, INT2_INSTANCE); + } + + private static boolean hasRules(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASRULES, BOOLEAN_INSTANCE); + } + + private static boolean hasTriggers(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASTRIGGERS, BOOLEAN_INSTANCE); + } + + private static boolean hasSubclass(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASSUBCLASS, BOOLEAN_INSTANCE); + } + + private static boolean rowSecurity(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELROWSECURITY, BOOLEAN_INSTANCE); + } + + private static boolean forceRowSecurity(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.RELFORCEROWSECURITY, BOOLEAN_INSTANCE); + } + + private static boolean isPopulated(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELISPOPULATED, BOOLEAN_INSTANCE); + } + + private static ReplicaIdentity replicaIdentity(RegClassImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return replicaIdentityFromCatalog( + s.get(Att.RELREPLIDENT, INT1_INSTANCE)); + } + + private static boolean isPartition(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELISPARTITION, BOOLEAN_INSTANCE); + } + + private static Map options(RegClassImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + private static Foreign foreign(RegClassImpl o) + throws SQLException + { + if ( Kind.FOREIGN_TABLE != o.kind() ) + return null; + 
return new Foreign(o.oid()); + } + + /* API methods */ + + @Override + public TupleDescriptor.Interned tupleDescriptor() + { + try + { + MethodHandle h = m_slots[SLOT_TUPLEDESCRIPTOR]; + return ((TupleDescriptor.Interned[])h.invokeExact(this, h))[0]; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType ofType() + { + try + { + MethodHandle h = m_slots[SLOT_OFTYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public AccessMethod accessMethod() + { + try + { + MethodHandle h = m_slots[SLOT_AM]; + return (AccessMethod)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + // filenode + + @Override + public Tablespace tablespace() + { + try + { + MethodHandle h = m_slots[SLOT_TABLESPACE]; + return (Tablespace)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* Of limited interest ... estimates used by planner + * + int pages(); + float tuples(); + int allVisible(); + */ + + @Override + public RegClass toastRelation() + { + try + { + MethodHandle h = m_slots[SLOT_TOASTRELATION]; + return (RegClass)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasIndex() + { + try + { + MethodHandle h = m_slots[SLOT_HASINDEX]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean isShared() + { + try + { + MethodHandle h = m_slots[SLOT_ISSHARED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Persistence persistence() + { + try + { + MethodHandle h = m_slots[SLOT_PERSISTENCE]; + return (Persistence)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Kind kind() + { + try + { + MethodHandle h = m_slots[SLOT_KIND]; + return (Kind)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public short nAttributes() + { + try + { + MethodHandle h = m_slots[SLOT_NATTRIBUTES]; + return (short)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public short checks() + { + try + { + MethodHandle h = m_slots[SLOT_CHECKS]; + return (short)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasRules() + { + try + { + MethodHandle h = m_slots[SLOT_HASRULES]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasTriggers() + { + try + { + MethodHandle h = m_slots[SLOT_HASTRIGGERS]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasSubclass() + { + try + { + MethodHandle h = m_slots[SLOT_HASSUBCLASS]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean rowSecurity() + { + try + { + MethodHandle h = m_slots[SLOT_ROWSECURITY]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean 
forceRowSecurity() + { + try + { + MethodHandle h = m_slots[SLOT_FORCEROWSECURITY]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean isPopulated() + { + try + { + MethodHandle h = m_slots[SLOT_ISPOPULATED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public ReplicaIdentity replicaIdentity() + { + try + { + MethodHandle h = m_slots[SLOT_REPLIDENT]; + return (ReplicaIdentity)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean isPartition() + { + try + { + MethodHandle h = m_slots[SLOT_ISPARTITION]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + // rewrite + // frozenxid + // minmxid + + @Override + public Map options() + { + try + { + MethodHandle h = m_slots[SLOT_OPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public SQLXML partitionBound() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + TupleTableSlot s = cacheTuple(); + return s.get(Att.RELPARTBOUND, SYNTHETIC_INSTANCE); + } + + @Override + public ForeignServer foreignServer() + { + try + { + MethodHandle h = m_slots[SLOT_FOREIGN]; + Foreign f = (Foreign)h.invokeExact(this, h); + return null == f ? null : f.server; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Map foreignOptions() + { + try + { + MethodHandle h = m_slots[SLOT_FOREIGN]; + Foreign f = (Foreign)h.invokeExact(this, h); + return null == f ? 
null : f.options; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + private static Persistence persistenceFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'p': return Persistence.PERMANENT; + case (byte)'u': return Persistence.UNLOGGED; + case (byte)'t': return Persistence.TEMPORARY; + } + throw unchecked(new SQLException( + "unrecognized Persistence type '" + (char)b + "' in catalog", + "XX000")); + } + + private static Kind kindFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'r': return Kind.TABLE; + case (byte)'i': return Kind.INDEX; + case (byte)'S': return Kind.SEQUENCE; + case (byte)'t': return Kind.TOAST; + case (byte)'v': return Kind.VIEW; + case (byte)'m': return Kind.MATERIALIZED_VIEW; + case (byte)'c': return Kind.COMPOSITE_TYPE; + case (byte)'f': return Kind.FOREIGN_TABLE; + case (byte)'p': return Kind.PARTITIONED_TABLE; + case (byte)'I': return Kind.PARTITIONED_INDEX; + } + throw unchecked(new SQLException( + "unrecognized Kind type '" + (char)b + "' in catalog", + "XX000")); + } + + private static ReplicaIdentity replicaIdentityFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'d': return ReplicaIdentity.DEFAULT; + case (byte)'n': return ReplicaIdentity.NOTHING; + case (byte)'f': return ReplicaIdentity.ALL; + case (byte)'i': return ReplicaIdentity.INDEX; + } + throw unchecked(new SQLException( + "unrecognized ReplicaIdentity type '" + (char)b + "' in catalog", + "XX000")); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegCollationImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegCollationImpl.java new file mode 100644 index 000000000..55f486e99 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegCollationImpl.java @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.COLLOID; // syscache + +import org.postgresql.pljava.pg.adt.EncodingAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.NameAdapter.AS_STRING_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegCollation RegCollation} interface. 
+ */ +class RegCollationImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegCollation +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return COLLOID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Simple name(RegCollationImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.COLLNAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegCollationImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.COLLNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegCollationImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.COLLOWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegCollation */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegCollationImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_ENCODING; + static final int SLOT_COLLATE; + static final int SLOT_CTYPE; + static final int SLOT_PROVIDER; + static final int SLOT_VERSION; + static final int SLOT_DETERMINISTIC; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegCollationImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegCollationImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .withReceiverType(null) + .withDependent( "encoding", SLOT_ENCODING = i++) + .withDependent( "collate", SLOT_COLLATE = i++) + .withDependent( "ctype", SLOT_CTYPE = i++) + .withDependent( "provider", SLOT_PROVIDER = i++) + .withDependent( "version", SLOT_VERSION = i++) + .withDependent("deterministic", SLOT_DETERMINISTIC = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute COLLNAME; + static final Attribute COLLNAMESPACE; + static final Attribute COLLOWNER; + static final Attribute COLLENCODING; + static final Attribute COLLCOLLATE; + static final Attribute COLLCTYPE; + static final Attribute COLLPROVIDER; + static final Attribute COLLVERSION; + static final Attribute COLLISDETERMINISTIC; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "collname", + "collnamespace", + "collowner", + "collencoding", + "collcollate", + "collctype", + "collprovider", + "collversion", + "collisdeterministic" + ).iterator(); + + COLLNAME = itr.next(); + COLLNAMESPACE = itr.next(); + COLLOWNER = itr.next(); + COLLENCODING = itr.next(); + COLLCOLLATE = itr.next(); + COLLCTYPE = itr.next(); + COLLPROVIDER = itr.next(); + COLLVERSION = itr.next(); + COLLISDETERMINISTIC = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static CharsetEncoding encoding(RegCollationImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.COLLENCODING, EncodingAdapter.INSTANCE); + } + + private static String collate(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.COLLCOLLATE, AS_STRING_INSTANCE); + } + + private static String ctype(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.COLLCTYPE, AS_STRING_INSTANCE); + } + + private static Provider provider(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte p = s.get(Att.COLLPROVIDER, INT1_INSTANCE); + switch ( p ) + { + case (byte)'d': + return Provider.DEFAULT; + case (byte)'c': + return Provider.LIBC; + case (byte)'i': + return Provider.ICU; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized collation provider value %#x", p)); + } + } + + private static String version(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.COLLVERSION, TextAdapter.INSTANCE); + } + + private static boolean deterministic(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.COLLISDETERMINISTIC, BOOLEAN_INSTANCE); + } + + /* API methods */ + + @Override + public CharsetEncoding encoding() + { + try + { + MethodHandle h = m_slots[SLOT_ENCODING]; + return (CharsetEncoding)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String collate() + { + try + { + MethodHandle h = m_slots[SLOT_COLLATE]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String ctype() + { + try + { + MethodHandle h = m_slots[SLOT_CTYPE]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Provider provider() // since PG 10 + { + try + { + MethodHandle h = m_slots[SLOT_PROVIDER]; + return (Provider)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String version() // since PG 10 + { + try + { + MethodHandle h = m_slots[SLOT_VERSION]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean deterministic() // since PG 12 + { + try + { + MethodHandle h = m_slots[SLOT_DETERMINISTIC]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegConfigImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegConfigImpl.java new file mode 100644 index 000000000..da2e74033 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegConfigImpl.java @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.TSCONFIGOID; // syscache + +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegConfig RegConfig} interface. + */ +class RegConfigImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegConfig +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TSCONFIGOID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Simple name(RegConfigImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.CFGNAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegConfigImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.CFGNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegConfigImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.CFGOWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegConfig */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegConfigImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static + { + s_initializer = + new Builder<>(RegConfigImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegConfigImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .build() + .compose(CatalogObjectImpl.Addressed.s_initializer); + } + + static class Att + { + static final Attribute CFGNAME; + static final Attribute CFGNAMESPACE; + static final Attribute CFGOWNER; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "cfgname", + "cfgnamespace", + "cfgowner" + ).iterator(); + + CFGNAME = itr.next(); + CFGNAMESPACE = itr.next(); + CFGOWNER = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegDictionaryImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegDictionaryImpl.java new file mode 100644 index 000000000..da562900a --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegDictionaryImpl.java @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.TSDICTOID; // syscache + +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegDictionary RegDictionary} interface. + */ +class RegDictionaryImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegDictionary +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TSDICTOID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Simple name(RegDictionaryImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.DICTNAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegDictionaryImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.DICTNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegDictionaryImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.DICTOWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegDictionary */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + RegDictionaryImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static + { + s_initializer = + new Builder<>(RegDictionaryImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegDictionaryImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .build() + .compose(CatalogObjectImpl.Addressed.s_initializer); + } + + static class Att + { + static final Attribute DICTNAME; + static final Attribute DICTNAMESPACE; + static final Attribute DICTOWNER; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "dictname", + "dictnamespace", + "dictowner" + ).iterator(); + + DICTNAME = itr.next(); + DICTNAMESPACE = itr.next(); + DICTOWNER = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegNamespaceImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegNamespaceImpl.java new file mode 100644 index 000000000..ad573fbe4 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegNamespaceImpl.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.NAMESPACEOID; // syscache + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import org.postgresql.pljava.pg.adt.NameAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegNamespace RegNamespace} interface. 
+ */ +class RegNamespaceImpl extends Addressed +implements + Nonshared, Named, Owned, + AccessControlled, RegNamespace +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return NAMESPACEOID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(RegNamespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.NSPNAME, NameAdapter.SIMPLE_INSTANCE); + } + + private static RegRole owner(RegNamespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.NSPOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegNamespaceImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.NSPACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegNamespace */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegNamespaceImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static + { + s_initializer = + new Builder<>(RegNamespaceImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegNamespaceImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + } + + static class Att + { + static final Attribute NSPNAME; + static final Attribute NSPOWNER; + static final Attribute NSPACL; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "nspname", + "nspowner", + "nspacl" + ).iterator(); + + NSPNAME = itr.next(); + NSPOWNER = itr.next(); + NSPACL = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegOperatorImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegOperatorImpl.java new file mode 100644 index 000000000..62671bb48 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegOperatorImpl.java @@ -0,0 +1,457 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.OPEROID; // syscache + +import static org.postgresql.pljava.pg.adt.NameAdapter.OPERATOR_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGOPERATOR_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Operator; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegOperator RegOperator} interface. + */ +class RegOperatorImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegOperator +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return OPEROID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Operator name(RegOperatorImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.OPRNAME, OPERATOR_INSTANCE); + } + + private static RegNamespace namespace(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.OPRNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegOperatorImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.OPROWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegOperator */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + RegOperatorImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_KIND; + static final int SLOT_CANMERGE; + static final int SLOT_CANHASH; + static final int SLOT_LEFTOPERAND; + static final int SLOT_RIGHTOPERAND; + static final int SLOT_RESULT; + static final int SLOT_COMMUTATOR; + static final int SLOT_NEGATOR; + static final int SLOT_EVALUATOR; + static final int SLOT_RESTRICTIONESTIMATOR; + static final int SLOT_JOINESTIMATOR; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegOperatorImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegOperatorImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .withReceiverType(null) + .withDependent( "kind", SLOT_KIND = i++) + .withDependent( "canMerge", SLOT_CANMERGE = i++) + .withDependent( "canHash", SLOT_CANHASH = i++) + .withDependent( "leftOperand", SLOT_LEFTOPERAND = i++) + .withDependent( "rightOperand", SLOT_RIGHTOPERAND = i++) + .withDependent( "result", SLOT_RESULT = i++) + .withDependent( "commutator", SLOT_COMMUTATOR = i++) + .withDependent( "negator", SLOT_NEGATOR = i++) + .withDependent( "evaluator", SLOT_EVALUATOR = i++) + .withDependent( + "restrictionEstimator", SLOT_RESTRICTIONESTIMATOR = i++) + .withDependent("joinEstimator", SLOT_JOINESTIMATOR = i++) + + .build() + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute OPRNAME; + static final Attribute OPRNAMESPACE; + static final Attribute OPROWNER; + static final Attribute OPRKIND; + static final Attribute OPRCANMERGE; + static final Attribute OPRCANHASH; + static final Attribute OPRLEFT; + static final Attribute OPRRIGHT; + static final Attribute OPRRESULT; + static final Attribute OPRCOM; + static final Attribute OPRNEGATE; + static final Attribute OPRCODE; + static final Attribute OPRREST; + static final Attribute OPRJOIN; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "oprname", + "oprnamespace", + "oprowner", + "oprkind", + "oprcanmerge", + "oprcanhash", + "oprleft", + "oprright", + "oprresult", + "oprcom", + "oprnegate", + "oprcode", + "oprrest", + "oprjoin" + ).iterator(); + + OPRNAME = itr.next(); + OPRNAMESPACE = itr.next(); + OPROWNER = itr.next(); + OPRKIND = itr.next(); + OPRCANMERGE = itr.next(); + OPRCANHASH = itr.next(); + OPRLEFT = itr.next(); + OPRRIGHT = itr.next(); + OPRRESULT = itr.next(); + OPRCOM = itr.next(); + OPRNEGATE = itr.next(); + OPRCODE = itr.next(); + OPRREST = itr.next(); + OPRJOIN = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static Kind kind(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.OPRKIND, INT1_INSTANCE); + switch ( b ) + { + case (byte)'b': + return Kind.INFIX; + case (byte)'l': + return Kind.PREFIX; + case (byte)'r': + @SuppressWarnings("deprecation") + Kind k = Kind.POSTFIX; + return k; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized operator kind value %#x", b)); + } + } + + private static boolean canMerge(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRCANMERGE, BOOLEAN_INSTANCE); + } + + private static boolean canHash(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRCANHASH, BOOLEAN_INSTANCE); + } + + private static RegType leftOperand(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRLEFT, REGTYPE_INSTANCE); + } + + private static RegType rightOperand(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRRIGHT, REGTYPE_INSTANCE); + } + + private static RegType result(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRRESULT, REGTYPE_INSTANCE); + } + + private static RegOperator commutator(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRCOM, REGOPERATOR_INSTANCE); + } + + private static RegOperator negator(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRNEGATE, REGOPERATOR_INSTANCE); + } + + private static RegProcedure evaluator(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.OPRCODE, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure + restrictionEstimator(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = + (RegProcedure) + s.get(Att.OPRREST, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure + joinEstimator(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.OPRJOIN, REGPROCEDURE_INSTANCE); + return p; + } + + /* API methods */ + + @Override + public Kind kind() + { + try + { + MethodHandle h = m_slots[SLOT_KIND]; + return (Kind)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean canMerge() + { + try + { + MethodHandle h = m_slots[SLOT_CANMERGE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean canHash() + { + try + { + MethodHandle h = m_slots[SLOT_CANHASH]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType leftOperand() + { + try + { + MethodHandle h = m_slots[SLOT_LEFTOPERAND]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType rightOperand() + { + try + { + MethodHandle h = m_slots[SLOT_RIGHTOPERAND]; + 
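+ /*
+ * Note: every API getter in these *Impl classes follows this same two-line
+ * shape. The handle cached in m_slots[SLOT_...] is the one built by
+ * SwitchPointCache.Builder from the private static computation method that
+ * withDependent(...) named for this slot; invoking it as
+ * h.invokeExact(this, h) presumably returns the cached value, or computes
+ * and caches it on first use, valid until the governing SwitchPoint is
+ * invalidated.
+ */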
return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType result() + { + try + { + MethodHandle h = m_slots[SLOT_RESULT]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegOperator commutator() + { + try + { + MethodHandle h = m_slots[SLOT_COMMUTATOR]; + return (RegOperator)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegOperator negator() + { + try + { + MethodHandle h = m_slots[SLOT_NEGATOR]; + return (RegOperator)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure evaluator() + { + try + { + MethodHandle h = m_slots[SLOT_EVALUATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure restrictionEstimator() + { + try + { + MethodHandle h = m_slots[SLOT_RESTRICTIONESTIMATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure joinEstimator() + { + try + { + MethodHandle h = m_slots[SLOT_JOINESTIMATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegProcedureImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegProcedureImpl.java new file mode 100644 index 000000000..af8757b27 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegProcedureImpl.java @@ -0,0 +1,1306 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.BitSet; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import java.util.concurrent.CopyOnWriteArraySet; + +import java.util.function.Function; +import java.util.function.Supplier; + +import java.util.stream.IntStream; + +import org.postgresql.pljava.annotation.Function.Effects; +import org.postgresql.pljava.annotation.Function.OnNullInput; +import org.postgresql.pljava.annotation.Function.Parallel; +import org.postgresql.pljava.annotation.Function.Security; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.Checked; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; +import org.postgresql.pljava.model.RegProcedure.Memo; +import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.PROCOID_CB; +import static org.postgresql.pljava.pg.ModelConstants.PROCOID; // syscache +import org.postgresql.pljava.pg.ProceduralLanguageImpl.PLJavaMemo; +import static org.postgresql.pljava.pg.TupleDescImpl.synthesizeDescriptor; + +import static org.postgresql.pljava.pg.adt.ArrayAdapter + .FLAT_STRING_LIST_INSTANCE; +import org.postgresql.pljava.pg.adt.GrantAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.PLANG_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.FLOAT4_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; +import static org.postgresql.pljava.pg.adt.XMLAdapter.SYNTHETIC_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegProcedure RegProcedure} interface. + */ +class RegProcedureImpl> extends Addressed> +implements + Nonshared>, Namespaced, Owned, + AccessControlled, RegProcedure +{ + private static final Function s_initializer; + + /** + * Count of instances subject to invalidation. + *
<p>
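+ * Incrementing the count past zero arms the PROCOID syscache invalidation
+ * callback, and decrementing it back to zero disarms it (see SP.onFirstUse
+ * and invalidate below), so the callback need only be armed while at least
+ * one live instance could be affected by it.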
    + * Only accessed in invalidate and SP.onFirstUse, both on the PG thread. + */ + private static int s_instances; + + private static class SP extends SwitchPoint + { + @Override + protected void onFirstUse() + { + if ( 1 == ++ s_instances ) + sysCacheInvalArmed(PROCOID_CB, true); + } + } + + private final SwitchPoint[] m_sp; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known> classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return PROCOID; + } + + /* Implementation of Named, Namespaced, Owned, AccessControlled */ + + private static Simple name(RegProcedureImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PRONAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PRONAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegProcedureImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PROOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PROACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegProcedure */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegProcedureImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_sp = new SwitchPoint[] { new SP() }; + } + + @Override + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_sp[0]; + if ( sp.unused() ) + return; + sps.add(sp); + m_sp[0] = new SP(); + if ( 0 == -- s_instances ) + sysCacheInvalArmed(PROCOID_CB, false); + + M why = m_why; + PLJavaMemo how = m_how; + boolean dependsOnTransforms = m_dependsOnTransforms; + m_why = null; + m_how = null; + m_dependsOnTransforms = false; + if ( why instanceof AbstractMemo ) + ((AbstractMemo)why).invalidate(sps, postOps); + if ( null != how ) + how.invalidate(sps, postOps); + if ( dependsOnTransforms ) + TransformImpl.removeDependentRoutine(this, transforms()); + } + + static final int SLOT_LANGUAGE; + static final int SLOT_COST; + static final int SLOT_ROWS; + static final int SLOT_VARIADICTYPE; + static final int SLOT_SUPPORT; + static final int SLOT_KIND; + static final int SLOT_SECURITY; + static final int SLOT_LEAKPROOF; + static final int SLOT_ONNULLINPUT; + static final int SLOT_RETURNSSET; + static final int SLOT_EFFECTS; + static final int SLOT_PARALLEL; + static final int SLOT_RETURNTYPE; + static final int SLOT_ARGTYPES; + static final int SLOT_ALLARGTYPES; + static final int SLOT_ARGMODES; + static final int SLOT_ARGNAMES; + static final int SLOT_TRANSFORMTYPES; + static final int SLOT_SRC; + static final int SLOT_BIN; + static final int SLOT_CONFIG; + + /* + * Slots for some additional computed values that are not exposed in API + * but will be useful here in the internals. 
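+ * Specifically: the input and output TupleDescriptor templates, the BitSets
+ * of template columns whose types still need resolution, and the supplier
+ * of applicable transforms; the inputsTemplate(), unresolvedInputs(),
+ * outputsTemplate(), unresolvedOutputs() and transforms() methods below
+ * read these slots through the same MethodHandle mechanism as the
+ * API-exposed values.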
+ */ + static final int SLOT_INPUTSTEMPLATE; + static final int SLOT_UNRESOLVEDINPUTS; + static final int SLOT_OUTPUTSTEMPLATE; + static final int SLOT_UNRESOLVEDOUTPUTS; + static final int SLOT_TRANSFORMS; + + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegProcedureImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> o.m_sp[0]) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(RegProcedureImpl.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "language", SLOT_LANGUAGE = i++) + .withDependent( "cost", SLOT_COST = i++) + .withDependent( "rows", SLOT_ROWS = i++) + .withDependent( "variadicType", SLOT_VARIADICTYPE = i++) + .withDependent( "support", SLOT_SUPPORT = i++) + .withDependent( "kind", SLOT_KIND = i++) + .withDependent( "security", SLOT_SECURITY = i++) + .withDependent( "leakproof", SLOT_LEAKPROOF = i++) + .withDependent( "onNullInput", SLOT_ONNULLINPUT = i++) + .withDependent( "returnsSet", SLOT_RETURNSSET = i++) + .withDependent( "effects", SLOT_EFFECTS = i++) + .withDependent( "parallel", SLOT_PARALLEL = i++) + .withDependent( "returnType", SLOT_RETURNTYPE = i++) + .withDependent( "argTypes", SLOT_ARGTYPES = i++) + .withDependent( "allArgTypes", SLOT_ALLARGTYPES = i++) + .withDependent( "argModes", SLOT_ARGMODES = i++) + .withDependent( "argNames", SLOT_ARGNAMES = i++) + .withDependent("transformTypes", SLOT_TRANSFORMTYPES = i++) + .withDependent( "src", SLOT_SRC = i++) + .withDependent( "bin", SLOT_BIN = i++) + .withDependent( "config", SLOT_CONFIG = i++) + + .withDependent( "inputsTemplate", SLOT_INPUTSTEMPLATE = i++) + .withDependent( "unresolvedInputs", SLOT_UNRESOLVEDINPUTS = i++) + .withDependent( "outputsTemplate", SLOT_OUTPUTSTEMPLATE = i++) + .withDependent("unresolvedOutputs", SLOT_UNRESOLVEDOUTPUTS = i++) + .withDependent( "transforms", SLOT_TRANSFORMS = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. 
+ */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute PRONAME; + static final Attribute PRONAMESPACE; + static final Attribute PROOWNER; + static final Attribute PROACL; + static final Attribute PROLANG; + static final Attribute PROCOST; + static final Attribute PROROWS; + static final Attribute PROVARIADIC; + static final Attribute PROSUPPORT; + static final Attribute PROKIND; + static final Attribute PROSECDEF; + static final Attribute PROLEAKPROOF; + static final Attribute PROISSTRICT; + static final Attribute PRORETSET; + static final Attribute PROVOLATILE; + static final Attribute PROPARALLEL; + static final Attribute PRORETTYPE; + static final Attribute PROARGTYPES; + static final Attribute PROALLARGTYPES; + static final Attribute PROARGMODES; + static final Attribute PROARGNAMES; + static final Attribute PROTRFTYPES; + static final Attribute PROSRC; + static final Attribute PROBIN; + static final Attribute PROCONFIG; + static final Attribute PROARGDEFAULTS; + static final Attribute PROSQLBODY; + + static + { + Iterator itr = attNames( + "proname", + "pronamespace", + "proowner", + "proacl", + "prolang", + "procost", + "prorows", + "provariadic", + "prosupport", + "prokind", + "prosecdef", + "proleakproof", + "proisstrict", + "proretset", + "provolatile", + "proparallel", + "prorettype", + "proargtypes", + "proallargtypes", + "proargmodes", + "proargnames", + "protrftypes", + "prosrc", + "probin", + "proconfig", + "proargdefaults" + ).alsoIf(PG_VERSION_NUM >= 140000, + "prosqlbody" + ).project(CLASSID.tupleDescriptor()); + + PRONAME = itr.next(); + PRONAMESPACE = itr.next(); + PROOWNER = itr.next(); + PROACL = itr.next(); + PROLANG = itr.next(); + PROCOST = itr.next(); + PROROWS = itr.next(); + PROVARIADIC = itr.next(); + PROSUPPORT = itr.next(); + PROKIND = itr.next(); + PROSECDEF = itr.next(); + PROLEAKPROOF = itr.next(); + PROISSTRICT = itr.next(); + PRORETSET = itr.next(); + PROVOLATILE = itr.next(); + PROPARALLEL = itr.next(); + PRORETTYPE = itr.next(); + PROARGTYPES = itr.next(); + PROALLARGTYPES = itr.next(); + PROARGMODES = itr.next(); + PROARGNAMES = itr.next(); + PROTRFTYPES = itr.next(); + PROSRC = itr.next(); + PROBIN = itr.next(); + PROCONFIG = itr.next(); + PROARGDEFAULTS = itr.next(); + PROSQLBODY = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* mutable non-API fields that will only be used on the PG thread */ + + /* + * This is the idea behind the API memo() method. + * + * A Why memo can be retrieved with the memo() method. The method does not + * synchronize. It is documented to return a valid result only in certain + * circumstances, which an individual Why subinterface should detail. A Why + * memo's type is constrained by the type parameter M. + * + * As the only foreseeable How memo for now is PLJavaMemo, that field is + * of fixed type for now. PLJavaBased documents that it is valid when + * the dispatcher passes it to a language-handler method along with + * a RegProcedure. The dispatcher, while allowing the handler to run + * in another thread, is always on "the PG thread" when manipulating m_how. + */ + M m_why; + PLJavaMemo m_how; + + /* + * This flag is only set in ProceduralLanguageImpl.transformsFor when it is + * about to return a nonempty list of transforms and has registered this + * routine as depending on them. It is checked and cleared in invalidate + * above. 
If it is set, invalidate can safely use transforms() to retrieve + * the list, which is in the cache slot until actual invalidation of the + * SwitchPoint. + */ + boolean m_dependsOnTransforms = false; + + /* + * Computation methods for ProceduralLanguage.PLJavaBased API methods + * that happen to be implemented here for now. + */ + + static final EnumSet s_parameterModes = + EnumSet.of(ArgMode.IN, ArgMode.INOUT, ArgMode.VARIADIC); + + static final EnumSet s_resultModes = + EnumSet.of(ArgMode.INOUT, ArgMode.OUT, ArgMode.TABLE); + + static final BitSet s_noBits = new BitSet(0); + + private static TupleDescriptor inputsTemplate(RegProcedureImpl o) + throws SQLException + { + List names = o.argNames(); + List types = o.allArgTypes(); + + if ( null == types ) + { + types = o.argTypes(); + return synthesizeDescriptor(types, names, null); + } + + List modes = o.argModes(); + BitSet select = new BitSet(modes.size()); + IntStream.range(0, modes.size()) + .filter(i -> s_parameterModes.contains(modes.get(i))) + .forEach(select::set); + + return synthesizeDescriptor(types, names, select); + } + + private static BitSet unresolvedInputs(RegProcedureImpl o) + throws SQLException + { + TupleDescriptor td = o.inputsTemplate(); + BitSet unr = new BitSet(0); + IntStream.range(0, td.size()) + .filter(i -> td.get(i).type().needsResolution()) + .forEach(unr::set); + return unr; + } + + private static TupleDescriptor outputsTemplate(RegProcedureImpl o) + throws SQLException + { + RegTypeImpl returnType = (RegTypeImpl)o.returnType(); + + if ( RegType.VOID == returnType ) + return null; + + if ( RegType.RECORD != returnType ) + return returnType.notionalDescriptor(); + + /* + * For plain unmodified RECORD, there's more work to do. If there are + * declared outputs, gin up a descriptor from those. If there aren't, + * this can only be a function that relies on every call site supplying + * a column definition list; return null. + */ + List modes = o.argModes(); + if ( null == modes ) + return null; // Nothing helpful here. Must rely on call site. + + BitSet select = new BitSet(modes.size()); + IntStream.range(0, modes.size()) + .filter(i -> s_resultModes.contains(modes.get(i))) + .forEach(select::set); + + if ( select.isEmpty() ) + return null; // No INOUT/OUT/TABLE cols; still need call site. + + /* + * Build a descriptor from the INOUT/OUT/TABLE types and names. + */ + + List types = o.allArgTypes(); + List names = o.argNames(); + + return synthesizeDescriptor(types, names, select); + } + + private static BitSet unresolvedOutputs(RegProcedureImpl o) + throws SQLException + { + TupleDescriptor td = o.outputsTemplate(); + if ( null == td ) + return RegType.VOID == o.returnType() ? 
s_noBits : null; + BitSet unr = new BitSet(0); + IntStream.range(0, td.size()) + .filter(i -> td.get(i).type().needsResolution()) + .forEach(unr::set); + return unr; + } + + private static Checked.Supplier,SQLException> transforms( + RegProcedureImpl o) + throws SQLException + { + List types = o.transformTypes(); + if ( null == types || types.isEmpty() ) + return () -> null; + + ProceduralLanguageImpl pl = (ProceduralLanguageImpl)o.language(); + return pl.transformsFor(types, o); + } + + /* computation methods for API */ + + private static ProceduralLanguage language(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROLANG, PLANG_INSTANCE); + } + + private static float cost(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROCOST, FLOAT4_INSTANCE); + } + + private static float rows(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROROWS, FLOAT4_INSTANCE); + } + + private static RegType variadicType(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROVARIADIC, REGTYPE_INSTANCE); + } + + private static RegProcedure support(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.PROSUPPORT, REGPROCEDURE_INSTANCE); + return p; + } + + private static Kind kind(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.PROKIND, INT1_INSTANCE); + switch ( b ) + { + case (byte)'f': + return Kind.FUNCTION; + case (byte)'p': + return Kind.PROCEDURE; + case (byte)'a': + return Kind.AGGREGATE; + case (byte)'w': + return Kind.WINDOW; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized procedure/function kind value %#x", b)); + } + } + + private static Security security(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + if ( s.get(Att.PROSECDEF, BOOLEAN_INSTANCE) ) + return Security.DEFINER; + return Security.INVOKER; + } + + private static boolean leakproof(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROLEAKPROOF, BOOLEAN_INSTANCE); + } + + private static OnNullInput onNullInput(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + if ( s.get(Att.PROISSTRICT, BOOLEAN_INSTANCE) ) + return OnNullInput.RETURNS_NULL; + return OnNullInput.CALLED; + } + + private static boolean returnsSet(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PRORETSET, BOOLEAN_INSTANCE); + } + + private static Effects effects(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.PROVOLATILE, INT1_INSTANCE); + switch ( b ) + { + case (byte)'i': + return Effects.IMMUTABLE; + case (byte)'s': + return Effects.STABLE; + case (byte)'v': + return Effects.VOLATILE; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized procedure/function volatility value %#x", b)); + } + } + + private static Parallel parallel(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.PROPARALLEL, INT1_INSTANCE); + switch ( b ) + { + case (byte)'s': + return Parallel.SAFE; + case (byte)'r': + return Parallel.RESTRICTED; + case (byte)'u': + return Parallel.UNSAFE; 
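+ /*
+ * pg_proc.proparallel stores the parallel-safety setting as a single byte:
+ * 's' (safe), 'r' (restricted) or 'u' (unsafe), matching the cases above.
+ * Any other value would have to come from a PostgreSQL release newer than
+ * this code knows about, hence the exception below.
+ */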
+ default: + throw new UnsupportedOperationException(String.format( + "Unrecognized procedure/function parallel safety value %#x",b)); + } + } + + private static RegType returnType(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PRORETTYPE, REGTYPE_INSTANCE); + } + + private static List argTypes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROARGTYPES, + ArrayAdapters.REGTYPE_LIST_INSTANCE); + } + + private static List allArgTypes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROALLARGTYPES, + ArrayAdapters.REGTYPE_LIST_INSTANCE); + } + + private static List argModes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROARGMODES, + ArrayAdapters.ARGMODE_LIST_INSTANCE); + } + + private static List argNames(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROARGNAMES, + ArrayAdapters.TEXT_NAME_LIST_INSTANCE); + } + + private static List transformTypes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROTRFTYPES, + ArrayAdapters.REGTYPE_LIST_INSTANCE); + } + + private static String src(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROSRC, TextAdapter.INSTANCE); + } + + private static String bin(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROBIN, TextAdapter.INSTANCE); + } + + private static List config(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROCONFIG, FLAT_STRING_LIST_INSTANCE); + } + + /* + * API-like methods not actually exposed as RegProcedure API. + * There are exposed on the RegProcedure.Memo.How subinterface + * ProceduralLanguage.PLJavaBased. These implementations could + * conceivably be moved to the implementation of that, so that + * not all RegProcedure instances would haul around five extra slots. + */ + public TupleDescriptor inputsTemplate() + { + try + { + MethodHandle h = m_slots[SLOT_INPUTSTEMPLATE]; + return (TupleDescriptor)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public BitSet unresolvedInputs() + { + try + { + MethodHandle h = m_slots[SLOT_UNRESOLVEDINPUTS]; + BitSet unr = (BitSet)h.invokeExact(this, h); + return (BitSet)unr.clone(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public TupleDescriptor outputsTemplate() + { + try + { + MethodHandle h = m_slots[SLOT_OUTPUTSTEMPLATE]; + return (TupleDescriptor)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public BitSet unresolvedOutputs() + { + try + { + MethodHandle h = m_slots[SLOT_UNRESOLVEDOUTPUTS]; + BitSet unr = (BitSet)h.invokeExact(this, h); + return null == unr ? 
null : (BitSet)unr.clone(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public List transforms() + { + try + { + MethodHandle h = m_slots[SLOT_TRANSFORMS]; + Checked.Supplier,SQLException> s = + (Checked.Supplier,SQLException>) + h.invokeExact(this, h); + return s.get(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* API methods */ + + @Override + public ProceduralLanguage language() + { + try + { + MethodHandle h = m_slots[SLOT_LANGUAGE]; + return (ProceduralLanguage)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public float cost() + { + try + { + MethodHandle h = m_slots[SLOT_COST]; + return (float)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public float rows() + { + try + { + MethodHandle h = m_slots[SLOT_ROWS]; + return (float)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType variadicType() + { + try + { + MethodHandle h = m_slots[SLOT_VARIADICTYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure support() + { + try + { + MethodHandle h = m_slots[SLOT_SUPPORT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Kind kind() + { + try + { + MethodHandle h = m_slots[SLOT_KIND]; + return (Kind)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Security security() + { + try + { + MethodHandle h = m_slots[SLOT_SECURITY]; + return (Security)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean leakproof() + { + try + { + MethodHandle h = m_slots[SLOT_LEAKPROOF]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public OnNullInput onNullInput() + { + try + { + MethodHandle h = m_slots[SLOT_ONNULLINPUT]; + return (OnNullInput)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean returnsSet() + { + try + { + MethodHandle h = m_slots[SLOT_RETURNSSET]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Effects effects() + { + try + { + MethodHandle h = m_slots[SLOT_EFFECTS]; + return (Effects)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Parallel parallel() + { + try + { + MethodHandle h = m_slots[SLOT_PARALLEL]; + return (Parallel)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType returnType() + { + try + { + MethodHandle h = m_slots[SLOT_RETURNTYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List argTypes() + { + try + { + MethodHandle h = m_slots[SLOT_ARGTYPES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List allArgTypes() + { + try + { + MethodHandle h = m_slots[SLOT_ALLARGTYPES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List argModes() + { + try + { + MethodHandle h = m_slots[SLOT_ARGMODES]; + return 
(List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List argNames() + { + try + { + MethodHandle h = m_slots[SLOT_ARGNAMES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public SQLXML argDefaults() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + TupleTableSlot s = cacheTuple(); + return s.get(Att.PROARGDEFAULTS, SYNTHETIC_INSTANCE); + } + + @Override + public List transformTypes() + { + try + { + MethodHandle h = m_slots[SLOT_TRANSFORMTYPES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String src() + { + try + { + MethodHandle h = m_slots[SLOT_SRC]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String bin() + { + try + { + MethodHandle h = m_slots[SLOT_BIN]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public SQLXML sqlBody() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + if ( null == Att.PROSQLBODY ) // missing in this PG version + return null; + + TupleTableSlot s = cacheTuple(); + return s.get(Att.PROSQLBODY, SYNTHETIC_INSTANCE); + } + + @Override + public List config() + { + try + { + MethodHandle h = m_slots[SLOT_CONFIG]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public M memo() + { + /* + * See the m_why declaration comments on this lack of synchronization. + */ + return m_why; + } + + /** + * Abstract superclass of both {@link Why Why} and {@link How How} memo + * implementations. + */ + public static abstract class AbstractMemo + { + protected AbstractMemo() + { + assert threadMayEnterPG() : "AbstractMemo thread"; + } + + abstract void invalidate(List sps, List postOps); + + /** + * Abstract base class for a {@link Why Why} memo implementation. + */ + public static abstract class Why> + extends AbstractMemo implements Memo.Why + { + /** + * The {@code RegProcedure} instance carrying this memo. + */ + protected final RegProcedureImpl m_carrier; + + protected Why(RegProcedure carrier) + { + @SuppressWarnings("unchecked") + RegProcedureImpl narrowed = (RegProcedureImpl)carrier; + if ( null != narrowed.m_why ) + throw new AssertionError("carrier already has why memo"); + m_carrier = narrowed; + } + + public RegProcedureImpl apply() + { + assert threadMayEnterPG() : "AbstractMemo.Why thread"; + assert null == m_carrier.m_why : "carrier memo became nonnull"; + + @SuppressWarnings("unchecked") + M self = (M)this; + + m_carrier.m_why = self; + return m_carrier; + } + + void invalidate(List sps, List postOps) + { + m_carrier.m_why = null; + } + } + + /** + * Abstract base class for a {@link How How} memo implementation. + */ + public static abstract class How> + extends AbstractMemo implements Memo.How + { + /** + * The {@code RegProcedure} instance carrying this memo. 
+ */ + protected final RegProcedureImpl m_carrier; + + protected How(RegProcedure carrier) + { + RegProcedureImpl narrowed = (RegProcedureImpl)carrier; + if ( null != narrowed.m_how ) + throw new AssertionError("carrier already has how memo"); + m_carrier = narrowed; + } + + public RegProcedureImpl apply() + { + assert threadMayEnterPG() : "AbstractMemo.how thread"; + assert null == m_carrier.m_how : "carrier memo became nonnull"; + + // generalize later if there is ever any other possibility + PLJavaMemo self = (PLJavaMemo)this; + + m_carrier.m_how = self; + return m_carrier; + } + + void invalidate(List sps, List postOps) + { + m_carrier.m_how = null; + } + } + } + + /** + * Abstract superclass of a {@code Why} memo used on routines that play + * specific support roles for other catalog objects (such as a + * {@code Handler} or {@code Validator} for a {@code ProceduralLanguage} + * or a {@code FromSQL} or {@code ToSQL} for a {@code Transform>}, where + * dependent objects should be invalidated if the support routine is. + *
<p>
    + * Because a support routine can be depended on by more than one object + * (multiple languages, say, can share the same handler or validator + * routines), the memo carries a {@code Set} of dependent objects, not + * just a single reference. The {@code Set} implementation is chosen on + * an expectation of rare mutations and relatively small sets. + *
<p>
    + * A concrete subclass should supply an appropriately-typed static + * {@code addDependent} method that delegates to the protected + * {@link #add add} method here. The static {@code removeDependent} + * method of this class can be invoked directly (typically qualified + * by the concrete subclass name, for consistency with the + * {@code addDependent} method). + */ + static abstract class SupportMemo< + M extends Memo.Why, + A extends CatalogObjectImpl.Addressed + > + extends AbstractMemo.Why + { + private final Set m_dependents; + + protected SupportMemo( + RegProcedure carrier, A dep) + { + super(carrier); + m_dependents = new CopyOnWriteArraySet<>(Set.of(dep)); + } + + /** + * Has the effect of {@code super.invalidate} (simply nulling + * the carrier {@code RegProcedure}'s reference to this memo), + * and additionally calls + * {@link CatalogObjectImpl.Addressed#invalidate invalidate} + * on each recorded dependent A object. + */ + @Override + void invalidate(List sps, List postOps) + { + super.invalidate(sps, postOps); + m_dependents.forEach(a -> a.invalidate(sps, postOps)); + } + + /** + * Removes dep as a recorded dependency on + * proc, with no effect if proc isn't carrying + * a memo that extends this class or if its dependency set does not + * contain dep. + */ + static < + M extends Memo.Why, + A extends CatalogObjectImpl.Addressed + > + void removeDependent(RegProcedure proc, A dep) + { + M memo = proc.memo(); + if ( memo instanceof SupportMemo ) + ((SupportMemo)memo).m_dependents.remove(dep); + } + + /** + * Adds dep as a recorded dependency on proc, + * using an existing memo corresponding to type T + * if present, or getting a new one from supplier and + * applying it. + *
<p>
    + * The supplier will typically be a lambda that passes + * proc and dep to the constructor of + * the concrete subclass of this class. + *
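+ * For instance (an illustrative sketch only; {@code ValidatorMemo} and its
+ * parameter types are hypothetical, not part of this change), a concrete
+ * subclass could expose
+ * {@code static void addDependent(RegProcedure proc, ProceduralLanguageImpl dep)}
+ * that simply delegates to
+ * {@code add(proc, dep, ValidatorMemo.class, () -> new ValidatorMemo(proc, dep))}.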
<p>
    + * No action will be taken if proc is the invalid + * instance. It is not expected that proc will already + * be carrying a memo of some other type; an exception will result + * if it is. + */ + protected static < + O extends Memo.Why, + M extends Memo.Why, + T extends SupportMemo, + A extends CatalogObjectImpl.Addressed + > + void add( + RegProcedure proc, A dep, + Class witness, Supplier supplier) + { + if ( ! proc.isValid() ) + return; + O memo = proc.memo(); + if ( witness.isInstance(memo) ) + { + @SuppressWarnings("unchecked") + SupportMemo sm = (SupportMemo)memo; + sm.m_dependents.add(dep); + } + else + supplier.get().apply(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegRoleImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegRoleImpl.java new file mode 100644 index 000000000..9a28a9e3c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegRoleImpl.java @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.file.attribute.GroupPrincipal; +import java.nio.file.attribute.UserPrincipal; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import org.postgresql.pljava.RolePrincipal; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.AUTHOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.AUTHMEMMEMROLE; +import static org.postgresql.pljava.pg.ModelConstants.AUTHMEMROLEMEM; + +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT4_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegRole RegRole} interface. + *
<p>
    + * That this class can in fact be cast to {@link RegRole.Grantee Grantee} is an + * unadvertised implementation detail. + */ +class RegRoleImpl extends Addressed +implements + Shared, Named, + AccessControlled, RegRole.Grantee +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return AUTHOID; + } + + /* Implementation of Named, AccessControlled */ + + private static Simple name(RegRoleImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.ROLNAME, SIMPLE_INSTANCE); + } + + private static List grants(RegRoleImpl o) + { + throw notyet("CatCList support needed"); + } + + /* Implementation of RegRole */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegRoleImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_MEMBEROF; + static final int SLOT_SUPERUSER; + static final int SLOT_INHERIT; + static final int SLOT_CREATEROLE; + static final int SLOT_CREATEDB; + static final int SLOT_CANLOGIN; + static final int SLOT_REPLICATION; + static final int SLOT_BYPASSRLS; + static final int SLOT_CONNECTIONLIMIT; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegRoleImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegRoleImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "memberOf", SLOT_MEMBEROF = i++) + .withDependent( "superuser", SLOT_SUPERUSER = i++) + .withDependent( "inherit", SLOT_INHERIT = i++) + .withDependent( "createRole", SLOT_CREATEROLE = i++) + .withDependent( "createDB", SLOT_CREATEDB = i++) + .withDependent( "canLogIn", SLOT_CANLOGIN = i++) + .withDependent( "replication", SLOT_REPLICATION = i++) + .withDependent( "bypassRLS", SLOT_BYPASSRLS = i++) + .withDependent("connectionLimit", SLOT_CONNECTIONLIMIT = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute ROLNAME; + static final Attribute ROLSUPER; + static final Attribute ROLINHERIT; + static final Attribute ROLCREATEROLE; + static final Attribute ROLCREATEDB; + static final Attribute ROLCANLOGIN; + static final Attribute ROLREPLICATION; + static final Attribute ROLBYPASSRLS; + static final Attribute ROLCONNLIMIT; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "rolname", + "rolsuper", + "rolinherit", + "rolcreaterole", + "rolcreatedb", + "rolcanlogin", + "rolreplication", + "rolbypassrls", + "rolconnlimit" + ).iterator(); + + ROLNAME = itr.next(); + ROLSUPER = itr.next(); + ROLINHERIT = itr.next(); + ROLCREATEROLE = itr.next(); + ROLCREATEDB = itr.next(); + ROLCANLOGIN = itr.next(); + ROLREPLICATION = itr.next(); + ROLBYPASSRLS = itr.next(); + ROLCONNLIMIT = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static List memberOf(RegRoleImpl o) + { + throw notyet("CatCList support needed"); + } + + private static boolean superuser(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLSUPER, BOOLEAN_INSTANCE); + } + + private static boolean inherit(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLINHERIT, BOOLEAN_INSTANCE); + } + + private static boolean createRole(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCREATEROLE, BOOLEAN_INSTANCE); + } + + private static boolean createDB(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCREATEDB, BOOLEAN_INSTANCE); + } + + private static boolean canLogIn(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCANLOGIN, BOOLEAN_INSTANCE); + } + + private static boolean replication(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLREPLICATION, BOOLEAN_INSTANCE); + } + + private static boolean bypassRLS(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLBYPASSRLS, BOOLEAN_INSTANCE); + } + + private static int connectionLimit(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCONNLIMIT, INT4_INSTANCE); + } + + /* API methods */ + + @Override + public List memberOf() + { + try + { + MethodHandle h = m_slots[SLOT_MEMBEROF]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean superuser() + { + try + { + MethodHandle h = m_slots[SLOT_SUPERUSER]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean inherit() + { + try + { + MethodHandle h = m_slots[SLOT_INHERIT]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean createRole() + { + try + { + MethodHandle h = m_slots[SLOT_CREATEROLE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean createDB() + { + try + { + MethodHandle h = m_slots[SLOT_CREATEDB]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean canLogIn() + { + try + { + MethodHandle h = m_slots[SLOT_CANLOGIN]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean replication() + { + try + { + MethodHandle h = m_slots[SLOT_REPLICATION]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean bypassRLS() + { + try + { + MethodHandle h = m_slots[SLOT_BYPASSRLS]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public int connectionLimit() + { + try + { + MethodHandle h = m_slots[SLOT_CONNECTIONLIMIT]; + return (int)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* Implementation of RegRole.Grantee */ + + /* + * As it turns out, PostgreSQL doesn't use a notion like Identifier.Pseudo + * for the name of the public 
grantee. It uses the ordinary, folding name + * "public" and reserves it, forbidding that any actual role have any name + * that matches it according to the usual folding rules. So, construct that + * name here. + */ + private static final Simple s_public_name = Simple.fromCatalog("public"); + + @Override + public Simple nameAsGrantee() + { + return isPublic() ? s_public_name : name(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegTypeImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegTypeImpl.java new file mode 100644 index 000000000..c088c240c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegTypeImpl.java @@ -0,0 +1,1436 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.sql.SQLType; +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.Iterator; +import java.util.List; + +import java.util.function.Function; +import java.util.function.Supplier; + +import org.postgresql.pljava.TargetList.Projection; + +import static org.postgresql.pljava.internal.SwitchPointCache.doNotCache; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.TYPEOID_CB; + +import static org.postgresql.pljava.pg.ModelConstants.TYPEOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.alignmentFromCatalog; +import static org.postgresql.pljava.pg.ModelConstants.storageFromCatalog; +import static org.postgresql.pljava.pg.TupleDescImpl.synthesizeDescriptor; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import org.postgresql.pljava.pg.adt.NameAdapter; +import org.postgresql.pljava.pg.adt.OidAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCLASS_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCOLLATION_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; +import static org.postgresql.pljava.pg.adt.XMLAdapter.SYNTHETIC_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.*; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Qualified; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/* + * Can get lots of information, including TupleDesc, domain 
constraints, etc., + * from the typcache. A typcache entry is immortal but bits of it can change. + * So it may be safe to keep a reference to the entry forever, but detect when + * bits have changed. See in particular tupDesc_identifier. + * + * Many of the attributes of pg_type are available in the typcache. But + * lookup_type_cache() does not have a _noerror version. If there is any doubt + * about the existence of a type to be looked up, one must either do a syscache + * lookup first anyway, or have a plan to catch an undefined_object error. + * Same if you happen to look up a type still in the "only a shell" stage. + * At that rate, may as well rely on the syscache for all the pg_type info. + */ + +/** + * Implementation of the {@link RegType RegType} interface. + */ +abstract class RegTypeImpl extends Addressed +implements + Nonshared, Namespaced, Owned, + AccessControlled, RegType +{ + /** + * Per-instance switch point, to be invalidated selectively + * by a syscache callback. + *

    + * Only {@link NoModifier NoModifier} carries one; derived instances of + * {@link Modified Modified} or {@link Blessed Blessed} return that one. + */ + abstract SwitchPoint cacheSwitchPoint(); + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TYPEOID; + } + + /* Implementation of Named, Namespaced, Owned, AccessControlled */ + + private static Simple name(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.TYPNAME, NameAdapter.SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegType */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegTypeImpl(MethodHandle[] slots) + { + super(slots); + } + + /** + * Holder for the {@code RegClass} corresponding to {@code relation()}, + * only non-null during a call of {@code dualHandshake}. + */ + private RegClass m_dual = null; + + /** + * A lazily-populated synthetic tuple descriptor with a single element + * of this type. + */ + private TupleDescriptor m_singleton; + + /** + * Called by the corresponding {@code RegClass} instance if it has just + * looked us up. + *

    + * Because the {@code SwitchPointCache} recomputation methods always execute + * on the PG thread, plain access to an instance field does the trick here. + */ + void dualHandshake(RegClass dual) + { + try + { + m_dual = dual; + dual = relation(); + assert dual == m_dual : "RegClass/RegType handshake outcome"; + } + finally + { + m_dual = null; + } + } + + static final Function s_initializer; + + static final int SLOT_TUPLEDESCRIPTOR; + static final int SLOT_NOTIONALDESC; // defined even for non-row type + static final int SLOT_LENGTH; + static final int SLOT_BYVALUE; + static final int SLOT_TYPE; + static final int SLOT_CATEGORY; + static final int SLOT_PREFERRED; + static final int SLOT_DEFINED; + static final int SLOT_DELIMITER; + static final int SLOT_RELATION; + static final int SLOT_ELEMENT; + static final int SLOT_ARRAY; + static final int SLOT_INPUT; + static final int SLOT_OUTPUT; + static final int SLOT_RECEIVE; + static final int SLOT_SEND; + static final int SLOT_MODIFIERINPUT; + static final int SLOT_MODIFIEROUTPUT; + static final int SLOT_ANALYZE; + static final int SLOT_SUBSCRIPT; + static final int SLOT_ALIGNMENT; + static final int SLOT_STORAGE; + static final int SLOT_NOTNULL; + static final int SLOT_BASETYPE; + static final int SLOT_DIMENSIONS; + static final int SLOT_COLLATION; + static final int SLOT_DEFAULTTEXT; + + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegTypeImpl.class) + .withLookup(lookup().in(RegTypeImpl.class)) + .withSwitchPoint(RegTypeImpl::cacheSwitchPoint) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(RegTypeImpl.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent("name", SLOT_NAME) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withReturnType(null) + .withDependent("namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent("owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent("grants", SLOT_ACL) + + .withReceiverType(null) + .withSwitchPoint(o -> + { + RegClassImpl c = (RegClassImpl)o.relation(); + if ( c.isValid() ) + return c.cacheSwitchPoint(); + return o.cacheSwitchPoint(); + }) + .withDependent( + "tupleDescriptorCataloged", SLOT_TUPLEDESCRIPTOR = i++) + .withDependent("notionalDescriptor", SLOT_NOTIONALDESC = i++) + + .withSwitchPoint(RegTypeImpl::cacheSwitchPoint) + .withDependent( "length", SLOT_LENGTH = i++) + .withDependent( "byValue", SLOT_BYVALUE = i++) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent( "category", SLOT_CATEGORY = i++) + .withDependent( "preferred", SLOT_PREFERRED = i++) + .withDependent( "defined", SLOT_DEFINED = i++) + .withDependent( "delimiter", SLOT_DELIMITER = i++) + .withDependent( "relation", SLOT_RELATION = i++) + .withDependent( "element", SLOT_ELEMENT = i++) + .withDependent( "array", SLOT_ARRAY = i++) + .withDependent( "input", SLOT_INPUT = i++) + .withDependent( "output", SLOT_OUTPUT = i++) + .withDependent( "receive", SLOT_RECEIVE = i++) + .withDependent( "send", SLOT_SEND = i++) + .withDependent( "modifierInput", SLOT_MODIFIERINPUT = i++) + .withDependent( "modifierOutput", SLOT_MODIFIEROUTPUT = i++) + .withDependent( "analyze", SLOT_ANALYZE = i++) + .withDependent( 
"subscript", SLOT_SUBSCRIPT = i++) + .withDependent( "alignment", SLOT_ALIGNMENT = i++) + .withDependent( "storage", SLOT_STORAGE = i++) + .withDependent( "notNull", SLOT_NOTNULL = i++) + .withDependent( "baseType", SLOT_BASETYPE = i++) + .withDependent( "dimensions", SLOT_DIMENSIONS = i++) + .withDependent( "collation", SLOT_COLLATION = i++) + .withDependent( "defaultText", SLOT_DEFAULTTEXT = i++) + + .build(); + NSLOTS = i; + } + + static class Att + { + static final Projection TYPBASETYPE_TYPTYPMOD; + + static final Attribute TYPNAME; + static final Attribute TYPNAMESPACE; + static final Attribute TYPOWNER; + static final Attribute TYPACL; + static final Attribute TYPLEN; + static final Attribute TYPBYVAL; + static final Attribute TYPTYPE; + static final Attribute TYPCATEGORY; + static final Attribute TYPISPREFERRED; + static final Attribute TYPISDEFINED; + static final Attribute TYPDELIM; + static final Attribute TYPRELID; + static final Attribute TYPELEM; + static final Attribute TYPARRAY; + static final Attribute TYPINPUT; + static final Attribute TYPOUTPUT; + static final Attribute TYPRECEIVE; + static final Attribute TYPSEND; + static final Attribute TYPMODIN; + static final Attribute TYPMODOUT; + static final Attribute TYPANALYZE; + static final Attribute TYPALIGN; + static final Attribute TYPSTORAGE; + static final Attribute TYPNOTNULL; + static final Attribute TYPNDIMS; + static final Attribute TYPCOLLATION; + static final Attribute TYPDEFAULT; + static final Attribute TYPDEFAULTBIN; + static final Attribute TYPSUBSCRIPT; + + static + { + AttNames itr = attNames( + "typbasetype", // these two are wanted + "typtypmod", // together, first, below + "typname", + "typnamespace", + "typowner", + "typacl", + "typlen", + "typbyval", + "typtype", + "typcategory", + "typispreferred", + "typisdefined", + "typdelim", + "typrelid", + "typelem", + "typarray", + "typinput", + "typoutput", + "typreceive", + "typsend", + "typmodin", + "typmodout", + "typanalyze", + "typalign", + "typstorage", + "typnotnull", + "typndims", + "typcollation", + "typdefault", + "typdefaultbin" + ).alsoIf(PG_VERSION_NUM >= 140000, + "typsubscript" + ).project(CLASSID.tupleDescriptor()); + + TYPBASETYPE_TYPTYPMOD = itr.project(itr.next(), itr.next()); + + TYPNAME = itr.next(); + TYPNAMESPACE = itr.next(); + TYPOWNER = itr.next(); + TYPACL = itr.next(); + TYPLEN = itr.next(); + TYPBYVAL = itr.next(); + TYPTYPE = itr.next(); + TYPCATEGORY = itr.next(); + TYPISPREFERRED = itr.next(); + TYPISDEFINED = itr.next(); + TYPDELIM = itr.next(); + TYPRELID = itr.next(); + TYPELEM = itr.next(); + TYPARRAY = itr.next(); + TYPINPUT = itr.next(); + TYPOUTPUT = itr.next(); + TYPRECEIVE = itr.next(); + TYPSEND = itr.next(); + TYPMODIN = itr.next(); + TYPMODOUT = itr.next(); + TYPANALYZE = itr.next(); + TYPALIGN = itr.next(); + TYPSTORAGE = itr.next(); + TYPNOTNULL = itr.next(); + TYPNDIMS = itr.next(); + TYPCOLLATION = itr.next(); + TYPDEFAULT = itr.next(); + TYPDEFAULTBIN = itr.next(); + TYPSUBSCRIPT = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods for non-API internal slots */ + + private static TupleDescriptor notionalDescriptor(RegTypeImpl o) + { + assert RECORD != o && VOID != o : "called on type " + o; + + for ( RegType t = o ; t.isValid() ; t = t.baseType() ) + { + TupleDescriptor td = t.tupleDescriptor(); + if ( null != td ) + return td; + } + + return new TupleDescImpl.OfType(o); + } + + /* computation methods for API */ + + /** + * Obtain the tuple descriptor for an ordinary cataloged composite type. + *

    + * Every such type has a corresponding {@link RegClass RegClass}, which has + * the {@code SwitchPoint} that will govern the descriptor's invalidation, + * and a one-element array in which the descriptor should be stored. This + * method returns the array. + */ + private static TupleDescriptor.Interned[] + tupleDescriptorCataloged(RegTypeImpl o) + { + RegClassImpl c = (RegClassImpl)o.relation(); + + /* + * If this is not a composite type, c won't be valid, and our API + * contract is to return null (which means, here, return {null}). + */ + if ( ! c.isValid() ) + return new TupleDescriptor.Interned[] { null }; + + TupleDescriptor.Interned[] r = c.m_tupDescHolder; + + /* + * If c is RegClass.CLASSID itself, it has the descriptor by now + * (bootstrapped at the latest during the above relation() call, + * if it wasn't there already). + */ + if ( RegClass.CLASSID == c ) + { + assert null != r && null != r[0] : + "RegClass TupleDescriptor bootstrap outcome"; + return r; + } + + assert null == r : "RegClass has tuple descriptor when RegType doesn't"; + + /* + * Otherwise, do the work here, and store the descriptor in r. + * Can pass -1 for the modifier; Blessed types do not use this method. + */ + + ByteBuffer b = _lookupRowtypeTupdesc(o.oid(), -1); + assert null != b : "cataloged composite type tupdesc lookup"; + b.order(nativeOrder()); + r = new TupleDescriptor.Interned[]{ new TupleDescImpl.Cataloged(b, c) }; + return c.m_tupDescHolder = r; + } + + private static TupleDescriptor.Interned[] tupleDescriptorBlessed(Blessed o) + { + TupleDescriptor.Interned[] r = new TupleDescriptor.Interned[1]; + ByteBuffer b = _lookupRowtypeTupdesc(o.oid(), o.modifier()); + + /* + * If there is no registered tuple descriptor for this typmod, return an + * empty value to the current caller, but do not cache it; a later call + * could find one has been registered. + */ + if ( null == b ) + { + doNotCache(); + return r; + } + + b.order(nativeOrder()); + r[0] = new TupleDescImpl.Blessed(b, o); + return o.m_tupDescHolder = r; + } + + private static short length(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPLEN, INT2_INSTANCE); + } + + private static boolean byValue(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPBYVAL, BOOLEAN_INSTANCE); + } + + private static Type type(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return typeFromCatalog( + t.get(Att.TYPTYPE, INT1_INSTANCE)); + } + + private static char category(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return (char) + (0xff & t.get(Att.TYPCATEGORY, INT1_INSTANCE)); + } + + private static boolean preferred(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPISPREFERRED, BOOLEAN_INSTANCE); + } + + private static boolean defined(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPISDEFINED, BOOLEAN_INSTANCE); + } + + private static byte delimiter(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPDELIM, INT1_INSTANCE); + } + + private static RegClass relation(RegTypeImpl o) throws SQLException + { + /* + * If this is a handshake occurring when the corresponding RegClass + * has just looked *us* up, we are done. + */ + if ( null != o.m_dual ) + return o.m_dual; + + /* + * Otherwise, look up the corresponding RegClass, and do the same + * handshake in reverse. 
Either way, the connection is set up + * bidirectionally with one cache lookup starting from either. That + * can avoid extra work in operations (like TupleDescriptor caching) + * that may touch both objects, without complicating their code. + */ + TupleTableSlot t = o.cacheTuple(); + RegClass c = t.get(Att.TYPRELID, REGCLASS_INSTANCE); + + if ( c.isValid() ) + ((RegClassImpl)c).dualHandshake(o); + + return c; + } + + private static RegType element(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPELEM, REGTYPE_INSTANCE); + } + + private static RegType array(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPARRAY, REGTYPE_INSTANCE); + } + + private static RegProcedure input(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPINPUT, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure output(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPOUTPUT, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure receive(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPRECEIVE, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure send(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPSEND, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure modifierInput(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPMODIN, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure modifierOutput( + RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPMODOUT, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure analyze(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPANALYZE, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure subscript(RegTypeImpl o) + throws SQLException + { + RegProcedure p; + + if ( null == Att.TYPSUBSCRIPT ) // missing in this PG version + p = of(RegProcedure.CLASSID, InvalidOid); + else + { + TupleTableSlot t = o.cacheTuple(); + p = t.get(Att.TYPSUBSCRIPT, REGPROCEDURE_INSTANCE); + } + + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure narrowed = (RegProcedure)p; + + return narrowed; + } + + private static Alignment alignment(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return alignmentFromCatalog( + t.get(Att.TYPALIGN, INT1_INSTANCE)); + } + + private static Storage storage(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return storageFromCatalog( + t.get(Att.TYPSTORAGE, INT1_INSTANCE)); + } + + private static boolean notNull(RegTypeImpl o) throws 
SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPNOTNULL, BOOLEAN_INSTANCE); + } + + private static RegType baseType(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return Att.TYPBASETYPE_TYPTYPMOD + .applyOver(t, c -> + c.apply(OidAdapter.INT4_INSTANCE, INT4_INSTANCE, + ( oid, mod ) -> + CatalogObjectImpl.Factory.formMaybeModifiedType(oid, mod))); + } + + private static int dimensions(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPNDIMS, INT4_INSTANCE); + } + + private static RegCollation collation(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPCOLLATION, REGCOLLATION_INSTANCE); + } + + private static String defaultText(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPDEFAULT, TextAdapter.INSTANCE); + } + + /* API-like methods only used internally for now */ + + /** + * So that {@code TupleTableSlot} may be used uniformly as the API for + * Java <-> PostgreSQL data type conversions, let every type except + * unmodified {@code RECORD} or {@code VOID} have a "notional" + * {@code TupleDescriptor}. + *
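The baseType() computation a few methods above is where this file uses the new Projection and Cursor idiom: the two-column Projection Att.TYPBASETYPE_TYPTYPMOD is applied over the cached tuple and both values are handed to one lambda. As an illustrative sketch only (describeBase is not a member of the real class), the same projection and the same adapters could feed any other function of the pair:

    // Illustrative only; reuses Att.TYPBASETYPE_TYPTYPMOD and the same adapters
    // as baseType() does, but computes a throwaway description instead.
    private static String describeBase(RegTypeImpl o) throws SQLException
    {
        TupleTableSlot t = o.cacheTuple();
        return Att.TYPBASETYPE_TYPTYPMOD.applyOver(t, c ->
            c.apply(OidAdapter.INT4_INSTANCE, INT4_INSTANCE,
                ( oid, mod ) -> "typbasetype oid " + oid + ", typtypmod " + mod));
    }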

+ * For a cataloged or interned row type, or a domain over a cataloged row + * type, it is that type's {@link #tupleDescriptor tupleDescriptor} (or that + * of the transitive base type, in the case of a domain). Such a descriptor + * will be of type {@link TupleDescriptor.Interned Interned}. Otherwise, + * it is a {@link TupleDescriptor.Ephemeral} whose one, unnamed, attribute + * has this type. + *

    + * The caller is expected to to have checked for {@code RECORD} or + * {@code VOID} and not to call this method on those types. + */ + public TupleDescriptor notionalDescriptor() + { + try + { + MethodHandle h = m_slots[SLOT_NOTIONALDESC]; + return (TupleDescriptor)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* API methods */ + + @Override + public TupleDescriptor.Interned tupleDescriptor() + { + try + { + MethodHandle h = m_slots[SLOT_TUPLEDESCRIPTOR]; + return ((TupleDescriptor.Interned[])h.invokeExact(this, h))[0]; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public short length() + { + try + { + MethodHandle h = m_slots[SLOT_LENGTH]; + return (short)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public boolean byValue() + { + try + { + MethodHandle h = m_slots[SLOT_BYVALUE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public Type type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (Type)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public char category() + { + try + { + MethodHandle h = m_slots[SLOT_CATEGORY]; + return (char)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean preferred() + { + try + { + MethodHandle h = m_slots[SLOT_PREFERRED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean defined() + { + try + { + MethodHandle h = m_slots[SLOT_DEFINED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public byte delimiter() + { + try + { + MethodHandle h = m_slots[SLOT_DELIMITER]; + return (byte)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegClass relation() + { + try + { + MethodHandle h = m_slots[SLOT_RELATION]; + return (RegClass)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public RegType element() + { + try + { + MethodHandle h = m_slots[SLOT_ELEMENT]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public RegType array() + { + try + { + MethodHandle h = m_slots[SLOT_ARRAY]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure input() + { + try + { + MethodHandle h = m_slots[SLOT_INPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure output() + { + try + { + MethodHandle h = m_slots[SLOT_OUTPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure receive() + { + try + { + MethodHandle h = m_slots[SLOT_RECEIVE]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure send() + { + try + { + MethodHandle h = 
m_slots[SLOT_SEND]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure modifierInput() + { + try + { + MethodHandle h = m_slots[SLOT_MODIFIERINPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure modifierOutput() + { + try + { + MethodHandle h = m_slots[SLOT_MODIFIEROUTPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure analyze() + { + try + { + MethodHandle h = m_slots[SLOT_ANALYZE]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure subscript() + { + try + { + MethodHandle h = m_slots[SLOT_SUBSCRIPT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public Alignment alignment() + { + try + { + MethodHandle h = m_slots[SLOT_ALIGNMENT]; + return (Alignment)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public Storage storage() + { + try + { + MethodHandle h = m_slots[SLOT_STORAGE]; + return (Storage)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public boolean notNull() + { + try + { + MethodHandle h = m_slots[SLOT_NOTNULL]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType baseType() + { + try + { + MethodHandle h = m_slots[SLOT_BASETYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public int dimensions() + { + try + { + MethodHandle h = m_slots[SLOT_DIMENSIONS]; + return (int)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegCollation collation() + { + try + { + MethodHandle h = m_slots[SLOT_COLLATION]; + return (RegCollation)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public SQLXML defaultBin() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + TupleTableSlot s = cacheTuple(); + return s.get(Att.TYPDEFAULTBIN, SYNTHETIC_INSTANCE); + } + + @Override + public String defaultText() + { + try + { + MethodHandle h = m_slots[SLOT_DEFAULTTEXT]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * Return the expected zero value for {@code subId}. + *
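A usage note on defaultBin(), shown a few lines above: because every call manufactures a fresh SQLXML and JDBC permits reading it only once, a caller should consume the value immediately rather than hold the SQLXML. A hedged sketch using only standard java.sql.SQLXML API:

    // Hedged sketch: read typdefaultbin at once; the SQLXML may be read only one time.
    static String defaultBinAsString(RegType t) throws SQLException
    {
        SQLXML x = t.defaultBin();                    // fresh instance on every call
        return ( null == x ) ? null : x.getString();  // consume it right away
    }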

    + * For keying the {@code CacheMap}, we sneak type modifiers in there + * (PG types do not otherwise use {@code subId}), but that's an + * implementation detail that could be done a different way if upstream + * ever decided to have subIds for types, and having it show in the address + * triple of a modified type could be surprising to an old PostgreSQL hand. + */ + @Override + public int subId() + { + return 0; + } + + /** + * Return the type modifier. + *

    + * In this implementation, where we snuck it in as the third component + * of the cache key, sneak it back out. + */ + @Override + public int modifier() + { + int m = super.subId(); + if ( -1 == m ) + return 0; + return m; + } + + /** + * Return a synthetic tuple descriptor with a single element of this type. + */ + public TupleDescriptor singletonTupleDescriptor() + { + TupleDescriptor td = m_singleton; + if ( null != td ) + return td; + /* + * In case of a race, the synthetic tuple descriptors will be + * equivalent anyway. + */ + return m_singleton = new TupleDescImpl.OfType(this); + } + + /** + * Represents a type that has been mentioned without an accompanying type + * modifier (or with the 'unspecified' value -1 for its type modifier). + */ + static class NoModifier extends RegTypeImpl + { + /** + * Count of instances subject to invalidation. + *

    + * Only accessed in invalidate and SP.onFirstUse, both on the PG thread. + */ + private static int s_instances; + + private static class SP extends SwitchPoint + { + @Override + protected void onFirstUse() + { + if ( 1 == ++ s_instances ) + sysCacheInvalArmed(TYPEOID_CB, true); + } + } + + private final SwitchPoint[] m_sp; + + @Override + SwitchPoint cacheSwitchPoint() + { + return m_sp[0]; + } + + NoModifier() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_sp = new SwitchPoint[] { new SP() }; + } + + @Override + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_sp[0]; + if ( sp.unused() ) + return; + sps.add(sp); + m_sp[0] = new SP(); + if ( 0 == -- s_instances ) + sysCacheInvalArmed(TYPEOID_CB, false); + } + + @Override + public int modifier() + { + return -1; + } + + @Override + public RegType modifier(int typmod) + { + if ( -1 == typmod ) + return this; + return + CatalogObjectImpl.Factory.formMaybeModifiedType(oid(), typmod); + } + + @Override + public RegType withoutModifier() + { + return this; + } + } + + /** + * Subclass that additionally implements + * {@link RegType.Unresolved RegType.Unresolved}. + */ + static class Unresolved extends NoModifier implements RegType.Unresolved + { + } + + /** + * Represents a type that is not {@code RECORD} and has a type modifier that + * is not the unspecified value. + *
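NoModifier.invalidate() above is the flip side of the pattern every API method in these classes follows, namely MethodHandle h = m_slots[SLOT_X]; return (T)h.invokeExact(this, h). In outline, each slot holds a handle that computes its value once and then sits behind a SwitchPoint; invalidation publishes a fresh SwitchPoint so the next call recomputes. The following is a minimal standalone sketch of that idea using only the JDK's java.lang.invoke.SwitchPoint; the real SwitchPointCache.Builder seen in the static initializers generates such handles for whole families of slots and adds extras like the onFirstUse arming in SP above.

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import static java.lang.invoke.MethodHandles.constant;
    import static java.lang.invoke.MethodHandles.dropArguments;
    import static java.lang.invoke.MethodType.methodType;
    import java.lang.invoke.SwitchPoint;

    public class SlotSketch
    {
        private static final SwitchPoint[] s_point = { new SwitchPoint() };
        private static final MethodHandle COMPUTE;

        static
        {
            try
            {
                COMPUTE = MethodHandles.lookup().findStatic(SlotSketch.class, "compute",
                    methodType(String.class, SlotSketch.class, MethodHandle.class));
            }
            catch ( ReflectiveOperationException e )
            {
                throw new ExceptionInInitializerError(e);
            }
        }

        private final MethodHandle[] m_slots = { COMPUTE };

        /* fallback: do the work, then park a constant behind the current SwitchPoint */
        private static String compute(SlotSketch o, MethodHandle self) // self unused here
        {
            String v = "computed at " + System.nanoTime();
            MethodHandle cached = dropArguments(
                constant(String.class, v), 0, SlotSketch.class, MethodHandle.class);
            o.m_slots[0] = s_point[0].guardWithTest(cached, COMPUTE);
            return v;
        }

        /* API method, in the same shape as the ones in this patch */
        public String value()
        {
            try
            {
                MethodHandle h = m_slots[0];
                return (String)h.invokeExact(this, h);
            }
            catch ( Throwable t )
            {
                throw new RuntimeException(t);
            }
        }

        /* invalidation: publish a fresh SwitchPoint, then zap the old one */
        public static void invalidate()
        {
            SwitchPoint old = s_point[0];
            s_point[0] = new SwitchPoint();
            SwitchPoint.invalidateAll(new SwitchPoint[] { old });
        }
    }

Two calls to value() on one instance return the same string; after invalidate(), the next call recomputes and caches again behind the new SwitchPoint.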

    + * When the {@code RECORD} type appears in PostgreSQL with a type modifier, + * that is a special case; see {@link Blessed Blessed}. + */ + static class Modified extends RegTypeImpl + { + private final NoModifier m_base; + + @Override + SwitchPoint cacheSwitchPoint() + { + return m_base.m_sp[0]; + } + + Modified(NoModifier base) + { + super(base.m_slots); + m_base = base; // must keep it live, not only share its slots + } + + @Override + public RegType modifier(int typmod) + { + if ( modifier() == typmod ) + return this; + return m_base.modifier(typmod); + } + + @Override + public RegType withoutModifier() + { + return m_base; + } + + /** + * Whether a just-mentioned modified type "exists" depends on whether + * its unmodified type exists and has a modifier input function. + *

    + * No attempt is made here to verify that the modifier value is one that + * the modifier input/output functions would produce or accept. + */ + @Override + public boolean exists() + { + return m_base.exists() && modifierInput().isValid(); + } + + @Override + public String toString() + { + String prefix = super.toString(); + return prefix + "(" + modifier() + ")"; + } + } + + /** + * Represents the "row type" of a {@link TupleDescriptor TupleDescriptor} + * that has been programmatically constructed and interned ("blessed"). + *

    + * Such a type is represented in PostgreSQL as the type {@code RECORD} + * with a type modifier assigned uniquely for the life of the backend. + */ + static class Blessed extends RegTypeImpl + { + /** + * Associated tuple descriptor, redundantly kept accessible here as well + * as opaquely bound into a {@code SwitchPointCache} method handle. + *

    + * A {@code Blessed} descriptor has no associated {@code RegClass}, so + * a slot for the descriptor is provided here. No invalidation events + * are expected for a blessed type, but the one-element array form here + * matches that used in {@code RegClass} for cataloged descriptors, to + * avoid multiple cases in the code. Only accessed from + * {@code SwitchPointCache} computation methods and + * {@code TupleDescImpl} factory methods, all of which execute on the PG + * thread; no synchronization fuss needed. + *

    + * When null, no computation method has run, and the state is not known. + * Otherwise, the single element is the result to be returned by + * the {@code tupleDescriptor()} API method. + */ + TupleDescriptor.Interned[] m_tupDescHolder; + private final MethodHandle[] m_moreSlots; + private static final Function s_initializer; + private static final int SLOT_TDBLESSED; + private static final int NSLOTS; + + static + { + int i = 0; + s_initializer = + new Builder<>(Blessed.class) + .withLookup(lookup().in(RegTypeImpl.class)) + .withSwitchPoint(Blessed::cacheSwitchPoint) + .withSlots(o -> o.m_moreSlots) + .withCandidates(RegTypeImpl.class.getDeclaredMethods()) + .withDependent("tupleDescriptorBlessed", SLOT_TDBLESSED = i++) + .build(); + NSLOTS = i; + } + + @Override + SwitchPoint cacheSwitchPoint() + { + return ((NoModifier)RECORD).m_sp[0]; + } + + Blessed() + { + super(((RegTypeImpl)RECORD).m_slots); + // RECORD is static final, no other effort needed to keep it live + m_moreSlots = s_initializer.apply(new MethodHandle[NSLOTS]); + } + + /** + * The tuple descriptor registered in the type cache for this 'blessed' + * type, or null if none. + *

    + * A null value is not sticky; it would be possible to 'mention' a + * blessed type with a not-yet-used typmod, which could then later exist + * after a tuple descriptor has been interned. (Such usage would be odd, + * though; typically one will obtain a blessed instance from an existing + * tuple descriptor.) + */ + @Override + public TupleDescriptor.Interned tupleDescriptor() + { + try + { + MethodHandle h = m_moreSlots[SLOT_TDBLESSED]; + return ((TupleDescriptor.Interned[])h.invokeExact(this, h))[0]; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType modifier(int typmod) + { + throw new UnsupportedOperationException( + "may not alter the type modifier of an interned row type"); + } + + @Override + public RegType withoutModifier() + { + return RECORD; + } + + /** + * Whether a just-mentioned blessed type "exists" depends on whether + * there is a tuple descriptor registered for it in the type cache. + *

    + * A false value is not sticky; it would be possible to 'mention' a + * blessed type with a not-yet-used typmod, which could then later exist + * after a tuple descriptor has been interned. (Such usage would be odd, + * though; typically one will obtain a blessed instance from an existing + * tuple descriptor.) + */ + @Override + public boolean exists() + { + return null != tupleDescriptor(); + } + + @Override + public String toString() + { + String prefix = super.toString(); + return prefix + "[" + modifier() + "]"; + } + } + + private static Type typeFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'b': return Type.BASE; + case (byte)'c': return Type.COMPOSITE; + case (byte)'d': return Type.DOMAIN; + case (byte)'e': return Type.ENUM; + case (byte)'m': return Type.MULTIRANGE; + case (byte)'p': return Type.PSEUDO; + case (byte)'r': return Type.RANGE; + } + throw unchecked(new SQLException( + "unrecognized Type type '" + (char)b + "' in catalog", "XX000")); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ResourceOwnerImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ResourceOwnerImpl.java new file mode 100644 index 000000000..ff041d5c7 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ResourceOwnerImpl.java @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; + +import org.postgresql.pljava.internal.CacheMap; +import org.postgresql.pljava.internal.DualState; +import static org.postgresql.pljava.internal.DualState.m; +import org.postgresql.pljava.internal.LifespanImpl; + +import org.postgresql.pljava.model.ResourceOwner; + +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; +import static org.postgresql.pljava.pg.DatumUtils.storePointer; + +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; + +/** + * A PostgreSQL {@code ResourceOwner}, one of the things that can serve as + * a PL/Java {@code Lifespan}. + *

    + * The designer of this PostgreSQL object believed strongly in encapsulation, + * so very strongly that there is not any C header exposing its structure, + * and any operations to be exposed here will have to be calls through JNI. + * While a {@code ResourceOwner} does have a name (which will appear in log + * messages involving it), there's not even an exposed API to retrieve that. + * So this object will be not much more than a stub, known by its address + * and capable of serving as a PL/Java lifespan. + */ +public class ResourceOwnerImpl extends LifespanImpl +implements ResourceOwner, LifespanImpl.Addressed +{ + static final ByteBuffer[] s_knownOwners; + + static final CacheMap s_map = + CacheMap.newThreadConfined(() -> ByteBuffer.allocate(SIZEOF_DATUM)); + + static + { + ByteBuffer[] bs = EarlyNatives._window(ByteBuffer.class); + /* + * The first one windows CurrentResourceOwner. Set the correct byte + * order but do not make it read-only; operations may be provided + * for setting it. + */ + bs[0] = bs[0].order(nativeOrder()); + /* + * The rest are made native-ordered and read-only. + */ + for ( int i = 1; i < bs.length; ++ i ) + bs[i] = asReadOnlyNativeOrder(bs[i]); + s_knownOwners = bs; + } + + static ResourceOwner known(int which) + { + ByteBuffer global = s_knownOwners[which]; + return doInPG(() -> + { + long rso = fetchPointer(global, 0); + if ( 0 == rso ) + return null; + + return fromAddress(rso); + }); + } + + public static ResourceOwner fromAddress(long address) + { + assert threadMayEnterPG() : m("ResourceOwner thread"); + + /* + * Cache strongly; see LifespanImpl javadoc. + */ + return s_map.stronglyCache( + b -> + { + if ( 4 == SIZEOF_DATUM ) + b.putInt((int)address); + else + b.putLong(address); + }, + b -> new ResourceOwnerImpl(b) + ); + } + + /** + * Specialized method intended, so far, only for + * {@code PgSavepoint}'s use. + *

    + * Only to be called on the PG thread. + */ + public static long getCurrentRaw() + { + assert threadMayEnterPG() : m("ResourceOwner thread"); + return fetchPointer(s_knownOwners[0], 0); + } + + /** + * Even more specialized method intended, so far, only for + * {@code PgSavepoint}'s use. + *

    + * Only to be called on the PG thread. + */ + public static void setCurrentRaw(long owner) + { + assert threadMayEnterPG() : m("ResourceOwner thread"); + storePointer(s_knownOwners[0], 0, owner); + } + + /* + * Called only from JNI. + */ + private static void callback(long nativePointer) + { + CacheMap.Entry e = s_map.find( + b -> + { + if ( 4 == SIZEOF_DATUM ) + b.putInt((int)nativePointer); + else + b.putLong(nativePointer); + } + ); + + if ( null == e ) + return; + + ResourceOwnerImpl r = e.get(); + if ( null == r ) + return; + + r.invalidate(); + e.remove(); + } + + /** + * The {@code ByteBuffer} keying this object. + *
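The two raw accessors above exist so that code such as PgSavepoint can bracket an operation that may change PostgreSQL's CurrentResourceOwner and then put it back. A hedged sketch of that intended save-and-restore shape (PG thread only, as both methods assert):

    long saved = ResourceOwnerImpl.getCurrentRaw();   // snapshot CurrentResourceOwner
    try
    {
        // ... work that may install a different CurrentResourceOwner ...
    }
    finally
    {
        ResourceOwnerImpl.setCurrentRaw(saved);       // restore it unconditionally
    }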

+ * As described for {@code CatalogObjectImpl}: because we'd like to be able + * to retrieve the address, and the address is exactly what the ByteBuffer + * held as the key in the CacheMap contains anyway, we just keep a reference + * to that ByteBuffer here. We must treat it as read-only, even if it hasn't + * officially been made that way. + *

    + * The contents are needed only for non-routine operations like + * {@code toString}, where an extra {@code fetchPointer} doesn't + * break the bank. + */ + private final ByteBuffer m_key; + private boolean m_valid = true; + + private ResourceOwnerImpl(ByteBuffer key) + { + m_key = key; + } + + @Override // Addressed + public long address() + { + if ( m_valid ) + return fetchPointer(m_key, 0); + throw new IllegalStateException( + "address may not be taken of invalidated ResourceOwner"); + } + + @Override + public String toString() + { + return String.format("%s[%#x]", + super.toString(), fetchPointer(m_key, 0)); + } + + private void invalidate() + { + lifespanRelease(); + m_valid = false; + // nothing else to do here. + } + + private static class EarlyNatives + { + /** + * Returns an array of ByteBuffer, one covering each PostgreSQL + * known resource owner global, in the same order as the arbitrary + * indices defined in the API class CatalogObject.Factory, which are + * what will be passed to the known() method. + *
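Everything above that needs the owner's native address (known(), address(), toString()) reads it from a native-ordered ByteBuffer through DatumUtils.fetchPointer, which is outside this hunk. A standalone sketch of what such a pointer read amounts to, assuming only that SIZEOF_DATUM is 4 or 8 and that the real helper may differ in detail:

    import java.nio.ByteBuffer;
    import static java.nio.ByteOrder.nativeOrder;

    final class PointerWindowSketch
    {
        static final int SIZEOF_DATUM = 8;        // assume a 64-bit build for the sketch

        static long fetchPointer(ByteBuffer b, int offset)
        {
            return 4 == SIZEOF_DATUM
                ? b.getInt(offset) & 0xFFFFFFFFL  // widen a 32-bit datum, no sign extension
                : b.getLong(offset);
        }

        public static void main(String[] args)
        {
            ByteBuffer window = ByteBuffer.allocate(SIZEOF_DATUM).order(nativeOrder());
            window.putLong(0, 0x7f00deadbeefL);   // stand-in for a real ResourceOwner address
            System.out.printf("%#x%n", fetchPointer(window, 0));
        }
    }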

    + * Takes a {@code Class} argument, to save the native + * code a lookup. + */ + private static native ByteBuffer[] _window( + Class component); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TablespaceImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TablespaceImpl.java new file mode 100644 index 000000000..d3d2b8811 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TablespaceImpl.java @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.TABLESPACEOID; // syscache + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Implementation of the {@link Tablespace Tablespace} interface. + */ +class TablespaceImpl extends Addressed +implements + Shared, Named, Owned, + AccessControlled, Tablespace +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TABLESPACEOID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(TablespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SPCNAME, SIMPLE_INSTANCE); + } + + private static RegRole owner(TablespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SPCOWNER, REGROLE_INSTANCE); + } + + private static List grants(TablespaceImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SPCACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of Tablespace */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + TablespaceImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_OPTIONS; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(TablespaceImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(TablespaceImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent("options", SLOT_OPTIONS = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute SPCNAME; + static final Attribute SPCOWNER; + static final Attribute SPCACL; + static final Attribute SPCOPTIONS; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "spcname", + "spcowner", + "spcacl", + "spcoptions" + ).iterator(); + + SPCNAME = itr.next(); + SPCOWNER = itr.next(); + SPCACL = itr.next(); + SPCOPTIONS = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static Map options(TablespaceImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.SPCOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + /* API methods */ + + @Override + public Map options() + { + try + { + MethodHandle h = m_slots[SLOT_OPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TargetListImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TargetListImpl.java new file mode 100644 index 000000000..092b573c6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TargetListImpl.java @@ -0,0 +1,1917 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import org.postgresql.pljava.Adapter.AdapterException; +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.TargetList; +import org.postgresql.pljava.TargetList.Cursor; +import org.postgresql.pljava.TargetList.Projection; + +import + org.postgresql.pljava.internal.AbstractNoSplitList.IteratorNonSpliterator; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import java.lang.ref.WeakReference; + +import java.sql.SQLException; + +import java.util.AbstractSequentialList; +import java.util.Arrays; +import static java.util.Arrays.copyOfRange; +import java.util.BitSet; +import java.util.Collection; +import java.util.IntSummaryStatistics; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.NoSuchElementException; +import static java.util.Objects.checkFromToIndex; +import static java.util.Objects.checkIndex; +import static java.util.Objects.requireNonNull; +import java.util.Spliterator; +import static java.util.Spliterator.IMMUTABLE; +import static java.util.Spliterator.NONNULL; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterator.SIZED; +import java.util.Spliterators; +import static java.util.Spliterators.spliteratorUnknownSize; + +import java.util.function.IntUnaryOperator; + +import static java.util.stream.Collectors.joining; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +/** + * Implementation of {@link TargetList TargetList}. + */ + /* + * This abstract base class in fact implements neither TargetList nor + * Projection. + * + * It always holds a TupleDescriptor and a BitSet. In the concrete subclasses + * that represent a subset of the attributes with no repetition or permutation, + * the BitSet is all there is. Subclasses that need to represent repetition + * (TargetList only) or permutation (TargetList or Projection) also include + * a mapping array. + * + * Bits in the BitSet always (even in multiply-derived projections) correspond + * to indices in the original TupleDescriptor, streamlining contains() tests. + */ +abstract class TargetListImpl extends AbstractSequentialList +{ + protected static final Projection EMPTY = new P(null, new BitSet()); + + protected final TupleDescriptor m_tdesc; + protected final BitSet m_bitset; + + protected TargetListImpl(TupleDescriptor tdesc, BitSet bitset) + { + m_tdesc = tdesc; + m_bitset = bitset; // not cloned here; caller should ensure no aliasing + } + + @Override // Collection + public boolean contains(Object o) + { + if ( ! 
(o instanceof AttributeImpl) ) + return false; + AttributeImpl ai = (AttributeImpl)o; + return m_bitset.get(ai.subId() - 1) && m_tdesc.contains(ai); + } + + @Override // List + public int indexOf(Object o) // override in M where reordering is possible + { + if ( ! contains(o) ) + return -1; + int index = ((AttributeImpl)o).subId() - 1; + return (int)m_bitset.stream().takeWhile(i -> i < index).count(); + } + + @Override // List + public int lastIndexOf(Object o) + { + return indexOf(o); // override in MT where o could appear more than once + } + + @Override + public ListIterator listIterator() + { + return listIterator(0); + } + + @Override + public ListIterator listIterator(int index) + { + checkIndex(index, size() + 1); // ListIterator can point beyond end + int attno = m_bitset.stream().skip(index).findFirst().orElse(-1); + return new BLI(m_tdesc, m_bitset, attno, index); + } + + @Override // Collection + public int size() + { + return m_bitset.cardinality(); + } + + public R applyOver( + Iterable tuples, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver((TargetList)this, tuples, f); + } + + public R applyOver( + TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver((TargetList)this, tuple, f); + } + + public Projection project(Simple... names) + { + return project(m_tdesc, (TargetList)this, m_bitset, names); + } + + public Projection project(Attribute... attrs) + { + return project(m_tdesc, (TargetList)this, m_bitset.length(), attrs); + } + + public Projection project(int... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, indices.length, i -> indices[i]); + } + + public Projection sqlProject(int... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, indices.length, i -> indices[i]); + } + + public Projection project(short... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, indices.length, i -> indices[i]); + } + + public Projection sqlProject(short... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, indices.length, i -> indices[i]); + } + + public Projection project(BitSet indices) + { + return project(Flavor.ZEROBASED, indices); + } + + public Projection sqlProject(BitSet indices) + { + return project(Flavor.SQL, indices); + } + + abstract Projection project(Flavor flavor, int n, IntUnaryOperator indices); + + abstract Projection project(Flavor flavor, BitSet indices); + + /** + * Whether, when building a {@link TargetList TargetList} + * or {@link Projection} from numeric indices, to treat the indices as + * {@link #ZEROBASED ZEROBASED} or {@link #SQL SQL} (one-based). 
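Concretely, the two method families above differ only in that offset, and the check() validation below enforces the rules spelled out in each checkFormat string. A hedged usage sketch (column names hypothetical; a TupleDescriptor can stand in as a Projection, as the comment in P's constructor notes):

    Projection cols = someTupleDescriptor;   // three columns: "id", "name", "score"
    Projection a = cols.project(0, 2);       // ZEROBASED: selects "id" and "score"
    Projection b = cols.sqlProject(1, 3);    // SQL, one-based: the same two columns
    cols.project(1, 1);                      // IllegalArgumentException: not distinct
    cols.sqlProject(0);                      // IllegalArgumentException: must be > 0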
+ */ + enum Flavor + { + ZEROBASED( + 0, "project", + "project() indices must be distinct, >= 0, and < %d: %s") + { + BitSet cloned(BitSet b) + { + return (BitSet)b.clone(); + } + }, + SQL( + 1, "sqlProject", + "sqlProject() indices must be distinct, > 0, and <= %d: %s") + { + BitSet cloned(BitSet b) + { + return b.get(1, b.length()); + } + }; + + int offset; + String method; + String checkFormat; + + Flavor(int offset, String method, String checkFormat) + { + this.offset = offset; + this.method = method; + this.checkFormat = checkFormat; + } + + abstract BitSet cloned(BitSet b); + + /* + * On success, returns the max of the supplied indices, minus offset. + * Returns -1 for the empty set. + */ + int check(int size, int inLen, IntUnaryOperator indices) + { + if ( 0 == inLen ) + return -1; + + IntSummaryStatistics s = + IntStream.range(0, inLen).map(indices).distinct() + .summaryStatistics(); + int max = s.getMax(); + + if ( s.getCount() < inLen || inLen > size + || s.getMin() < offset || size + offset <= max ) + throw new IllegalArgumentException( + String.format(checkFormat, size, + IntStream.range(0, inLen).map(indices) + .mapToObj(Integer::toString) + .collect(joining(",")))); + + return max - offset; + } + + int check(int size, BitSet indices) + { + if ( null == indices ) + throw new NullPointerException(method + "() indices null"); + + if ( indices.isEmpty() ) + return -1; + + int max = indices.length() - 1; + int min = indices.nextSetBit(0); + + if ( min < offset || size + offset <= max ) + throw new IllegalArgumentException( + String.format(checkFormat, size, indices)); + + return max - offset; + } + } + + static final class P extends TargetListImpl implements Projection + { + protected P(TupleDescriptor tdesc, BitSet bitset) + { + /* + * Nothing here prevents construction of an instance with bits for + * all of tdesc's columns. The idea is never to return such a thing + * (just return tdesc itself for that case), but one may be + * constructed as a temporary within the static methods here that + * TupleDescriptor uses. 
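A usage-level sketch of the two flavors (not from the patch; it assumes a Projection proj already obtained elsewhere, and that the project/sqlProject overloads shown in this file are the ones exposed on Projection): both calls below name the same columns, with the SQL-style indices one higher than the zero-based ones.

    import org.postgresql.pljava.TargetList.Projection;

    final class FlavorUsageSketch
    {
        /*
         * Select the third column and then the first, in that order:
         * project() takes zero-based indices, sqlProject() takes the same
         * positions expressed SQL-style, one-based.
         */
        static Projection thirdThenFirst(Projection proj)
        {
            Projection zeroBased = proj.project(2, 0);
            Projection oneBased  = proj.sqlProject(3, 1);
            return zeroBased; // oneBased denotes the same columns
        }
    }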
+ */ + super(tdesc, bitset); + } + + @Override // List + public Projection subList(int fromIndex, int toIndex) + { + int n = size(); + if ( 0 == fromIndex && n == toIndex ) + return this; + checkFromToIndex(fromIndex, toIndex, n); + if ( fromIndex == toIndex ) + return EMPTY; + + BitSet newBits = new BitSet(m_bitset.length()); + + m_bitset.stream().skip(fromIndex).limit(toIndex - fromIndex) + .forEach(newBits::set); + + return new P(m_tdesc, newBits); + } + + @Override + Projection project(Flavor flavor, int inLen, IntUnaryOperator indices) + { + final int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, inLen, indices); + if ( -1 == max ) + return EMPTY; + + boolean increasing = increasing(inLen, indices); + + if ( increasing ) // no permutation involved, make a P instance + { + if ( inLen == n ) // n distinct increasing values 0..n-1 + return this; // can only be this exactly + + BitSet newBits = new BitSet(m_bitset.length()); + + for ( + int i = 0, // index in supplied indices + j = 0, // index 1st col in this proj + v = m_bitset.nextSetBit(0)// tupledesc index of 1st col + ; + v >= 0 // nextSetBit returns -1 when done + ; + ++ j, // next col in this projection + v = m_bitset.nextSetBit(v + 1) + ) + { + if ( j < indices.applyAsInt(i)-offset )//j not a wanted col + continue; + newBits.set(v); // set tupledesc index in new set + if ( ++ i == inLen ) // next wanted index + break; + } + + return new P(m_tdesc, newBits); + } + + /* + * The indices are not strictly increasing; make MP instance with + * a map array to represent permutation. + * + * First expand this current projection's tupledesc indices + * from BitSet into array form. + */ + short[] td_indices = new short [ n ]; + + for ( + int i = 0, + v = m_bitset.nextSetBit(0) + ; + v >= 0 + ; + ++ i, + v = m_bitset.nextSetBit(++v) + ) + { + td_indices[i] = (short)v; + } + + /* + * Now construct a new BitSet and map array for an MP instance + */ + BitSet newBits = new BitSet(td_indices[max]); + short[] map = new short [ inLen ]; + for ( int i = 0; i < map.length; ++ i ) + { + newBits.set(map[i] = td_indices[indices.applyAsInt(i)-offset]); + } + + return new MP(m_tdesc, newBits, map); + } + + @Override + Projection project(Flavor flavor, BitSet indices) + { + final int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, indices); + if ( -1 == max ) + return EMPTY; + + if ( indices.cardinality() == n ) + return this; + + BitSet newBits = new BitSet(m_bitset.length()); + + for ( + int i = 0, + v = m_bitset.nextSetBit(0), + w = indices.nextSetBit(0) + ; + v >= 0 + ; + ++ i, + v = m_bitset.nextSetBit(v + 1) + ) + { + if ( i < w - offset ) + continue; + newBits.set(v); + w = indices.nextSetBit(w); + if ( w < 0 ) + break; + } + + return new P(m_tdesc, newBits); + } + } + + abstract static class M extends TargetListImpl + { + protected final short[] m_map; + + M(TupleDescriptor tdesc, BitSet bits, short[] map) + { + super(tdesc, bits); + m_map = map; + } + + @Override // Collection + public int size() + { + return m_map.length; + } + + @Override // List + public Attribute get(int index) + { + checkIndex(index, m_map.length); + return m_tdesc.get(m_map[index]); + } + + @Override // List + public int indexOf(Object o) + { + if ( ! contains(o) ) + return -1; + int index = ((AttributeImpl)o).subId() - 1; + for ( int i = 0; i < m_map.length; ++ i ) + if ( index == m_map[i] ) + return i; + + throw new AssertionError("contains vs. 
indexOf"); + } + + @Override + public ListIterator listIterator(int index) + { + checkIndex(index, size() + 1); + return new MLI(m_tdesc, m_map, index); + } + + @Override // List + public TargetList subList(int fromIndex, int toIndex) + { + if ( 0 == fromIndex && m_map.length == toIndex ) + return (TargetList)this; + checkFromToIndex(fromIndex, toIndex, m_map.length); + if ( fromIndex == toIndex ) + return EMPTY; + + BitSet newBits = new BitSet(m_bitset.length()); + short[] map = copyOfRange(m_map, fromIndex, toIndex); + + boolean increasing = true; + boolean duplicates = false; + + for ( short mapped : map ) + { + if ( newBits.get(mapped) ) + duplicates = true; + if ( mapped < newBits.length() - 1 ) + increasing = false; + newBits.set(mapped); + } + + if ( duplicates ) + return new MT(m_tdesc, newBits, map); + + if ( increasing ) + return new P(m_tdesc, newBits); + + return new MP(m_tdesc, newBits, map); + } + + @Override + Projection project(Flavor flavor, int inLen, IntUnaryOperator indices) + { + final int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, inLen, indices); + if ( -1 == max ) + return EMPTY; + + if ( ( inLen == n ) && increasing(inLen, indices) + && this instanceof Projection ) + return (Projection)this; + + BitSet newBits = new BitSet(m_map[max]); + short[] map = new short [ inLen ]; + + boolean increasing = true; + boolean duplicates = false; + + for ( int i = 0 ; i < inLen ; ++ i ) + { + short mapped = m_map[indices.applyAsInt(i) - offset]; + if ( newBits.get(mapped) ) + duplicates = true; + if ( mapped < newBits.length() - 1 ) + increasing = false; + newBits.set(mapped); + map[i] = mapped; + } + + if ( duplicates ) + throw new IllegalArgumentException( + flavor.method + "() result would have repeated attributes" + + " and not be a Projection"); + + if ( increasing ) + return new P(m_tdesc, newBits); + + return new MP(m_tdesc, newBits, map); + } + + @Override + Projection project(Flavor flavor, BitSet indices) + { + final int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, indices); + if ( -1 == max ) + return EMPTY; + + BitSet newBits = new BitSet(m_bitset.length()); + short[] map = new short [ indices.cardinality() ]; + + boolean increasing = true; + boolean duplicates = false; + + for ( + int i = 0, + v = indices.nextSetBit(0) + ; + v >= 0 + ; + ++i, + v = m_bitset.nextSetBit(v + 1) + ) + { + short mapped = m_map[v - offset]; + if ( mapped < newBits.length() - 1 ) + increasing = false; + if ( newBits.get(mapped) ) + duplicates = true; + newBits.set(mapped); + map[i] = mapped; + } + + if ( duplicates ) + throw new IllegalArgumentException( + flavor.method + "() result would have repeated attributes" + + " and not be a Projection"); + + if ( increasing ) + return new P(m_tdesc, newBits); + + return new MP(m_tdesc, newBits, map); + } + } + + static final class MP extends M implements Projection + { + MP(TupleDescriptor tdesc, BitSet bits, short[] map) + { + super(tdesc, bits, map); + } + + @Override // List + public Projection subList(int fromIndex, int toIndex) + { + return (Projection)super.subList(fromIndex, toIndex); + } + } + + static final class MT extends M implements TargetList + { + MT(TupleDescriptor tdesc, BitSet bits, short[] map) + { + super(tdesc, bits, map); + } + + @Override // List + public int lastIndexOf(Object o) + { + if ( ! 
contains(o) ) + return -1; + int index = ((AttributeImpl)o).subId() - 1; + for ( int i = m_map.length; i --> 0; ) + if ( index == m_map[i] ) + return i; + + throw new AssertionError("contains vs. lastIndexOf"); + } + } + + static boolean increasing(int nValues, IntUnaryOperator values) + { + if ( nValues < 2 ) + return true; + for ( int i = 1; i < nValues; ++ i ) + if ( values.applyAsInt(i) <= values.applyAsInt(i-1) ) + return false; + return true; + } + + static Projection subList(TupleDescriptor src, int fromIndex, int toIndex) + { + int n = src.size(); + + if ( 0 == fromIndex && n == toIndex ) + return src; + checkFromToIndex(fromIndex, toIndex, n); + if ( fromIndex == toIndex ) + return EMPTY; + BitSet newBits = new BitSet(toIndex); + newBits.set(fromIndex, toIndex); + return new P(src, newBits); + } + + static Projection project(TupleDescriptor src, int... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, src, indices.length, i -> indices[i]); + } + + static Projection sqlProject(TupleDescriptor src, int... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, src, indices.length, i -> indices[i]); + } + + static Projection project(TupleDescriptor src, short... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, src, indices.length, i -> indices[i]); + } + + static Projection sqlProject(TupleDescriptor src, short... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, src, indices.length, i -> indices[i]); + } + + static Projection project(TupleDescriptor src, Simple... names) + { + int n = src.size(); + BitSet b = new BitSet(n); + b.set(0, n); + return project(src, src, b, names); + } + + static Projection project(TupleDescriptor src, Attribute... attrs) + { + return project(src, src, src.size(), attrs); + } + + private static Projection project( + Flavor flavor, TupleDescriptor src, int inLen, IntUnaryOperator indices) + { + final int offset = flavor.offset; + + int n = src.size(); + int max = flavor.check(n, inLen, indices); + if ( -1 == max ) + return EMPTY; + + if ( ( inLen == n ) && increasing(inLen, indices) ) + return src; + + BitSet newBits = new BitSet(max); + short[] map = new short [ inLen ]; + + boolean increasing = true; + + for ( int i = 0 ; i < inLen ; ++ i ) + { + int idx = indices.applyAsInt(i) - offset; + if ( idx < newBits.length() - 1 ) + increasing = false; + newBits.set(idx); + map[i] = (short)idx; + } + + if ( increasing ) + return new P(src, newBits); + + return new MP(src, newBits, map); + } + + private static Projection project( + TupleDescriptor base, TargetList proxy, BitSet proxyHas, + Simple... names) + { + if ( requireNonNull(names, "project() names null").length == 0 ) + return EMPTY; + + /* + * An exception could be thrown here if names.length > n, but that + * condition ensures the later exception for names left unmatched + * will have to be thrown, and as long as that's going to happen + * anyway, the extra work to see just what names didn't match + * produces a more helpful message. 
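The match-then-report-leftovers shape of the name lookup above, restated with plain JDK types (illustrative only; names invented here):

    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;

    public class UnmatchedNamesSketch
    {
        /** Indices of the wanted names within available, in wanted order. */
        static int[] resolve(List<String> available, String... wanted)
        {
            BitSet yetToMatch = new BitSet(wanted.length);
            yetToMatch.set(0, wanted.length);
            int[] map = new int [ wanted.length ];

            for ( int i = 0; i < available.size() && ! yetToMatch.isEmpty(); ++ i )
            {
                String name = available.get(i);
                for ( int j = yetToMatch.nextSetBit(0); 0 <= j;
                        j = yetToMatch.nextSetBit(j + 1) )
                {
                    if ( name.equals(wanted[j]) )
                    {
                        map[j] = i;
                        yetToMatch.clear(j);
                        break;
                    }
                }
            }

            if ( ! yetToMatch.isEmpty() )
            {
                List<String> missing = new ArrayList<>();
                yetToMatch.stream().forEach(j -> missing.add(wanted[j]));
                throw new IllegalArgumentException(
                    "left unmatched by name: " + missing);
            }
            return map;
        }

        public static void main(String[] args)
        {
            int[] m = resolve(List.of("a", "b", "c"), "c", "a");
            System.out.println(java.util.Arrays.toString(m)); // [2, 0]
        }
    }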
+ */ + + BitSet namesYetToMatch = new BitSet(names.length); + namesYetToMatch.set(0, names.length); + + BitSet newBits = new BitSet(proxyHas.length()); + short[] map = new short [ names.length ]; + + boolean increasing = true; + int jMax = -1; + +outer: for ( + int i = proxyHas.nextSetBit(0); + 0 <= i; + i = proxyHas.nextSetBit(i+1) + ) + { + Simple name = base.get(i).name(); + + for ( + int j = namesYetToMatch.nextSetBit(0); + 0 <= j; + j = namesYetToMatch.nextSetBit(j+1) + ) + { + if ( name.equals(names[j]) ) + { + if ( j < jMax ) + increasing = false; + else + jMax = j; + newBits.set(i); + map[j] = (short)i; + namesYetToMatch.clear(j); + if ( namesYetToMatch.isEmpty() ) + break outer; + break; + } + } + } + + if ( ! namesYetToMatch.isEmpty() ) + throw new IllegalArgumentException(String.format( + "project() left unmatched by name: %s", + Arrays.toString( + namesYetToMatch.stream().mapToObj(i->names[i]) + .toArray(Simple[]::new) + ))); + + return project(base, proxy, newBits, map, increasing); + } + + private static Projection project( + TupleDescriptor base, TargetList proxy, + int highestProxyAttrPlus1, Attribute... attrs) + { + if ( requireNonNull(attrs, "project() attrs null").length == 0 ) + return EMPTY; + + BitSet attrsYetToMatch = new BitSet(attrs.length); + attrsYetToMatch.set(0, attrs.length); + + BitSet newBits = new BitSet(highestProxyAttrPlus1); + short[] map = new short [ attrs.length ]; + + boolean increasing = true; + + for ( int i = 0 ; i < attrs.length ; ++ i ) + { + Attribute attr = attrs[i]; + if ( ! proxy.contains(attr) ) + continue; + int idx = attr.subId() - 1; + if ( newBits.get(idx) ) // it's a duplicate + continue; + if ( idx < newBits.length() - 1 ) + increasing = false; + newBits.set(idx); + map[i] = (short)idx; + attrsYetToMatch.clear(i); + } + + if ( ! attrsYetToMatch.isEmpty() ) + throw new IllegalArgumentException(String.format( + "project() extraneous attributes: %s", + Arrays.toString( + attrsYetToMatch.stream().mapToObj(i->attrs[i]) + .toArray(Attribute[]::new)))); + + return project(base, proxy, newBits, map, increasing); + } + + static Projection project(TupleDescriptor src, BitSet indices) + { + return project(Flavor.ZEROBASED, src, indices); + } + + static Projection sqlProject(TupleDescriptor src, BitSet indices) + { + return project(Flavor.SQL, src, indices); + } + + private static Projection project( + Flavor flavor, TupleDescriptor src, BitSet indices) + { + int n = src.size(); + int max = flavor.check(n, indices); + if ( -1 == max ) + return EMPTY; + + if ( indices.cardinality() == n ) + return src; + + return new P(src, flavor.cloned(indices)); + } + + /* + * A factored-out epilogue. If we have generated newBits/map representing + * n distinct attributes and n was proxy.size(), then proxy was a Projection + * to start with and may be what to return. + */ + private static Projection project( + TupleDescriptor base, TargetList proxy, + BitSet newBits, short[] map, boolean increasing) + { + if ( map.length == proxy.size() ) + { + if ( increasing ) + { + if ( proxy instanceof P || proxy instanceof TupleDescriptor ) + return (Projection)proxy; + } + else if ( proxy instanceof MP ) + if ( Arrays.equals(map, ((MP)proxy).m_map) ) + return (Projection)proxy; + } + + return increasing ? 
new P(base, newBits) : new MP(base, newBits, map); + } + + static R applyOver( + TargetList tl, Iterable tuples, Cursor.Function f) + throws X, SQLException + { + try + { + return f.apply(new CursorImpl(tl, tuples)); + } + catch ( AdapterException e ) + { + throw e.unwrap(SQLException.class); + } + } + + static R applyOver( + TargetList tl, TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException + { + try + { + return f.apply(new CursorImpl(tl, tuple)); + } + catch ( AdapterException e ) + { + throw e.unwrap(SQLException.class); + } + } + + abstract static class ALI implements ListIterator + { + protected final TupleDescriptor m_tdesc; + /* + * Invariant on m_idx: except transiently during an operation, it + * doesn't point to the item last returned. It points where the + * *next* item will come from if fetching in the same direction. + * It is incremented/decremented after every item fetch. + * After fetching everything possible backward, it has an otherwise + * invalid value, -1. After fetching everything possible forward, it + * has an otherwise invalid value, the underlying source's length. + * These are, in fact, the values previousIndex() or nextIndex(), + * respectively, will return in those cases. + * Any forward operation that follows a previous() begins by + * incrementing this index (for real, if next(), or notionally, for + * hasNext or nextIndex); likewise, any backward operation that + * follows a next() begins by (really or notionally) decrementing + * it. + * The constructor should be called passing idx and forward so chosen + * that a call of nextIndex() will produce the caller's desired result. + * That can be accomplished either by passing the intended index itself + * and forward=true, or the intended index minus one and forward=false. + * See the BLI constructor for where both approaches can be useful for + * edge cases. + */ + protected int m_idx; + protected boolean m_forward; + + ALI(TupleDescriptor td, int idx, boolean forward) + { + m_tdesc = td; + m_idx = idx; + m_forward = forward; + } + + @Override + public boolean hasPrevious() + { + return m_idx >= (m_forward ? 1 : 0); + } + + @Override + public int nextIndex() + { + return m_forward ? m_idx : m_idx + 1; + } + + @Override + public int previousIndex() + { + return m_forward ? m_idx - 1 : m_idx; + } + + @Override + public void remove() + { + throw new UnsupportedOperationException("ListIterator.remove"); + } + + @Override + public void set(Attribute e) + { + throw new UnsupportedOperationException("ListIterator.set"); + } + + @Override + public void add(Attribute e) + { + throw new UnsupportedOperationException("ListIterator.add"); + } + } + + static class BLI extends ALI + { + private final BitSet m_bitset; + /* + * The bit index last returned by the bitset's nextSetBit or + * previousSetBit method and used to make a return value from + * next() or previous(). This is not the m_idx'th set bit, because + * m_idx is left as the index to be used next in the same direction. + * A *change* of direction will bump m_idx back into correspondence with + * this value, and the value can be reused (and then m_idx will be + * bumped again and left pointing past it in the new direction). + * BitSet's nextSetBit and previousSetBit methods can return -1 when + * no such bit exists in either direction, but none of the iterator + * options should store such a value here. They should simply leave + * the last-used value here, and adjust m_idx and m_forward so that it + * will be reused if the direction changes. 
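The direction-change invariant described here mirrors the standard ListIterator contract, under which a next() followed by a previous() returns the same element; a plain-JDK reminder of that behavior (illustrative only):

    import java.util.List;
    import java.util.ListIterator;

    public class DirectionChangeSketch
    {
        public static void main(String[] args)
        {
            ListIterator<String> it = List.of("a", "b", "c").listIterator(1);
            String forward  = it.next();     // "b"; cursor now between "b" and "c"
            String backward = it.previous(); // "b" again, per the contract
            System.out.println(forward + " " + backward); // b b
        }
    }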
+ * On construction, the caller may pass -1 if listIterator(index) has + * been called with index the otherwise-invalid value equal to + * bits.length. For that case, we pass idx and forward=true to + * the superclass constructor, and initialize m_attno here to + * bits.length() - 1, so that value can be used for the first backward + * fetch. In all other cases, the super constructor gets idx - 1 and + * forward=false, so the value stored here will be used for the first + * forward fetch. The only way a -1 value can be stored here is in + * the constructor, if the bitset is empty. + */ + private int m_attno; + + BLI(TupleDescriptor td, BitSet bits, int attno, int idx) + { + super(td, -1 == attno ? idx : idx - 1, -1 == attno); + m_bitset = bits; + m_attno = -1 != attno ? attno : bits.length() - 1; + } + + @Override + public boolean hasNext() + { + if ( -1 == m_attno ) + return false; + if ( m_forward ) + return -1 != m_bitset.nextSetBit(m_attno + 1); + /* + * Existing direction is backward, so next() would be a direction + * change, and the valid value in m_attno is what it would use. + */ + return true; + } + + @Override + public Attribute next() + { + int attno = m_attno; + if ( ! m_forward ) + { + m_forward = true; + ++ m_idx; + } + else if ( -1 != attno ) + { + attno = m_bitset.nextSetBit(attno + 1); + if ( -1 != attno ) + m_attno = attno; + } + + if ( -1 == attno ) + throw new NoSuchElementException(); + + ++ m_idx; + return m_tdesc.get(attno); + } + + @Override + public Attribute previous() + { + int attno = m_attno; + if ( m_forward ) + { + m_forward = false; + -- m_idx; + } + else if ( -1 != attno ) + { + attno = m_bitset.previousSetBit(attno - 1); + if ( -1 != attno ) + m_attno = attno; + } + + if ( -1 == attno ) + throw new NoSuchElementException(); + + -- m_idx; + return m_tdesc.get(attno); + } + } + + static class MLI extends ALI + { + private final short[] m_map; + + MLI(TupleDescriptor td, short[] map, int idx) + { + super(td, idx, true); + m_map = map; + } + + @Override + public boolean hasNext() + { + return m_map.length > (m_forward ? m_idx : m_idx + 1); + } + + @Override + public Attribute next() + { + if ( ! m_forward ) + { + m_forward = true; + ++ m_idx; + } + + if ( m_idx > m_map.length - 1 ) + throw new NoSuchElementException(); + + return m_tdesc.get(m_map[m_idx ++]); + } + + @Override + public Attribute previous() + { + if ( m_forward ) + { + m_forward = false; + -- m_idx; + } + + if ( m_idx < 0 ) + throw new NoSuchElementException(); + + return m_tdesc.get(m_map[m_idx --]); + } + } + + /** + * Implementation of {@link TargetList.Cursor TargetList.Cursor}. + */ + static class CursorImpl implements TargetList.Cursor, AutoCloseable + { + private final TargetList m_tlist; + private final int m_targets; + private Iterable m_slots; + private TupleTableSlot m_currentSlot; + private int m_currentTarget; + private int m_nestLevel; + private WeakReference m_activeIterator; + + CursorImpl(TargetList tlist, Iterable slots) + { + m_tlist = tlist; + m_targets = tlist.size(); + m_slots = requireNonNull(slots, "applyOver() tuples null"); + } + + CursorImpl(TargetList tlist, TupleTableSlot slot) + { + m_tlist = tlist; + m_targets = tlist.size(); + m_currentSlot = requireNonNull(slot, "applyOver() tuple null"); + } + + @Override // Iterable + public Iterator iterator() + { + if ( 0 < m_nestLevel ) + throw new IllegalStateException( + "Cursor.iterator() called within a curried CursorFunction"); + + /* + * Only one Iterator should be active at a time. 
There is nothing in + * Iterator's API to indicate when one is no longer active (its user + * might just stop iterating it), so just keep track of whether an + * earlier-created one is still around and, if so, sabotage it. + */ + WeakReference iRef = m_activeIterator; + if ( null != iRef ) + { + Itr i = iRef.get(); + if ( null != i ) + { + i.slot_iter = new Iterator() + { + @Override + public boolean hasNext() + { + throw new IllegalStateException( + "another iterator for this Cursor has been " + + "started"); + } + @Override + public TupleTableSlot next() + { + hasNext(); + return null; + } + }; + } + } + + if ( null == m_slots ) + { + m_slots = List.of(m_currentSlot); + m_currentSlot = null; + } + + Itr i = new Itr(); + m_activeIterator = new WeakReference<>(i); + return i; + } + + @Override // Cursor + public Stream stream() + { + Iterator itr = iterator(); + Spliterator spl; + int chr = IMMUTABLE | NONNULL | ORDERED; + long est = Long.MAX_VALUE; + + if ( m_slots instanceof Collection ) + { + est = ((Collection)m_slots).size(); + chr |= SIZED; + } + + spl = new IteratorNonSpliterator<>(itr, est, chr); + + return StreamSupport.stream(spl, false); + } + + class Itr implements Iterator + { + private Iterator slot_iter = m_slots.iterator(); + + @Override + public boolean hasNext() + { + return slot_iter.hasNext(); + } + + @Override + public Cursor next() + { + m_currentSlot = slot_iter.next(); + m_currentTarget = 0; + return CursorImpl.this; + } + } + + @Override // Iterator + public boolean hasNext() + { + return m_currentTarget < m_targets; + } + + @Override // Iterator + public Attribute next() + { + if ( m_currentTarget < m_targets ) + return m_tlist.get(m_currentTarget++); + + throw new NoSuchElementException( + "fewer Attributes in TargetList than parameters to assign"); + } + + private CursorImpl nest() + { + ++ m_nestLevel; + return this; + } + + @Override // AutoCloseable + public void close() + { + if ( 0 == -- m_nestLevel ) + m_currentTarget = 0; + } + + @Override + public R apply( + L0 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + return f.apply(); + } + } + + @Override + public R apply( + As a0, + L1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + As a0, As a1, + L2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + As a0, As a1, As a2, + L3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + L4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, + L5 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + return f.apply(v0, v1, v2, v3, v4); + } + } + + @Override + public R apply( + As a0, As a1, 
As a2, As a3, + As a4, As a5, + L6 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + return f.apply(v0, v1, v2, v3, v4, v5); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, + L7 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + G v6 = m_currentSlot.get(next(), a6); + return f.apply(v0, v1, v2, v3, v4, v5, v6); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + L8 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + G v6 = m_currentSlot.get(next(), a6); + H v7 = m_currentSlot.get(next(), a7); + return f.apply(v0, v1, v2, v3, v4, v5, v6, v7); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + As a8, As a9, As aa, As ab, + As ac, As ad, As ae, As af, + L16 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + G v6 = m_currentSlot.get(next(), a6); + H v7 = m_currentSlot.get(next(), a7); + I v8 = m_currentSlot.get(next(), a8); + J v9 = m_currentSlot.get(next(), a9); + K va = m_currentSlot.get(next(), aa); + L vb = m_currentSlot.get(next(), ab); + M vc = m_currentSlot.get(next(), ac); + N vd = m_currentSlot.get(next(), ad); + O ve = m_currentSlot.get(next(), ae); + P vf = m_currentSlot.get(next(), af); + return f.apply( + v0, v1, v2, v3, v4, v5, v6, v7, + v8, v9, va, vb, vc, vd, ve, vf); + } + } + + @Override + public R apply( + AsLong a0, + J1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsLong a0, AsLong a1, + J2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long v0 = m_currentSlot.get(next(), a0); + long v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsLong a0, AsLong a1, AsLong a2, + J3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long v0 = m_currentSlot.get(next(), a0); + long v1 = m_currentSlot.get(next(), a1); + long v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsLong a0, AsLong a1, AsLong a2, AsLong a3, + J4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long v0 = m_currentSlot.get(next(), a0); + long v1 = m_currentSlot.get(next(), a1); + long v2 = m_currentSlot.get(next(), a2); + long v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsDouble a0, + D1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = 
m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsDouble a0, AsDouble a1, + D2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = m_currentSlot.get(next(), a0); + double v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsDouble a0, AsDouble a1, AsDouble a2, + D3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = m_currentSlot.get(next(), a0); + double v1 = m_currentSlot.get(next(), a1); + double v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsDouble a0, AsDouble a1, AsDouble a2, AsDouble a3, + D4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = m_currentSlot.get(next(), a0); + double v1 = m_currentSlot.get(next(), a1); + double v2 = m_currentSlot.get(next(), a2); + double v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsInt a0, + I1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsInt a0, AsInt a1, + I2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 = m_currentSlot.get(next(), a0); + int v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsInt a0, AsInt a1, AsInt a2, + I3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 = m_currentSlot.get(next(), a0); + int v1 = m_currentSlot.get(next(), a1); + int v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsInt a0, AsInt a1, AsInt a2, AsInt a3, + I4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 = m_currentSlot.get(next(), a0); + int v1 = m_currentSlot.get(next(), a1); + int v2 = m_currentSlot.get(next(), a2); + int v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsFloat a0, + F1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsFloat a0, AsFloat a1, + F2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + float v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsFloat a0, AsFloat a1, AsFloat a2, + F3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + float v1 = m_currentSlot.get(next(), a1); + float v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsFloat a0, AsFloat a1, AsFloat a2, AsFloat a3, + F4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + float v1 = m_currentSlot.get(next(), a1); + float v2 = m_currentSlot.get(next(), a2); + float v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsShort a0, + S1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsShort a0, AsShort a1, + S2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + short v1 = m_currentSlot.get(next(), a1); + return 
f.apply(v0, v1); + } + } + + @Override + public R apply( + AsShort a0, AsShort a1, AsShort a2, + S3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + short v1 = m_currentSlot.get(next(), a1); + short v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsShort a0, AsShort a1, AsShort a2, AsShort a3, + S4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + short v1 = m_currentSlot.get(next(), a1); + short v2 = m_currentSlot.get(next(), a2); + short v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsChar a0, + C1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + char v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsChar a0, AsChar a1, + C2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + char v0 = m_currentSlot.get(next(), a0); + char v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsChar a0, AsChar a1, AsChar a2, + C3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + char v0 = m_currentSlot.get(next(), a0); + char v1 = m_currentSlot.get(next(), a1); + char v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsChar a0, AsChar a1, AsChar a2, AsChar a3, + C4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + char v0 = m_currentSlot.get(next(), a0); + char v1 = m_currentSlot.get(next(), a1); + char v2 = m_currentSlot.get(next(), a2); + char v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsByte a0, + B1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsByte a0, AsByte a1, + B2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + byte v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsByte a0, AsByte a1, AsByte a2, + B3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + byte v1 = m_currentSlot.get(next(), a1); + byte v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsByte a0, AsByte a1, AsByte a2, AsByte a3, + B4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + byte v1 = m_currentSlot.get(next(), a1); + byte v2 = m_currentSlot.get(next(), a2); + byte v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsBoolean a0, + Z1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsBoolean a0, AsBoolean a1, + Z2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + boolean v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, + Z3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + boolean v1 = m_currentSlot.get(next(), a1); + boolean v2 = 
m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, AsBoolean a3, + Z4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + boolean v1 = m_currentSlot.get(next(), a1); + boolean v2 = m_currentSlot.get(next(), a2); + boolean v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TransformImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TransformImpl.java new file mode 100644 index 000000000..4103e1f0a --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TransformImpl.java @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import java.util.concurrent.CopyOnWriteArraySet; + +import java.util.function.Function; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.TRFOID_CB; +import static org.postgresql.pljava.pg.ModelConstants.TRFOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.TRFTYPELANG; // syscache +import static org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlot; +import org.postgresql.pljava.pg.RegProcedureImpl.SupportMemo; + +import static org.postgresql.pljava.pg.adt.OidAdapter.PLANG_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.TRANSFORM_INSTANCE; + +/** + * Implementation of the {@link Transform Transform} interface. + */ +class TransformImpl extends Addressed +implements Nonshared, Transform +{ + private static final Function s_initializer; + + /** + * Count of instances subject to invalidation. + *

    + * Only accessed in invalidate and SP.onFirstUse, both on the PG thread. + */ + private static int s_instances; + + private static class SP extends SwitchPoint + { + @Override + protected void onFirstUse() + { + if ( 1 == ++ s_instances ) + sysCacheInvalArmed(TRFOID_CB, true); + } + } + + private final SwitchPoint[] m_sp; + + /** + * Looks up a single {@code Transform} given a type and procedural language. + *

    + * Only to be called "on the PG thread". + * @return a {@code Transform} if found, otherwise null. + */ + static Transform fromTypeLang(RegType type, ProceduralLanguage lang) + { + assert threadMayEnterPG() : "Transform.fromTypeLang thread"; + + /* + * All we need here is the transform's oid, which a custom native + * method could obtain more cheaply without copying the tuple, but + * _searchSysCacheCopy2 can do the job without adding yet another JNI + * method. We will allocate in the current context, assumed to be + * short-lived, context and use heapTupleGetLightSlot(..., false) to let + * the context take care of cleanup, as no reference to this slot will + * escape this call. + */ + ByteBuffer heapTuple = + _searchSysCacheCopy2(TRFTYPELANG, type.oid(), lang.oid()); + if ( null == heapTuple ) + return null; + + TupleDescImpl td = (TupleDescImpl)CLASSID.tupleDescriptor(); + TupleTableSlot s = heapTupleGetLightSlot(td, heapTuple, null, false); + return s.get(Att.OID, TRANSFORM_INSTANCE); + } + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TRFOID; + } + + /* Implementation of Transform */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + TransformImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_sp = new SwitchPoint[] { new SP() }; + } + + @Override + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_sp[0]; + if ( sp.unused() ) + return; + sps.add(sp); + m_sp[0] = new SP(); + if ( 0 == -- s_instances ) + sysCacheInvalArmed(TRFOID_CB, false); + + boolean languageCached = m_languageCached; + m_languageCached = false; + if ( languageCached ) + ((ProceduralLanguageImpl)language()).removeKnownTransform(this); + + Iterator> itr = m_dependentRoutines.iterator(); + m_dependentRoutines.clear(); // CopyOnWriteArraySet iterator still good + itr.forEachRemaining(p -> p.invalidate(sps, postOps)); + + FromSQLMemo.removeDependent(fromSQL(), this); + ToSQLMemo.removeDependent(toSQL(), this); + } + + static final int SLOT_TYPE; + static final int SLOT_LANG; + static final int SLOT_FROMSQL; + static final int SLOT_TOSQL; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(TransformImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> o.m_sp[0]) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(TransformImpl.class.getDeclaredMethods()) + .withReceiverType(null) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent("language", SLOT_LANG = i++) + .withDependent( "fromSQL", SLOT_FROMSQL = i++) + .withDependent( "toSQL", SLOT_TOSQL = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. 
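The counting scheme used by SP.onFirstUse and invalidate above (arm the syscache callback when the first interested instance appears, disarm it when the last one is invalidated) is a small reference-counting pattern; a generic plain-Java sketch with invented names, single-threaded by assumption just as the original is confined to the PG thread:

    public class ArmDisarmSketch
    {
        interface Callback { void setArmed(boolean armed); }

        private static int instances;     // instances needing invalidation
        private static Callback callback; // whatever delivers invalidations

        /** Call when an instance first needs invalidations delivered. */
        static void onFirstUse()
        {
            if ( 1 == ++ instances )
                callback.setArmed(true);  // first interested instance: arm
        }

        /** Call when an instance is invalidated and drops out. */
        static void onInvalidate()
        {
            if ( 0 == -- instances )
                callback.setArmed(false); // last one gone: disarm
        }

        public static void main(String[] args)
        {
            callback = armed -> System.out.println(armed ? "armed" : "disarmed");
            onFirstUse();    // prints "armed"
            onFirstUse();    // no output; already armed
            onInvalidate();  // no output
            onInvalidate();  // prints "disarmed"
        }
    }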
+ */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute OID; // used in fromTypeLang() above + static final Attribute TRFTYPE; + static final Attribute TRFLANG; + static final Attribute TRFFROMSQL; + static final Attribute TRFTOSQL; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "oid", + "trftype", + "trflang", + "trffromsql", + "trftosql" + ).iterator(); + + OID = itr.next(); + TRFTYPE = itr.next(); + TRFLANG = itr.next(); + TRFFROMSQL = itr.next(); + TRFTOSQL = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* mutable non-API data used only on the PG thread */ + + private final Set> + m_dependentRoutines = new CopyOnWriteArraySet<>(); + + private boolean m_languageCached = false; // needed in invalidate + + static void addDependentRoutine(RegProcedureImpl p, List ts) + { + for ( Transform t : ts ) + ((TransformImpl)t).m_dependentRoutines.add(p); + } + + static void removeDependentRoutine(RegProcedureImpl p,List ts) + { + for ( Transform t : ts ) + ((TransformImpl)t).m_dependentRoutines.remove(p); + } + + /* computation methods */ + + private static RegType type(TransformImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.TRFTYPE, REGTYPE_INSTANCE); + } + + private static ProceduralLanguage language(TransformImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + o.m_languageCached = true; + return s.get(Att.TRFLANG, PLANG_INSTANCE); + } + + private static RegProcedure fromSQL(TransformImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") + RegProcedure p = + (RegProcedure)s.get(Att.TRFFROMSQL, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure toSQL(TransformImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") + RegProcedure p = + (RegProcedure)s.get(Att.TRFTOSQL, REGPROCEDURE_INSTANCE); + return p; + } + + /* API methods */ + + @Override + public RegType type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public ProceduralLanguage language() + { + try + { + MethodHandle h = m_slots[SLOT_LANG]; + return (ProceduralLanguage)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure fromSQL() + { + try + { + MethodHandle h = m_slots[SLOT_FROMSQL]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure toSQL() + { + try + { + MethodHandle h = m_slots[SLOT_TOSQL]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link #fromSQL() fromSQL} function. 
+ */ + static class FromSQLMemo + extends SupportMemo implements FromSQL + { + private FromSQLMemo( + RegProcedure carrier, Transform dep) + { + super(carrier, (TransformImpl)dep); + } + + static void addDependent( + RegProcedure proc, Transform dep) + { + SupportMemo.add(proc, (TransformImpl)dep, FromSQLMemo.class, + () -> new FromSQLMemo(proc, dep)); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link #toSQL() toSQL} function. + */ + static class ToSQLMemo + extends SupportMemo implements ToSQL + { + private ToSQLMemo( + RegProcedure carrier, Transform dep) + { + super(carrier, (TransformImpl)dep); + } + + static void addDependent( + RegProcedure proc, Transform dep) + { + SupportMemo.add(proc, (TransformImpl)dep, ToSQLMemo.class, + () -> new ToSQLMemo(proc, dep)); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TriggerImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TriggerImpl.java new file mode 100644 index 000000000..4214327f6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TriggerImpl.java @@ -0,0 +1,548 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.annotation.Native; + +import java.nio.ByteBuffer; +import java.nio.ShortBuffer; + +import java.nio.charset.CharacterCodingException; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import static java.util.Collections.unmodifiableSet; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.annotation.Trigger.Called; +import org.postgresql.pljava.annotation.Trigger.Event; +import org.postgresql.pljava.annotation.Trigger.Scope; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.Checked; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.jdbc.SQLXMLImpl; + +import org.postgresql.pljava.model.*; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; + +import static + org.postgresql.pljava.pg.CatalogObjectImpl.Addressed._sysTableGetByOid; + +import static org.postgresql.pljava.pg.DatumUtils.asAlwaysCopiedDatum; +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; +import static org.postgresql.pljava.pg.DatumUtils.mapCString; +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; + +import org.postgresql.pljava.pg.LookupImpl.CallImpl.TriggerDataImpl; + +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; +import static org.postgresql.pljava.pg.ModelConstants.Anum_pg_trigger_oid; +import static org.postgresql.pljava.pg.ModelConstants.TriggerOidIndexId; + +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgname; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgfoid; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgtype; +import static 
org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgenabled; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgisinternal; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgisclone; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgconstrrelid; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgconstrindid; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgconstraint; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgdeferrable; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tginitdeferred; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgnargs; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgnattr; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgattr; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgargs; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgqual; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgoldtable; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgnewtable; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Implements {@code Trigger}. + *

    + * This implementation, at least at first, will have an unusual limitation: + * its accessor methods (other than those of + * {@link CatalogObject.Addressed Addressed}) may only work + * when called by a trigger function or its language handler within the scope + * of the function's preparation and execution. Some may be unimplemented even + * then, as noted in the documentation of the methods themselves. + *

    + * That spares it from having to deal with getting the content from + * {@code pg_trigger}, or cache lifetime, or invalidation; it can operate from + * the copy PostgreSQL supplies for the trigger function call, during the scope + * of the call. + *

    + * At least for now, then, it simply extends {@code CatalogObjectImpl} directly + * rather than {@code CatalogObjectImpl.Addressed}, needing none of the caching + * machinery in the latter. + */ +class TriggerImpl extends CatalogObjectImpl +implements Nonshared, Trigger +{ + @Native private static final char TRIGGER_FIRES_ON_ORIGIN = 'O'; + @Native private static final char TRIGGER_FIRES_ALWAYS = 'A'; + @Native private static final char TRIGGER_FIRES_ON_REPLICA = 'R'; + @Native private static final char TRIGGER_DISABLED = 'D'; + + @Native private static final int TRIGGER_TYPE_ROW = 1 << 0; + @Native private static final int TRIGGER_TYPE_BEFORE = 1 << 1; + @Native private static final int TRIGGER_TYPE_INSERT = 1 << 2; + @Native private static final int TRIGGER_TYPE_DELETE = 1 << 3; + @Native private static final int TRIGGER_TYPE_UPDATE = 1 << 4; + @Native private static final int TRIGGER_TYPE_TRUNCATE = 1 << 5; + @Native private static final int TRIGGER_TYPE_INSTEAD = 1 << 6; + + @Native private static final int TRIGGER_TYPE_LEVEL_MASK = TRIGGER_TYPE_ROW; + @Native private static final int TRIGGER_TYPE_STATEMENT = 0; + + @Native private static final int TRIGGER_TYPE_TIMING_MASK = + TRIGGER_TYPE_BEFORE | TRIGGER_TYPE_INSTEAD; + @Native private static final int TRIGGER_TYPE_AFTER = 0; + + @Native private static final int TRIGGER_TYPE_EVENT_MASK = + TRIGGER_TYPE_INSERT | TRIGGER_TYPE_DELETE | + TRIGGER_TYPE_UPDATE | TRIGGER_TYPE_TRUNCATE; + + /* + * By inspection of the above, event bits are contiguous and can be shifted + * right by this amount to make a zero-based index of power sets, which is + * relied on below; if that changes, fix whatever needs fixing. + */ + private static final int EVENT_SHIFT = 2; + + private static Set indexToSet(int index) + { + int type = index << EVENT_SHIFT; + EnumSet s = EnumSet.noneOf(Event.class); + if ( 0 != (type & TRIGGER_TYPE_INSERT) ) + s.add(Event.INSERT); + if ( 0 != (type & TRIGGER_TYPE_DELETE) ) + s.add(Event.DELETE); + if ( 0 != (type & TRIGGER_TYPE_UPDATE) ) + s.add(Event.UPDATE); + if ( 0 != (type & TRIGGER_TYPE_TRUNCATE) ) + s.add(Event.TRUNCATE); + return unmodifiableSet(s); + } + + private static final List> EVENT_SETS = List.of( + indexToSet( 0), indexToSet( 1), indexToSet( 2), indexToSet( 3), + indexToSet( 4), indexToSet( 5), indexToSet( 6), indexToSet( 7), + indexToSet( 8), indexToSet( 9), indexToSet(10), indexToSet(11), + indexToSet(12), indexToSet(13), indexToSet(14), indexToSet(15)); + + private static Set typeToSet(int type) + { + type &= TRIGGER_TYPE_EVENT_MASK; + return EVENT_SETS.get(type >>> EVENT_SHIFT); + } + + static + { + assert + typeToSet(TRIGGER_TYPE_INSERT).equals(EnumSet.of(Event.INSERT)) && + typeToSet(TRIGGER_TYPE_DELETE).equals(EnumSet.of(Event.DELETE)) && + typeToSet(TRIGGER_TYPE_UPDATE).equals(EnumSet.of(Event.UPDATE)) && + typeToSet(TRIGGER_TYPE_TRUNCATE).equals(EnumSet.of(Event.TRUNCATE)) + : "Trigger.events representation has changed"; + } + + private TriggerDataImpl m_td; + private ByteBuffer m_bb; + + /** + * Executes work in a scope during which this instance is + * associated with the supplied {@link TriggerDataImpl TriggerDataImpl} + * instance and returns any result. + *

    + * Used by the dispatcher in a somewhat incestuous arrangement further + * described at {@link TriggerDataImpl#m_trigger}. + */ + + T withTriggerData(TriggerDataImpl td, Checked.Supplier work) + throws E + { + final Object[] save = {null, null}; + try + { + doInPG(() -> + { + save[0] = m_td; + save[1] = m_bb; + m_td = td; + m_bb = td.m_trigger; + }); + return work.get(); + } + finally + { + doInPG(() -> + { + m_td.m_trigger = m_bb = (ByteBuffer)save[1]; + m_td = (TriggerDataImpl)save[0]; + }); + } + } + + /* API methods of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + /* + * The API javadoc does say the methods of Addressed will work even outside + * of the trigger-call context, and this is one of those, so give it a + * simple if nonoptimal implementation doing an index lookup to cover that. + */ + @Override + public boolean exists() + { + if ( null != m_bb ) + return true; + + ByteBuffer heapTuple; + TupleDescImpl td = (TupleDescImpl)CLASSID.tupleDescriptor(); + + try + { + return doInPG(() -> + null != _sysTableGetByOid( + CLASSID.oid(), oid(), Anum_pg_trigger_oid, + TriggerOidIndexId, td.address()) + ); + } + catch ( SQLException e ) + { + throw unchecked(e); + } + } + + /* API method of Named */ + + @Override + public Simple name() + { + if ( null == m_bb ) + throw notyet(); + + try + { + long p = fetchPointer(m_bb, OFFSET_TRG_tgname); + ByteBuffer b = mapCString(p); + return Simple.fromCatalog(SERVER_ENCODING.decode(b).toString()); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + + /* API methods */ + + @Override + public RegClass relation() + { + if ( null == m_td ) + throw notyet(); + + return m_td.relation(); + } + + @Override + public Trigger parent() + { + throw notyet(); + } + + @Override + public RegProcedure function() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgfoid); + + @SuppressWarnings("unchecked") + RegProcedure f = + (RegProcedure)of(RegProcedure.CLASSID, oid); + + return f; + } + + @Override + public Called called() + { + if ( null == m_bb ) + throw notyet(); + + int type = Short.toUnsignedInt(m_bb.getShort(OFFSET_TRG_tgtype)); + type &= TRIGGER_TYPE_TIMING_MASK; + switch ( type ) + { + case TRIGGER_TYPE_BEFORE : return Called.BEFORE; + case TRIGGER_TYPE_AFTER : return Called.AFTER; + case TRIGGER_TYPE_INSTEAD: return Called.INSTEAD_OF; + default: + throw new AssertionError("Trigger.called enum"); + } + } + + @Override + public Set events() + { + if ( null == m_bb ) + throw notyet(); + + int type = Short.toUnsignedInt(m_bb.getShort(OFFSET_TRG_tgtype)); + return typeToSet(type); + } + + @Override + public Scope scope() + { + if ( null == m_bb ) + throw notyet(); + + int type = Short.toUnsignedInt(m_bb.getShort(OFFSET_TRG_tgtype)); + type &= TRIGGER_TYPE_LEVEL_MASK; + switch ( type ) + { + case TRIGGER_TYPE_ROW : return Scope.ROW; + case TRIGGER_TYPE_STATEMENT: return Scope.STATEMENT; + default: + throw new AssertionError("Trigger.scope enum"); + } + } + + @Override + public ReplicationRole enabled() + { + if ( null == m_bb ) + throw notyet(); + + char c = (char)(0xff & m_bb.get(OFFSET_TRG_tgenabled)); + + switch ( c ) + { + case TRIGGER_FIRES_ON_ORIGIN : return ReplicationRole.ON_ORIGIN; + case TRIGGER_FIRES_ALWAYS : return ReplicationRole.ALWAYS; + case TRIGGER_FIRES_ON_REPLICA : return ReplicationRole.ON_REPLICA; + case TRIGGER_DISABLED : return ReplicationRole.DISABLED; + default: + throw new AssertionError("Trigger.enabled enum"); + } + } 
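For a self-contained view of the bit layout that called(), scope(), and events() decode, this plain-Java sketch mirrors the TRIGGER_TYPE_* values declared in this file (a local enum stands in for the API's Event type; not code from the patch):

    import java.util.EnumSet;

    public class TgTypeSketch
    {
        enum Event { INSERT, DELETE, UPDATE, TRUNCATE }

        // Bit layout as declared above in TriggerImpl.
        static final int ROW      = 1 << 0;
        static final int BEFORE   = 1 << 1;
        static final int INSERT   = 1 << 2;
        static final int DELETE   = 1 << 3;
        static final int UPDATE   = 1 << 4;
        static final int TRUNCATE = 1 << 5;
        static final int INSTEAD  = 1 << 6;

        static EnumSet<Event> events(int tgtype)
        {
            EnumSet<Event> s = EnumSet.noneOf(Event.class);
            if ( 0 != (tgtype & INSERT) )   s.add(Event.INSERT);
            if ( 0 != (tgtype & DELETE) )   s.add(Event.DELETE);
            if ( 0 != (tgtype & UPDATE) )   s.add(Event.UPDATE);
            if ( 0 != (tgtype & TRUNCATE) ) s.add(Event.TRUNCATE);
            return s;
        }

        public static void main(String[] args)
        {
            int tgtype = ROW | BEFORE | INSERT | UPDATE; // row BEFORE INSERT OR UPDATE
            System.out.println("row level: " + (0 != (tgtype & ROW)));
            System.out.println("before:    " + (0 != (tgtype & BEFORE)));
            System.out.println("events:    " + events(tgtype));
        }
    }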
+ + @Override + public boolean internal() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tgisinternal); + } + + @Override + public RegClass constraintRelation() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgconstrrelid); + return InvalidOid == oid ? null : of(RegClass.CLASSID, oid); + } + + @Override + public RegClass constraintIndex() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgconstrindid); + return InvalidOid == oid ? null : of(RegClass.CLASSID, oid); + } + + @Override + public Constraint constraint() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgconstraint); + return InvalidOid == oid ? null : of(Constraint.CLASSID, oid); + } + + @Override + public boolean deferrable() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tgdeferrable); + } + + @Override + public boolean initiallyDeferred() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tginitdeferred); + } + + @Override + public Projection columns() + { + if ( null == m_bb ) + throw notyet(); + + int nattr = Short.toUnsignedInt(m_bb.get(OFFSET_TRG_tgnattr)); + + if ( 0 == nattr ) + return null; + + long attvp = fetchPointer(m_bb, OFFSET_TRG_tgattr); + ByteBuffer attvb = mapFixedLength(attvp, nattr * Short.BYTES); + ShortBuffer sb = attvb.asShortBuffer(); + short[] attnums = new short [ nattr ]; + sb.get(attnums); + return relation().tupleDescriptor().sqlProject(attnums); + } + + @Override + public List arguments() + { + if ( null == m_bb ) + throw notyet(); + + int nargs = Short.toUnsignedInt(m_bb.get(OFFSET_TRG_tgnargs)); + + if ( 0 == nargs ) + return List.of(); + + long argvp = fetchPointer(m_bb, OFFSET_TRG_tgargs); + ByteBuffer argvb = mapFixedLength(argvp, nargs * SIZEOF_DATUM); + String[] argv = new String[nargs]; + for ( int i = 0 ; i < nargs ; ++ i ) + { + long p = fetchPointer(argvb, i * SIZEOF_DATUM); + ByteBuffer b = mapCString(p); + try + { + argv[i] = SERVER_ENCODING.decode(b).toString(); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + return List.of(argv); + } + + @Override + public SQLXML when() + { + if ( null == m_bb ) + throw notyet(); + + long p = fetchPointer(m_bb, OFFSET_TRG_tgqual); + + if ( 0 == p ) + return null; + + ByteBuffer bb = mapCString(p); + + Datum.Input in = asAlwaysCopiedDatum(bb, 0, bb.limit()); + + try + { + return SQLXMLImpl.newReadable(in, RegType.PG_NODE_TREE, true); + } + catch ( SQLException e ) + { + throw unchecked(e); + } + } + + @Override + public Simple tableOld() + { + if ( null == m_bb ) + throw notyet(); + + long p = fetchPointer(m_bb, OFFSET_TRG_tgoldtable); + + if ( 0 == p ) + return null; + + ByteBuffer b = mapCString(p); + + try + { + return Simple.fromCatalog(SERVER_ENCODING.decode(b).toString()); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + + @Override + public Simple tableNew() + { + if ( null == m_bb ) + throw notyet(); + + long p = fetchPointer(m_bb, OFFSET_TRG_tgnewtable); + + if ( 0 == p ) + return null; + + ByteBuffer b = mapCString(p); + + try + { + return Simple.fromCatalog(SERVER_ENCODING.decode(b).toString()); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + + @Override + public boolean isClone() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tgisclone); + } +} diff --git 
a/pljava/src/main/java/org/postgresql/pljava/pg/TupleDescImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TupleDescImpl.java new file mode 100644 index 000000000..dcc8211d7 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TupleDescImpl.java @@ -0,0 +1,854 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import org.postgresql.pljava.model.*; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import static org.postgresql.pljava.model.RegType.RECORD; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.DualState; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.pg.TargetListImpl; +import static org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.*; +import static org.postgresql.pljava.pg.DatumUtils.addressOf; +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.constant; +import static java.lang.invoke.MethodHandles.lookup; +import static java.lang.invoke.MethodType.methodType; + +import static java.lang.Math.ceil; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CoderResult; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import java.util.AbstractList; +import static java.util.Arrays.fill; +import java.util.BitSet; +import java.util.List; +import java.util.Map; + +import java.util.concurrent.ConcurrentHashMap; + +import java.util.function.BiFunction; +import java.util.function.IntFunction; +import java.util.function.IntSupplier; +import java.util.function.ToIntBiFunction; + +/** + * Implementation of {@link TupleDescriptor TupleDescriptor}. + *

    + * A {@link Cataloged Cataloged} descriptor corresponds to a known composite + * type declared in the PostgreSQL catalogs; its {@link #rowType rowType} method + * returns that type. A {@link Blessed Blessed} descriptor has been constructed + * on the fly and then interned in the type cache, such that the type + * {@code RECORD} and its type modifier value will identify it uniquely for + * the life of the backend; {@code rowType} will return the corresponding + * {@link RegTypeImpl.Blessed} instance. An {@link Ephemeral Ephemeral} + * descriptor has been constructed ad hoc and not interned; {@code rowType} will + * return {@link RegType#RECORD RECORD} itself, which isn't a useful identifier + * (many such ephemeral descriptors, all different, could exist at once). + * An ephemeral descriptor is only useful as long as a reference to it is held. + *

    + * A {@code Cataloged} descriptor can be obtained from the PG {@code relcache} + * or the {@code typcache}, should respond to cache invalidation for + * the corresponding relation, and is reference-counted, so the count should be + * incremented when cached here, and decremented/released if this instance + * goes unreachable from Java. + *
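+ *<p>
+ * As a rough sketch of that discipline (using only the private helpers that
+ * appear later in this class; nothing here is additional API):
+ *<pre>{@code
+ *  s_getAndAddPlain.applyAsInt(td, 1);      // count taken when caching the descriptor here
+ *  ...
+ *  if ( 1 == s_getAndAddPlain.applyAsInt(td, -1) ) // count released in State when unreachable
+ *      ... // that was the last reference; the native TupleDesc may be freed
+ *}</pre>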

    + * A {@code Blessed} descriptor can be obtained from the PG {@code typcache} + * by {@code lookup_rowtype_tupdesc}. No invalidation logic is needed, as it + * will persist, and its identifying typmod will remain unique, for the life of + * the backend. It may or may not be reference-counted. + *
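+ *<p>
+ * For example (an illustrative sketch using only API that appears elsewhere in
+ * this class), a blessed descriptor can be recovered at any later time from its
+ * identifying typmod:
+ *<pre>{@code
+ *  // typmod as assigned by the type cache (see Ephemeral.intern below)
+ *  TupleDescriptor td = RECORD.modifier(typmod).tupleDescriptor();
+ *}</pre>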

    + * An {@code Ephemeral} tuple descriptor may need to be copied out of + * a short-lived memory context where it is found, either into a longer-lived + * context (and invalidated when that context is), or onto the Java heap and + * used until GC'd. + */ +abstract class TupleDescImpl extends AbstractList +implements TupleDescriptor +{ + private final MethodHandle m_tdH; + private final Attribute[] m_attrs; + private final State m_state; + + /* + * Implementation of Projection + */ + + @Override // Projection + public Projection subList(int fromIndex, int toIndex) + { + return TargetListImpl.subList(this, fromIndex, toIndex); + } + + @Override // Projection + public Projection project(Simple... names) + { + return TargetListImpl.project(this, names); + } + + @Override // Projection + public Projection project(int... indices) + { + return TargetListImpl.project(this, indices); + } + + @Override // Projection + public Projection sqlProject(int... indices) + { + return TargetListImpl.sqlProject(this, indices); + } + + @Override // Projection + public Projection project(short... indices) + { + return TargetListImpl.project(this, indices); + } + + @Override // Projection + public Projection sqlProject(short... indices) + { + return TargetListImpl.sqlProject(this, indices); + } + + @Override // Projection + public Projection project(Attribute... attrs) + { + return TargetListImpl.project(this, attrs); + } + + @Override // Projection + public Projection project(BitSet indices) + { + return TargetListImpl.project(this, indices); + } + + @Override // Projection + public Projection sqlProject(BitSet indices) + { + return TargetListImpl.sqlProject(this, indices); + } + + @Override // TargetList + public R applyOver( + Iterable tuples, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver(this, tuples, f); + } + + @Override // TargetList + public R applyOver( + TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver(this, tuple, f); + } + + private static final int s_perAttributeSize; + + /** + * A "getAndAdd" (with just plain memory effects, as it will only be used on + * the PG thread) tailored to the width of the tdrefcount field (which is, + * oddly, declared as C int rather than a specific-width type). + */ + private static final ToIntBiFunction s_getAndAddPlain; + + private static final MethodHandle s_everNull; + private static final MethodHandle s_throwInvalidated; + + private static ByteBuffer throwInvalidated() + { + throw new IllegalStateException( + "use of stale TupleDescriptor outdated by a DDL change"); + } + + static + { + assert Integer.BYTES == SIZEOF_Oid : "sizeof Oid"; + assert Integer.BYTES == SIZEOF_pg_attribute_atttypmod : "sizeof typmod"; + + s_perAttributeSize = SIZEOF_FORM_PG_ATTRIBUTE + + ( (PG_VERSION_NUM < 180000) ? 
0 : SIZEOF_CompactAttribute ); + + if ( 4 == SIZEOF_TUPLEDESC_TDREFCOUNT ) + { + s_getAndAddPlain = (b,i) -> + { + int count = b.getInt(OFFSET_TUPLEDESC_TDREFCOUNT); + b.putInt(OFFSET_TUPLEDESC_TDREFCOUNT, count + i); + return count; + }; + } + else + throw new ExceptionInInitializerError( + "Implementation needed for platform with " + + "sizeof TupleDesc->tdrefcount = " +SIZEOF_TUPLEDESC_TDREFCOUNT); + + s_everNull = constant(ByteBuffer.class, null); + + try + { + s_throwInvalidated = lookup().findStatic(TupleDescImpl.class, + "throwInvalidated", methodType(ByteBuffer.class)); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + private ByteBuffer bufferIfValid() + { + try + { + return (ByteBuffer)m_tdH.invokeExact(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * Called after the {@code SwitchPoint} has been invalidated. + *

    + * Only happens for a {@link Cataloged} descriptor, on the PG thread, as a + * consequence of invalidation of the {@link RegClass} that defines it. + */ + void invalidate() + { + assert threadMayEnterPG() : "TupleDescImpl slice thread"; + + m_state.release(); + fill(m_attrs, null); + } + + /** + * Address of the native tuple descriptor (not supported on + * an {@code Ephemeral} instance). + */ + long address() throws SQLException + { + try + { + m_state.pin(); + return m_state.address(); + } + finally + { + m_state.unpin(); + } + } + + /** + * Slice off the portion of the buffer representing one attribute. + *

    + * Only called by {@code AttributeImpl}. + */ + ByteBuffer slice(int index) + { + assert threadMayEnterPG() : "TupleDescImpl slice thread"; + + ByteBuffer td = bufferIfValid(); + + int len = SIZEOF_FORM_PG_ATTRIBUTE; + int off = OFFSET_TUPLEDESC_ATTRS + len * index; + len = ATTRIBUTE_FIXED_PART_SIZE; // TupleDesc hasn't got the whole thing + + /* + * Prior to PG 18, OFFSET_TUPLEDESC_ATTRS from the true beginning of + * the buffer is where the first Form_pg_attribute starts, so + * OFFSET_TUPLEDESC_ATTRS + len * index is the whole story. As of PG 18, + * the value we pick up for OFFSET_TUPLEDESC_ATTRS is actually the + * offset of compact_attrs, and there are natts of those before + * the first Form_pg_attribute, so we really want OFFSET_TUPLEDESC_ATTRS + * + len * index + (natts * SIZEOF_CompactAttribute). That final term is + * constant for this TupleDescriptor, so the constructor has simply set + * the buffer's position() to that value. When we slice off bnew below, + * it begins at that position, so our off as computed above is right. + * + * Java 13 has a slice(off, len) method that may be tidier; it would + * have to be passed off+position(). + */ + ByteBuffer bnew = td.slice(); + bnew.position(off).limit(off + len); + return bnew.slice().order(td.order()); + } + + /** + * Construct a descriptor given a {@code ByteBuffer} windowing a native one. + *

    + * Important: As of PostgreSQL 18, the offset to the first + * {@code Form_pg_attribute} slice is no longer fixed, but depends on the + * number of attributes. The number of attributes is, of course, known here + * in this constructor, but is not easily available to the + * {@link #slice slice} method as it is being called by the + * {@code Attribute} instances being constructed before the {@code m_attrs} + * field is assigned here. + *
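+ *<p>
+ * Concretely (a sketch of the arithmetic, matching the comment in
+ * {@code slice} below), attribute {@code index} begins at:
+ *<pre>{@code
+ *  // PG < 18
+ *  OFFSET_TUPLEDESC_ATTRS + index * SIZEOF_FORM_PG_ATTRIBUTE
+ *  // PG >= 18 (natts CompactAttribute entries precede the first Form_pg_attribute)
+ *  OFFSET_TUPLEDESC_ATTRS + natts * SIZEOF_CompactAttribute
+ *                         + index * SIZEOF_FORM_PG_ATTRIBUTE
+ *}</pre>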

    + * This will be handled by advancing the {@code ByteBuffer}'s + * {@code position} here by the necessary offset (which means, in general, + * the position will be some nonsensical place in the buffer, but will yield + * the desired attribute slice when {@code slice} computes an offset + * relative to it. All other accesses to {@code TupleDesc} fields + * must be made using absolute offsets, not position-relative ones. + * @param td ByteBuffer over a native TupleDesc + * @param sp SwitchPoint that the instance will rely on to detect + * invalidation, or null if invalidation will not be possible. + * @param useState whether a native TupleDesc is associated, and therefore a + * State object must be used to release it on unreachability of this object. + * @param ctor constructor to be used for each Attribute instance. (The + * Attribute constructors also determine, indirectly, what SwitchPoint, if + * any, the Attribute instances will rely on to detect invalidation.) + */ + private TupleDescImpl( + ByteBuffer td, SwitchPoint sp, boolean useState, + BiFunction ctor) + { + assert threadMayEnterPG() : "TupleDescImpl construction thread"; + + m_state = useState ? new State(this, td) : null; + + td = asReadOnlyNativeOrder(td); + MethodHandle c = constant(ByteBuffer.class, td); + m_tdH = (null == sp) ? c : sp.guardWithTest(c, s_throwInvalidated); + + Attribute[] attrs = + new Attribute [ (td.capacity() - OFFSET_TUPLEDESC_ATTRS) + / s_perAttributeSize ]; + + /* + * ATTENTION: as described in the javadoc, this leaves the buffer's + * position nonsensical for ordinary purposes, but will produce correct + * results in the slice method above. All other accesses to fields of + * the underlying TupleDesc must be absolute, not relative to this + * position. + */ + if ( PG_VERSION_NUM >= 180000 ) + td.position(attrs.length * SIZEOF_CompactAttribute); + + for ( int i = 0 ; i < attrs.length ; ++ i ) + attrs[i] = ctor.apply(this, 1 + i); + + m_attrs = attrs; + } + + /** + * Constructor used only by OfType to produce a synthetic tuple descriptor + * with one element of a specified RegType. + */ + private TupleDescImpl(RegType type) + { + m_state = null; + m_tdH = s_everNull; + m_attrs = new Attribute[] { new AttributeImpl.OfType(this, type) }; + } + + /** + * Return a {@code TupleDescImpl} given a byte buffer that maps a PostgreSQL + * {@code TupleDesc} structure. + *

    + * This method is called from native code, and assumes the caller has not + * (or not knowingly) obtained the descriptor directly from the type cache, + * so if it is not reference-counted (its count is -1) it will be assumed + * unsafe to directly cache. In that case, if it represents a cataloged + * or interned ("blessed") descriptor, we will get one directly from the + * cache and return that, or if it is ephemeral, we will return one based + * on a defensive copy. + *

    + * If the descriptor is reference-counted, and we use it (that is, we do not + * find an existing version in our cache), we increment the reference count + * here. That does not have the effect of requesting leak warnings + * at the exit of PostgreSQL's current resource owner, because we have every + * intention of hanging on to it longer, until GC or an invalidation + * callback tells us not to. + *
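+ *<p>
+ * Schematically (a summary of the three cases handled below):
+ *<pre>{@code
+ *  typoid != RECORD                -> Cataloged  (cached on the RegClass)
+ *  typoid == RECORD, typmod != -1  -> Blessed    (cached on RECORD.modifier(typmod))
+ *  typoid == RECORD, typmod == -1  -> Ephemeral  (defensive copy, never cached)
+ *}</pre>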

    + * While we can just read the type oid, typmod, and reference count through + * the byte buffer, as long as the only caller is C code, it saves some fuss + * just to have it pass those values. If the C caller has the relation oid + * handy also, it can pass that as well and save a lookup here. + */ + private static TupleDescriptor fromByteBuffer( + ByteBuffer td, int typoid, int typmod, int reloid, int refcount) + { + TupleDescriptor.Interned result; + + td.order(nativeOrder()); + + /* + * Case 1: if the type is not RECORD, it's a cataloged composite type. + * Build an instance of Cataloged (unless the implicated RegClass has + * already got one). + */ + if ( RECORD.oid() != typoid ) + { + RegTypeImpl t = + (RegTypeImpl)Factory.formMaybeModifiedType(typoid, typmod); + + RegClassImpl c = + (RegClassImpl)( InvalidOid == reloid ? t.relation() + : Factory.staticFormObjectId(RegClass.CLASSID, reloid) ); + + assert c.isValid() : "Cataloged row type without matching RegClass"; + + if ( -1 == refcount ) // don't waste time on an ephemeral copy. + return c.tupleDescriptor(); // just go get the real one. + + TupleDescriptor.Interned[] holder = c.m_tupDescHolder; + if ( null != holder ) + { + result = holder[0]; + assert null != result : "disagree whether RegClass has desc"; + return result; + } + + holder = new TupleDescriptor.Interned[1]; + /* + * The constructor assumes the reference count has already been + * incremented to account for the reference constructed here. + */ + s_getAndAddPlain.applyAsInt(td, 1); + holder[0] = result = new Cataloged(td, c); + c.m_tupDescHolder = holder; + return result; + } + + /* + * Case 2: if RECORD with a modifier, it's an interned tuple type. + * Build an instance of Blessed (unless the implicated RegType has + * already got one). + */ + if ( -1 != typmod ) + { + RegTypeImpl.Blessed t = + (RegTypeImpl.Blessed)RECORD.modifier(typmod); + + if ( -1 == refcount ) // don't waste time on an ephemeral copy. + return t.tupleDescriptor(); // just go get the real one. + + TupleDescriptor.Interned[] holder = t.m_tupDescHolder; + if ( null != holder ) + { + result = holder[0]; + assert null != result : "disagree whether RegType has desc"; + return result; + } + + holder = new TupleDescriptor.Interned[1]; + /* + * The constructor assumes the reference count has already been + * incremented to account for the reference constructed here. + */ + s_getAndAddPlain.applyAsInt(td, 1); + holder[0] = result = new Blessed(td, t); + t.m_tupDescHolder = holder; + return result; + } + + /* + * Case 3: it's RECORD with no modifier, an ephemeral tuple type. + * Build an instance of Ephemeral unconditionally, defensively copying + * the descriptor if it isn't reference-counted (which we assert it + * isn't). + */ + assert -1 == refcount : "can any ephemeral TupleDesc be refcounted?"; + return new Ephemeral(td); + } + + /** + * Copy a byte buffer (which may refer to native-managed memory) to one + * with JVM-managed backing memory. + *

    + * Acquiescing to JDK-8318966, it still has to be a direct buffer to avoid + * exceptions when checking alignment. But it will use off-heap memory + * managed by the JVM (reclaimed when the buffer is unreachable), and so + * will not depend on the lifespan of the source buffer. + */ + private static ByteBuffer asManagedNativeOrder(ByteBuffer bb) + { + ByteBuffer copy = + ByteBuffer.allocateDirect(bb.capacity()).put(bb).flip(); + return copy.order(nativeOrder()); + } + + @Override + public Attribute sqlGet(int index) + { + bufferIfValid(); // just for the check + return m_attrs[index - 1]; + } + + /* + * AbstractList implementation + */ + @Override + public int size() + { + return m_attrs.length; + } + + @Override + public Attribute get(int index) + { + bufferIfValid(); // just for the check + return m_attrs[index]; + } + + @Override // Collection + public boolean contains(Object o) + { + if ( ! (o instanceof AttributeImpl) ) + return false; + + AttributeImpl ai = (AttributeImpl)o; + int idx = ai.subId() - 1; + return ( idx < m_attrs.length ) && ( ai == m_attrs[idx] ); + } + + @Override // List + public int indexOf(Object o) + { + if ( ! contains(o) ) + return -1; + + return ((Attribute)o).subId() - 1; + } + + @Override // List + public int lastIndexOf(Object o) + { + return indexOf(o); + } + + /** + * An abstract base shared by the {@code Blessed} and {@code Ephemeral} + * concrete classes, which are populated with + * {@code AttributeImpl.Transient} instances. + *

    + * Supplies their implementation of {@code contains}. {@code OfType} is also + * populated with {@code AttributeImpl.Transient} instances, but it has an + * even more trivial {@code contains} method. + */ + abstract static class NonCataloged extends TupleDescImpl + { + NonCataloged( + ByteBuffer td, SwitchPoint sp, boolean useState, + BiFunction ctor) + { + super(td, sp, useState, ctor); + } + + @Override // Collection + public boolean contains(Object o) + { + if ( ! (o instanceof AttributeImpl.Transient) ) + return false; + + AttributeImpl ai = (AttributeImpl)o; + return this == ai.containingTupleDescriptor(); + } + } + + /** + * A tuple descriptor for a row type that appears in the catalog. + */ + static class Cataloged extends TupleDescImpl implements Interned + { + private final RegClass m_relation;// using its SwitchPoint, keep it live + + Cataloged(ByteBuffer td, RegClassImpl c) + { + /* + * Invalidation of a Cataloged tuple descriptor happens with the + * SwitchPoint attached to the RegClass. Every Cataloged descriptor + * from the cache had better be reference-counted, so unconditional + * true is passed for useState. + */ + super( + td, c.cacheSwitchPoint(), true, + (o, i) -> CatalogObjectImpl.Factory.formAttribute( + c.oid(), i, () -> new AttributeImpl.Cataloged(c)) + ); + + m_relation = c; // we need it alive for its SwitchPoint + } + + @Override + public RegType rowType() + { + return m_relation.type(); + } + } + + /** + * A tuple descriptor that is not in the catalog, but has been interned and + * can be identified by {@code RECORD} and a distinct type modifier for the + * life of the backend. + */ + static class Blessed extends NonCataloged implements Interned + { + private final RegType m_rowType; // using its SwitchPoint, keep it live + + Blessed(ByteBuffer td, RegTypeImpl t) + { + /* + * A Blessed tuple descriptor has no associated RegClass, and is + * expected to live for the life of the backend without invalidation + * events, so we pass null for the SwitchPoint, and a constructor + * that will build AttributeImpl.Transient instances. + * + * If the caller, fromByteBuffer, saw a non-reference-counted + * descriptor, it grabbed one straight from the type cache instead. + * But sometimes, the one in PostgreSQL's type cache is + * non-reference counted, and that's ok, because that one will be + * good for the life of the process. So we do need to check, in this + * constructor, whether to pass true or false for useState. + * (Checking with getAndAddPlain(0) is a bit goofy, but it was + * already set up, matched to the field width, does the job.) + */ + super( + td, null, -1 != s_getAndAddPlain.applyAsInt(td, 0), + (o, i) -> new AttributeImpl.Transient(o, i) + ); + + m_rowType = t; + } + + @Override + public RegType rowType() + { + return m_rowType; + } + } + + /** + * A tuple descriptor that is not in the catalog, has not been interned, and + * is useful only so long as a reference is held. 
+ */ + static class Ephemeral extends NonCataloged + implements TupleDescriptor.Ephemeral + { + private Ephemeral(ByteBuffer td) + { + super( + asManagedNativeOrder(td), null, false, + (o, i) -> new AttributeImpl.Transient(o, i) + ); + } + + @Override + public RegType rowType() + { + return RECORD; + } + + @Override + public Interned intern() + { + TupleDescImpl sup = (TupleDescImpl)this; // bufferIfValid is private + + return doInPG(() -> + { + ByteBuffer td = sup.bufferIfValid(); + + ByteBuffer direct = ByteBuffer.allocateDirect( + td.capacity()).put(td.rewind()); + + int assigned = _assign_record_type_typmod(direct); + + /* + * That will have saved in the typcache an authoritative + * new copy of the descriptor. It will also have written + * the assigned modifier into the 'direct' copy of this + * descriptor, but this is still an Ephemeral instance, + * the wrong Java type. We need to return a new instance + * over the authoritative typcache copy. + */ + return RECORD.modifier(assigned).tupleDescriptor(); + }); + } + } + + /** + * A specialized, synthetic tuple descriptor representing a single column + * of the given {@code RegType}. + */ + static class OfType extends TupleDescImpl + implements TupleDescriptor.Ephemeral + { + OfType(RegType type) + { + super(type); + } + + @Override + public RegType rowType() + { + return RECORD; + } + + @Override + public Interned intern() + { + throw notyet(); + } + + @Override // Collection + public boolean contains(Object o) + { + return get(0) == o; + } + } + + /** + * Based on {@code SingleFreeTupleDesc}, but really does + * {@code ReleaseTupleDesc}. + *

    + * Decrements the reference count and, if it was 1 before decrementing, + * proceeds to the superclass method to free the descriptor. + */ + private static class State + extends DualState.SingleFreeTupleDesc + { + private final IntSupplier m_getAndDecrPlain; + + private State(TupleDescImpl referent, ByteBuffer td) + { + super(referent, null, addressOf(td)); + /* + * The only reference to this non-readonly ByteBuffer retained here + * is what's bound into this getAndDecr for the reference count. + */ + m_getAndDecrPlain = () -> s_getAndAddPlain.applyAsInt(td, -1); + } + + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + if ( nativeStateLive && 1 == m_getAndDecrPlain.getAsInt() ) + super.javaStateUnreachable(nativeStateLive); + } + + private void release() + { + releaseFromJava(); + } + + private long address() + { + return guardedLong(); + } + } + + static Ephemeral synthesizeDescriptor( + List types, List names, BitSet selected) + { + int n = types.size(); + IntFunction toName; + if ( null == names ) + toName = i -> ""; + else + { + assert names.size() == n; + toName = i -> names.get(i).nonFolded(); + } + + if ( null != selected ) + assert selected.length() <= n; + else + { + selected = new BitSet(n); + selected.set(0, n); + } + + CharsetEncoder enc = SERVER_ENCODING.newEncoder(); + float maxbpc = enc.maxBytesPerChar(); + int alignmentModulus = ALIGNOF_INT; + int maxToAlign = alignmentModulus - 1; + int alignmask = maxToAlign; + int sizeTypeTypmodBool = 2 * Integer.BYTES + 1; + + int size = + selected.stream() + .map(i -> toName.apply(i).length()) + .map(len -> + sizeTypeTypmodBool + (int)ceil(len*maxbpc) + 1 + maxToAlign) + .reduce(0, Math::addExact); + + ByteBuffer direct = + ByteBuffer.allocateDirect(size) + .alignedSlice(ALIGNOF_INT).order(nativeOrder()); + + selected.stream().forEachOrdered(i -> + { + int pos = direct.position(); + int misalign = direct.alignmentOffset(pos, alignmentModulus); + pos += - misalign & alignmask; + direct.position(pos); + + RegType t = types.get(i); + direct.putInt(t.oid()).putInt(t.modifier()); + + /* + * The C code will want a value for attndims, about which the docs + * for pg_attribute say: Presently, the number of dimensions of an + * array is not enforced, so any nonzero value effectively means + * "it's an array". + */ + direct.put(t.element().isValid() ? (byte)1 : (byte)0); + + pos = direct.position(); + CharBuffer cb = CharBuffer.wrap(toName.apply(i)); + CoderResult rslt = enc.encode(cb, direct, true); + if ( rslt.isUnderflow() ) + rslt = enc.flush(direct); + if ( ! rslt.isUnderflow() ) + throw new AssertionError("name to server encoding: " + rslt); + enc.reset(); + direct.put((byte)'\0'); + while ( '\0' != direct.get(pos) ) + ++ pos; + if ( ++ pos != direct.position() ) + throw new AssertionError("server encoding of name has NUL"); + }); + + int c = selected.cardinality(); + + return new Ephemeral(doInPG(() -> _synthesizeDescriptor(c, direct))); + } + + /** + * Call the PostgreSQL {@code typcache} function of the same name, but + * return the assigned typmod rather than {@code void}. 
+ */ + private static native int _assign_record_type_typmod(ByteBuffer bb); + + private static native ByteBuffer _synthesizeDescriptor(int n,ByteBuffer bb); +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TupleList.java b/pljava/src/main/java/org/postgresql/pljava/pg/TupleList.java new file mode 100644 index 000000000..f8b2ad878 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TupleList.java @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.nio.ByteBuffer; +import java.nio.IntBuffer; +import java.nio.LongBuffer; + +import java.util.Iterator; +import java.util.List; +import java.util.RandomAccess; +import java.util.Spliterator; +import static java.util.Spliterator.IMMUTABLE; +import static java.util.Spliterator.NONNULL; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterator.SIZED; +import java.util.Spliterators.AbstractSpliterator; + +import java.util.function.Consumer; +import java.util.function.IntToLongFunction; + +import org.postgresql.pljava.internal.AbstractNoSplitList; +import + org.postgresql.pljava.internal.AbstractNoSplitList.IteratorNonSpliterator; +import org.postgresql.pljava.internal.DualState; +import org.postgresql.pljava.internal.DualState.Pinned; +import org.postgresql.pljava.internal.Invocation; + +import org.postgresql.pljava.model.MemoryContext; // for javadoc +import org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; + +/* + * Plan: a group (maybe a class or interface with nested classes) of + * implementations that look like lists of TupleTableSlot over different kinds + * of result: + * - SPITupleTable (these: a tupdesc, and vals array of HeapTuple pointers) + * - CatCList (n_members and a members array of CatCTup pointers, where each + * CatCTup has a HeapTupleData and HeapTupleHeader nearly but not quite + * adjacent), must find tupdesc + * - Tuplestore ? (is this visible, or concealed behind SPI's cursors?) + * - Tuplesort ? (") + * - SFRM results? (Ah, SFRM_Materialize makes a Tuplestore.) + * - will we ever see a "tuple table" ("which is a List of independent + * TupleTableSlots")? + */ + +/** + * Superinterface of one or more classes that can present a sequence of tuples, + * working from the forms in which PostgreSQL can present them. + */ +public interface TupleList extends List, AutoCloseable +{ + @Override + default void close() + { + } + + TupleList EMPTY = new Empty(); + + /** + * Returns a {@code Spliterator} that never splits. + *

    + * Because a {@code TupleList} is typically built on a single + * {@code TupleTableSlot} holding each tuple in turn, there can be no + * thought of parallel stream execution. + *
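+ *<p>
+ * For example (an illustrative sketch only; {@code nameAttr} and
+ * {@code asString} stand in for an {@code Attribute} and a suitable
+ * {@code Adapter.As}, and imports and checked exceptions are elided):
+ *<pre>{@code
+ *  // fine: each value is extracted while the slot still holds its tuple
+ *  List<String> names =
+ *      tuples.stream().map(s -> s.get(nameAttr, asString)).collect(toList());
+ *
+ *  // not meaningful: the stream sees the same TupleTableSlot instance repeatedly
+ *  tuples.stream().distinct().count();
+ *}</pre>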

    + * Also, because a {@code TupleList} iterator may return the same + * {@code TupleTableSlot} repeatedly, stateful {@code Stream} operations + * such as {@code distinct} or {@code sorted} will make no sense applied + * to those objects. + */ + @Override + default public Spliterator spliterator() + { + return new IteratorNonSpliterator<>(iterator(), size(), + IMMUTABLE | NONNULL | ORDERED | SIZED); + } + + /** + * A permanently-empty {@link TupleList TupleList}. + */ + final static class Empty + extends AbstractNoSplitList implements TupleList + { + private Empty() + { + } + + @Override + public int size() + { + return 0; + } + + @Override + public TupleTableSlot get(int i) + { + throw new IndexOutOfBoundsException( + "Index " + i + " out of bounds for length 0"); + } + } + + /** + * A {@code TupleList} constructed atop a PostgreSQL {@code SPITupleTable}. + *

    + * The native table is allocated in a {@link MemoryContext} that will be + * deleted when {@code SPI_finish} is called on exit of the current + * {@code Invocation}. This class merely maps the native tuple table in + * place, and so will prevent later access. + */ + class SPI extends AbstractNoSplitList + implements TupleList, RandomAccess + { + private final State state; + private final TupleTableSlotImpl ttSlot; + private final int nTuples; + private final IntToLongFunction indexToPointer; + + private static class State + extends DualState.SingleSPIfreetuptable + { + private State(SPI r, long tt) + { + /* + * Each SPITupleTable is constructed in a context of its own + * that is a child of the SPI Proc context, and is used by + * SPI_freetuptable to efficiently free it. By rights, that + * context should be the Lifespan here, but that member of + * SPITupleTable is declared a private member "not intended for + * external callers" in the documentation. + * + * If that admonition is to be obeyed, a next-best choice is the + * current Invocation. As long as SPI connection continues to be + * managed automatically and disconnected when the invocation + * exits (and it makes its lifespanRelease call before + * disconnecting SPI, which it does), it should be safe enough. + */ + super(r, Invocation.current(), tt); + } + + private void close() + { + unlessReleased(() -> + { + releaseFromJava(); + }); + } + } + + /** + * Constructs an instance over an {@code SPITupleTable}. + * @param slot a TupleTableSlotImpl to use. The constructed object's + * iterator will return this slot repeatedly, with each tuple in turn + * stored into it. + * @param spiStructP address of the SPITupleTable structure itself, + * saved here to be freed if this object is closed or garbage-collected. + * @param htarray ByteBuffer over the consecutive HeapTuple pointers at + * spiStructP->vals. + */ + SPI(TupleTableSlotImpl slot, long spiStructP, ByteBuffer htarray) + { + ttSlot = slot; + htarray = asReadOnlyNativeOrder(htarray); + state = new State(this, spiStructP); + + if ( 8 == SIZEOF_DATUM ) + { + LongBuffer tuples = htarray.asLongBuffer(); + nTuples = tuples.capacity(); + indexToPointer = tuples::get; + return; + } + else if ( 4 == SIZEOF_DATUM ) + { + IntBuffer tuples = htarray.asIntBuffer(); + nTuples = tuples.capacity(); + indexToPointer = tuples::get; + return; + } + else + throw new AssertionError("unsupported SIZEOF_DATUM"); + } + + @Override + public TupleTableSlot get(int index) + { + try ( Pinned p = state.pinnedNoChecked() ) + { + ttSlot.store_heaptuple( + indexToPointer.applyAsLong(index), false); + return ttSlot; + } + } + + @Override + public int size() + { + return nTuples; + } + + @Override + public void close() + { + state.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TupleTableSlotImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TupleTableSlotImpl.java new file mode 100644 index 000000000..aac490a6c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TupleTableSlotImpl.java @@ -0,0 +1,1117 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.annotation.Native; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.IntBuffer; +import java.nio.LongBuffer; + +import java.util.List; + +import java.util.function.IntUnaryOperator; + +import java.sql.SQLException; + +import static java.util.Objects.checkIndex; +import static java.util.Objects.requireNonNull; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsBoolean; + +import org.postgresql.pljava.Lifespan; + +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.adt.spi.Datum.Accessor; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.DualState; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegClass; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.notyet; + +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; +import static org.postgresql.pljava.pg.DatumUtils.mapCString; +import static org.postgresql.pljava.pg.DatumUtils.asAlwaysCopiedDatum; +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.DatumUtils.inspectVarlena; +import static org.postgresql.pljava.pg.DatumUtils.Accessor.forDeformed; +import static org.postgresql.pljava.pg.DatumUtils.Accessor.forHeap; + +import static org.postgresql.pljava.pg.ModelConstants.HEAPTUPLESIZE; + +import static + org.postgresql.pljava.pg.CatalogObjectImpl.Factory.staticFormObjectId; + +import static org.postgresql.pljava.pg.ModelConstants.*; + +/* + * bool always 1 byte (see c.h). + * + * From PG 12: + * type, flags, nvalid, tupleDescriptor, *values, *isnull, mcxt, tid, tableOid + * flags: EMPTY SHOULDFREE SLOW FIXED + * + * Pre-PG 12 (= for fields present in both): + * type + * individual bool flags + * isempty, shouldFree, shouldFreeMin, slow, fixedTupleDescriptor + * HeapTuple tuple + * =tupleDescriptor + * =mcxt + * buffer + * =nvalid + * =*values + * =*isnull + * mintuple, minhdr, off + * + * tableOid is tuple->t_tableOid, tid is tuple->t_self. + * Can a tuple from a different descendant table then get loaded in the slot? + * Answer: yes. So tableOid can change per tuple. (See ExecStoreHeapTuple.) + * Fetching the tableOid is easy starting with PG 12 (it's right in the TTS + * struct). For PG < 12, a native method will be needed to inspect 'tuple' (or + * just return a ByteBuffer windowing it, to be inspected here). That native + * method will not need to be serialized onto the PG thread, as it only looks at + * an existing struct in memory. + * FWIW, *HeapTuple is a HeapTupleData, and a HeapTupleData has a t_len. 
+ * heap_copytuple allocates HEAPTUPLESIZE + tuple->t_len. The HEAPTUPLESIZE + * covers the HeapTupleData that precedes the HeapTupleHeader; from the start + * of that it's t_len. They could be allocated separately but typically aren't. + * (A HeapTuple in the form of a Datum is without the HeapTupleData part; see + * ExecStoreHeapTupleDatum, which just puts a transient HeapTupleData struct + * on the stack to point to the thing during the operation, deforms it, and + * stores it in virtual form.) + * + * (Also FWIW, to make a MinimalTuple from a HeapTuple, subtract + * MINIMAL_TUPLE_OFFSET from the latter's t_len; the result is the amount to + * allocate and the amount to copy and what goes in the result's t_len.) + * + * For now: support only FIXED/fixedTupleDescriptor slots. For those, the native + * code can create ByteBuffers and pass them all at once to the constructor for: + * the TTS struct itself, the values array, the isnull array, and the TupleDesc + * (this constructor can pass that straight to the TupleDesc constructor). If it + * later makes sense to support non-fixed slots, that will mean checking for + * changes, and possibly creating a new TupleDesc and new values/isnull buffers + * on the fly. + * + * A PostgreSQL TupleTableSlot can be configured with TTSOpsVirtual or + * TTSOpsHeapTuple (or others, not contemplated here). The Heap and Deformed + * subclasses here don't exactly mirror that distinction. What they are really + * distinguishing is which flavor of DatumUtils.Accessor will be used. + * + * That is, the Deformed subclass here relies on getsomeattrs and the + * tts_values/tts_isnull arrays of the slot (which are in fact available for any + * flavor of slot). The Heap subclass here overloads m_values and m_isnull to + * directly map the tuple data, rather than relying on tts_values and + * tts_isnull, so it can only work for slot flavors where such regions exist in + * the expected formats. In other words, a Deformed can be constructed over any + * flavor of PostgreSQL slot (and is the only choice if the slot is + * TTSOpsVirtual); a Heap is an alternative choice only available if the + * underlying slot is known to have the expected null bitmap and data layout, + * and may save the overhead of populating tts_isnull and tts_values arrays from + * the underlying tuple. It would still be possible in principle to exploit + * those arrays in the Heap case if they have been populated, to avoid + * repeatedly walking the tuple, but the Heap implementation here, as of this + * writing, doesn't. Perhaps some refactoring / renaming is needed, so Heap has + * its own instance fields for the directly accessed tuple regions, and the + * m_values / m_isnull in the superclass always map the tts_values / tts_isnull + * arrays? + */ + +/** + * Implementation of {@link TupleTableSlot TupleTableSlot}.
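+ *<p>
+ * A rough usage sketch (the attribute index and {@code adapter} here are
+ * illustrative placeholders, not part of this class): a caller pairs an
+ * {@code Attribute} of this slot's {@code TupleDescriptor} with an
+ * {@code Adapter} able to fetch it, and SQL null comes back through the
+ * adapter's {@code fetchNull}:
+ *<pre>{@code
+ *  Attribute att = slot.descriptor().get(0);
+ *  var value = slot.get(att, adapter);   // adapter.fetchNull(att) if SQL null
+ *}</pre>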
+ */ +public abstract class TupleTableSlotImpl +implements TupleTableSlot +{ + @Native private static final int OFFSET_HeapTupleData_t_len = 0; + @Native private static final int OFFSET_HeapTupleData_t_tableOid = 12; + + @Native private static final int SIZEOF_HeapTupleData_t_len = 4; + @Native private static final int SIZEOF_HeapTupleData_t_tableOid = 4; + + @Native private static final int OFFSET_HeapTupleHeaderData_t_infomask2= 18; + @Native private static final int OFFSET_HeapTupleHeaderData_t_infomask = 20; + @Native private static final int OFFSET_HeapTupleHeaderData_t_hoff = 22; + @Native private static final int OFFSET_HeapTupleHeaderData_t_bits = 23; + + @Native private static final int SIZEOF_HeapTupleHeaderData_t_infomask2 = 2; + @Native private static final int SIZEOF_HeapTupleHeaderData_t_infomask = 2; + @Native private static final int SIZEOF_HeapTupleHeaderData_t_hoff = 1; + + @Native private static final int HEAP_HASNULL = 1; // lives in infomask + @Native private static final int HEAP_HASEXTERNAL = 4; // lives in infomask + @Native private static final int HEAP_NATTS_MASK = 0x07FF; // infomask2 + + @Native private static final int OFFSET_NullableDatum_value = 0; + + protected final ByteBuffer m_tts; + /* These can be final only because non-FIXED slots aren't supported yet. */ + protected final TupleDescriptor m_tupdesc; + protected final ByteBuffer m_values; + protected final ByteBuffer m_isnull; + protected final Accessor[] m_accessors; + protected final Adapter[] m_adapters; + + /* + * Experimenting with yet another pattern for use of DualState. We will + * keep one here and be agnostic about its exact subtype. Methods that + * install a tuple in the slot will be expected to provide a DualState + * instance with this slot as its referent and encapsulating whatever object + * and behavior it needs for cleaning up. Pin/unpin should be done at + * outermost API-exposed methods, not by internal ones. + */ + DualState m_state; + + TupleTableSlotImpl( + ByteBuffer tts, TupleDescriptor tupleDesc, + ByteBuffer values, ByteBuffer isnull) + { + m_tts = null == tts ? null : asReadOnlyNativeOrder(tts); + m_tupdesc = tupleDesc; + /* + * From the Deformed constructor, this is the array of Datum elements. + * From the Heap constructor, it may be null. + */ + m_values = null == values ? null : asReadOnlyNativeOrder(values); + /* + * From the Deformed constructor, this is an array of one-byte booleans. + * From the Heap constructor, it may be null. + */ + m_isnull = null == isnull ? null : asReadOnlyNativeOrder(isnull); + m_adapters = new Adapter [ m_tupdesc.size() ]; + + @SuppressWarnings("unchecked") + Object dummy = + m_accessors = new Accessor [ m_adapters.length ]; + + /* + * A subclass constructor other than Deformed could pass null for tts, + * provided it overrides the inherited relation(), which relies on it. + */ + if ( null == m_tts ) + return; + + /* + * Verify (for now) that this is a FIXED TupleTableSlot. 
+ * JIT will specialize to the test that applies in this PG version + */ + if ( NOCONSTANT != OFFSET_TTS_FLAGS ) + { + if ( 0 != (TTS_FLAG_FIXED & m_tts.getChar(OFFSET_TTS_FLAGS)) ) + return; + } + else if ( NOCONSTANT != OFFSET_TTS_FIXED ) + { + if ( 0 != m_tts.get(OFFSET_TTS_FIXED) ) + return; + } + else + throw new UnsupportedOperationException( + "Cannot construct non-fixed TupleTableSlot (PG < 11)"); + throw new UnsupportedOperationException( + "Cannot construct non-fixed TupleTableSlot"); + } + + static Deformed newDeformed( + ByteBuffer tts, TupleDescriptor tupleDesc, + ByteBuffer values, ByteBuffer isnull) + { + return new Deformed(tts, tupleDesc, values, isnull); + } + + static NullableDatum newNullableDatum( + TupleDescriptor tupleDesc, ByteBuffer values) + { + return new NullableDatum(tupleDesc, values); + } + + /** + * Allocate a 'light' (no native TupleTableSlot struct) + * {@code TupleTableSlotImpl.Heap} object, given a tuple descriptor and + * a byte buffer that maps a single-chunk-allocated {@code HeapTuple} (one + * where the {@code HeapTupleHeader} directly follows the + * {@code HeapTupleData}) that's to be passed to {@code heap_freetuple} when + * no longer needed. + *

    + * If an optional {@code Lifespan} is supplied, the slot will be linked + * to it and invalidated when it expires. Otherwise, the tuple will be + * assumed allocated in an immortal memory context and freed upon the + * {@code javaStateUnreachable} or {@code javaStateReleased} events. + */ + static Heap heapTupleGetLightSlot( + TupleDescriptor td, ByteBuffer ht, Lifespan lifespan) + { + return heapTupleGetLightSlot(td, ht, lifespan, true); + } + + /** + * Allocate a 'light' (no native TupleTableSlot struct) + * {@code TupleTableSlotImpl.Heap} object, given a tuple descriptor and + * a pointer to a single-chunk-allocated {@code HeapTuple} (one where the + * {@code HeapTupleHeader} directly follows the {@code HeapTupleData}) that + * is not to be freed when no longer needed. + *

    + * The first and, so far, only use of this method is from + * {@code TriggerDataImpl} to present the old and/or new tuples for + * inspection in a trigger function. The Lifespan passed should persist + * no longer than the current function invocation, and no special action + * will be taken to free the tuple itself, which belongs to PostgreSQL. + */ + static Heap heapTupleGetLightSlotNoFree( + TupleDescriptor td, long p, Lifespan lifespan) + { + ByteBuffer ht = doInPG(() -> _mapHeapTuple(p)); + return heapTupleGetLightSlot(td, ht, lifespan, false); + } + + static Heap heapTupleGetLightSlot( + TupleDescriptor td, ByteBuffer ht, Lifespan lifespan, boolean free) + { + ht = asReadOnlyNativeOrder(ht); + + assert 4 == SIZEOF_HeapTupleData_t_len + : "sizeof HeapTupleData.t_len changed"; + int len = ht.getInt(OFFSET_HeapTupleData_t_len); + + assert ht.capacity() == len + HEAPTUPLESIZE + : "unexpected length for single-chunk HeapTuple"; + + int relOid = ht.getInt(OFFSET_HeapTupleData_t_tableOid); + + boolean disallowExternal = true; + + /* + * Following offsets are relative to the HeapTupleHeaderData struct. + * Could slice off a new ByteBuffer from HEAPTUPLESIZE here and use + * the offsets directly, but we'll just add HEAPTUPLESIZE to the offsets + * and save constructing that intermediate object. We will slice off + * values and nulls ByteBuffers further below. + */ + + assert 2 == SIZEOF_HeapTupleHeaderData_t_infomask + : "sizeof HeapTupleHeaderData.t_infomask changed"; + short infomask = ht.getShort( + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_infomask); + + assert 2 == SIZEOF_HeapTupleHeaderData_t_infomask2 + : "sizeof HeapTupleHeaderData.t_infomask2 changed"; + short infomask2 = ht.getShort( + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_infomask2); + + assert 1 == SIZEOF_HeapTupleHeaderData_t_hoff + : "sizeof HeapTupleHeaderData.t_hoff changed"; + int hoff = + Byte.toUnsignedInt(ht.get( + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_hoff)); + + if ( disallowExternal && 0 != ( infomask & HEAP_HASEXTERNAL ) ) + throw notyet("heapTupleGetLightSlot with external values in tuple"); + + int voff = hoff + HEAPTUPLESIZE; + + ByteBuffer values = mapFixedLength(ht, voff, ht.capacity() - voff); + ByteBuffer nulls = null; + + if ( 0 != ( infomask & HEAP_HASNULL ) ) + { + int nlen = ( td.size() + 7 ) / 8; + if ( nlen + OFFSET_HeapTupleHeaderData_t_bits > hoff ) + { + int attsReallyPresent = infomask2 & HEAP_NATTS_MASK; + nlen = ( attsReallyPresent + 7 ) / 8; + assert nlen + OFFSET_HeapTupleHeaderData_t_bits <= hoff + : "heap null bitmap length"; + } + nulls = mapFixedLength(ht, + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_bits, nlen); + } + + Heap slot = new Heap( + staticFormObjectId(RegClass.CLASSID, relOid), td, values, nulls); + + slot.m_state = + free + ? new HTChunkState(slot, lifespan, ht) + : new BBOnlyState(slot, lifespan, ht); + + return slot; + } + + /** + * Return the index into {@code m_accessors} for this attribute, + * ensuring the elements at that index of {@code m_accessors} and + * {@code m_adapters} are set, or throw an exception if + * this {@code Attribute} doesn't belong to this slot's + * {@code TupleDescriptor}, or if the supplied {@code Adapter} can't + * fetch it. + *

    + * Most tests are skipped if the index is in range and {@code m_adapters} + * at that index already contains the supplied {@code Adapter}. + */ + protected int toIndex(Attribute att, Adapter adp) + { + int idx = att.subId() - 1; + + if ( 0 > idx || idx >= m_adapters.length + || m_adapters [ idx ] != requireNonNull(adp) ) + { + if ( ! m_tupdesc.contains(att) ) + { + throw new IllegalArgumentException( + "attribute " + att + " does not go with slot " + this); + } + + memoize(idx, att, adp); + } + + return idx; + } + + /** + * Return the {@code Attribute} at this index into the associated + * {@code TupleDescriptor}, + * ensuring the elements at that index of {@code m_accessors} and + * {@code m_adapters} are set, or throw an exception if + * this {@code Attribute} doesn't belong to this slot's + * {@code TupleDescriptor}, or if the supplied {@code Adapter} can't + * fetch it. + *

    + * Most tests are skipped if the index is in range and {@code m_adapters} + * at that index already contains the supplied {@code Adapter}. + */ + protected Attribute fromIndex(int idx, Adapter adp) + { + Attribute att = m_tupdesc.get(idx); + if ( m_adapters [ idx ] != requireNonNull(adp) ) + memoize(idx, att, adp); + return att; + } + + /** + * Called after verifying that att belongs to this slot's + * {@code TupleDescriptor}, that idx is its corresponding + * (zero-based) index, and that {@code m_adapters[idx]} does not already + * contain adp. + */ + protected void memoize(int idx, Attribute att, Adapter adp) + { + if ( ! adp.canFetch(att) ) + { + throw new IllegalArgumentException(String.format( + "cannot fetch attribute %s of type %s using %s", + att, att.type(), adp)); + } + + m_adapters [ idx ] = adp; + + if ( null == m_accessors [ idx ] ) + { + boolean byValue = att.byValue(); + short length = att.length(); + + m_accessors [ idx ] = selectAccessor(byValue, length); + } + } + + /** + * Selects appropriate {@code Accessor} for this {@code Layout} given + * byValue and length. + */ + protected abstract Accessor selectAccessor( + boolean byValue, short length); + + /** + * Returns the previously-selected {@code Accessor} for the item at the + * given index. + *

    + * The indirection's cost may be regrettable, but it simplifies the + * implementation of {@code Indexed}. + */ + protected Accessor accessor(int idx) + { + return m_accessors[idx]; + } + + /** + * Only to be called after idx is known valid + * from calling {@code toIndex}. + */ + protected abstract boolean isNull(int idx); + + /** + * Only to be called after idx is known valid + * from calling {@code toIndex}. + */ + protected abstract int toOffset(int idx); + + /** + * Implementation of {@link TupleTableSlot TupleTableSlot} for deformed + * layout. + */ + static class Deformed extends TupleTableSlotImpl + { + Deformed( + ByteBuffer tts, TupleDescriptor tupleDesc, + ByteBuffer values, ByteBuffer isnull) + { + super(tts, tupleDesc, values, requireNonNull(isnull)); + } + + @Override + protected int toIndex(Attribute att, Adapter adp) + { + int idx = super.toIndex(att, adp); + + getsomeattrs(idx); + return idx; + } + + @Override + protected Attribute fromIndex(int idx, Adapter adp) + { + Attribute att = super.fromIndex(idx, adp); + + getsomeattrs(idx); + return att; + } + + @Override + protected Accessor selectAccessor( + boolean byValue, short length) + { + return forDeformed(byValue, length); + } + + @Override + protected boolean isNull(int idx) + { + return 0 != m_isnull.get(idx); + } + + @Override + protected int toOffset(int idx) + { + return idx * SIZEOF_DATUM; + } + + /** + * Like PostgreSQL's {@code slot_getsomeattrs}, but {@code idx} here is + * zero-based (one will be added when it is passed to PostgreSQL). + */ + private void getsomeattrs(int idx) + { + int nValid; + if ( 2 == SIZEOF_TTS_NVALID ) + nValid = m_tts.getShort(OFFSET_TTS_NVALID); + else + { + assert 4 == SIZEOF_TTS_NVALID : "unexpected SIZEOF_TTS_NVALID"; + nValid = m_tts.getInt(OFFSET_TTS_NVALID); + } + if ( nValid <= idx ) + doInPG(() -> _getsomeattrs(m_tts, 1 + idx)); + } + } + + /** + * Implementation of {@link TupleTableSlot TupleTableSlot} for heap + * layout. + */ + static class Heap extends TupleTableSlotImpl + { + protected final ByteBuffer m_hValues; + protected final ByteBuffer m_hIsNull; + protected final RegClass m_relation; + + Heap( + RegClass relation, TupleDescriptor tupleDesc, + ByteBuffer hValues, ByteBuffer hIsNull) + { + super(null, tupleDesc, null, null); + m_relation = requireNonNull(relation); + m_hValues = requireNonNull(hValues); + m_hIsNull = hIsNull; + } + + @Override + protected Accessor selectAccessor( + boolean byValue, short length) + { + return forHeap(byValue, length); + } + + @Override + protected boolean isNull(int idx) + { + if ( null == m_hIsNull ) + return false; + + // XXX we could have actual natts < m_tupdesc.size() + return 0 == ( m_hIsNull.get(idx >>> 3) & (1 << (idx & 7)) ); + } + + @Override + protected int toOffset(int idx) + { + int offset = 0; + List atts = m_tupdesc; + Attribute att; + + /* + * This logic is largely duplicated in Heap.Indexed.toOffsetNonFixed + * and will probably need to be changed there too if anything is + * changed here. + */ + for ( int i = 0 ; i < idx ; ++ i ) + { + if ( isNull(i) ) + continue; + + att = atts.get(i); + + int align = alignmentModulus(att.alignment()); + int len = att.length(); + + /* + * Skip the fuss of aligning if align isn't greater than 1. + * More interestingly, whether to align in the varlena case + * (length of -1) depends on whether the byte at the current + * offset is zero. 
Each outcome includes two subcases, for one + * of which it doesn't matter whether we align or not because + * the offset is already aligned, and for the other of which it + * does matter, so that determines the choice. If the byte seen + * there is zero, it might be a pad byte and require aligning, + * so align. See att_align_pointer in PG's access/tupmacs.h. + */ + if ( align > 1 && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += + - m_hValues.alignmentOffset(offset, align) & (align-1); + + if ( 0 <= len ) // a nonnegative length is used directly + offset += len; + else if ( -1 == len ) // find and skip the length of the varlena + offset += inspectVarlena(m_hValues, offset); + else if ( -2 == len ) // NUL-terminated value, skip past the NUL + { + while ( 0 != m_hValues.get(offset) ) + ++ offset; + ++ offset; + } + else + throw new AssertionError( + "cannot skip attribute with weird length " + len); + } + + att = atts.get(idx); + + int align = alignmentModulus(att.alignment()); + int len = att.length(); + /* + * Same alignment logic as above. + */ + if ( align > 1 && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += -m_hValues.alignmentOffset(offset, align) & (align-1); + + return offset; + } + + @Override + ByteBuffer values() + { + return m_hValues; + } + + @Override + public RegClass relation() + { + return m_relation; + } + + /** + * Something that resembles a {@code Heap} tuple, but consists of + * a number of elements all of the same type, distinguished by index. + *

    + * Constructed with a one-element {@code TupleDescriptor} whose single
    + * {@code Attribute} describes the type of all elements.
    + *

    + * + */ + static class Indexed extends Heap implements TupleTableSlot.Indexed + { + private final int m_elements; + private final IntUnaryOperator m_toOffset; + + Indexed( + TupleDescriptor td, int elements, + ByteBuffer nulls, ByteBuffer values) + { + super(td.get(0).relation(), td, values, nulls); + assert elements >= 0 : "negative element count"; + assert null == nulls || nulls.capacity() == (elements+7)/8 + : "nulls length element count mismatch"; + m_elements = elements; + + Attribute att = td.get(0); + int length = att.length(); + int align = alignmentModulus(att.alignment()); + assert 0 == values.alignmentOffset(0, align) + : "misaligned ByteBuffer passed"; + int mask = align - 1; // make it a mask + if ( length < 0 ) // the non-fixed case + /* + * XXX without offset memoization of some kind, this will be + * a quadratic way of accessing elements, but that can be + * improved later. + */ + m_toOffset = i -> toOffsetNonFixed(i, length, mask); + else + { + int stride = length + ( -(length & mask) & mask ); + if ( null == nulls ) + m_toOffset = i -> i * stride; + else + m_toOffset = i -> (i - nullsPreceding(i)) * stride; + } + } + + @Override + public int elements() + { + return m_elements; + } + + @Override + protected Attribute fromIndex(int idx, Adapter adp) + { + checkIndex(idx, m_elements); + Attribute att = m_tupdesc.get(0); + if ( m_adapters [ 0 ] != requireNonNull(adp) ) + memoize(0, att, adp); + return att; + } + + @Override + protected int toOffset(int idx) + { + return m_toOffset.applyAsInt(idx); + } + + @Override + protected Accessor accessor(int idx) + { + return m_accessors[0]; + } + + private int nullsPreceding(int idx) + { + int targetByte = idx >>> 3; + int targetBit = 1 << ( idx & 7 ); + byte b = m_hIsNull.get(targetByte); + /* + * The nulls bitmask has 1 bits where values are *not* null. + * Java has a bitCount method that counts 1 bits. So the loop + * below will have an invert step before counting bits. That + * means we want to modify *this* byte to have 1 at the target + * position *and above*, so all those bits will invert to zero + * before we count them. The next step does that. + */ + b |= - targetBit; + int count = Integer.bitCount(Byte.toUnsignedInt(b) ^ 0xff); + for ( int i = 0; i < targetByte; ++ i ) + { + b = m_hIsNull.get(i); + count += Integer.bitCount(Byte.toUnsignedInt(b) ^ 0xff); + } + return count; + } + + /** + * Largely duplicates the superclass {@code toOffset} but + * specialized to only a single attribute type that is repeated. + *
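The bit counting in nullsPreceding just above may be easier to follow over a plain byte array than a ByteBuffer; here is a sketch with illustrative names. PostgreSQL's nulls bitmap has a 1 bit where an element is not null, so the null elements before idx are the zero bits below idx.

    public class NullsPrecedingSketch
    {
        static int nullsPreceding(byte[] notNullBits, int idx)
        {
            int targetByte = idx >>> 3;
            byte b = notNullBits[targetByte];
            b |= -(1 << (idx & 7));                    // force the bit at idx and above to 1
            int count = Integer.bitCount((~b) & 0xFF); // zero bits strictly below idx
            for ( int i = 0 ; i < targetByte ; ++ i )
                count += Integer.bitCount((~notNullBits[i]) & 0xFF);
            return count;
        }

        public static void main(String[] args)
        {
            byte[] bits = { (byte)0b1010_1101 };         // elements 1, 4, 6 are null
            System.out.println(nullsPreceding(bits, 5)); // 2 (indexes 1 and 4)
        }
    }
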

    + * Only covers the non-fixed-length cases (length of -1 or -2). + * Assumes the byte buffer is already aligned such that offset 0 + * satisfies the alignment constraint. + *

    + * Important: align here is a mask; the caller + * has subtracted 1 from it, compared to the align value + * seen in the superclass implementation. + */ + private int toOffsetNonFixed(int idx, int len, int align) + { + int offset = 0; + + if ( null != m_hIsNull ) + idx -= nullsPreceding(idx); + + /* + * The following code is very similar to that in the superclass, + * other than having already converted align to a mask (changing + * the test below to align>0 where the superclass has align>1), + * and having already reduced idx by the preceding nulls. If any + * change is needed here, it is probably needed there too. + */ + for ( int i = 0 ; i < idx ; ++ i ) + { + if ( align > 0 + && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += - (offset & align) & align; + + if ( -1 == len ) // find and skip the length of the varlena + offset += inspectVarlena(m_hValues, offset); + else if ( -2 == len ) // NUL-terminated, skip past the NUL + { + while ( 0 != m_hValues.get(offset) ) + ++ offset; + ++ offset; + } + else + throw new AssertionError( + "cannot skip attribute with weird length " + len); + } + + /* + * Same alignment logic as above. + */ + if ( align > 0 && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += - (offset & align) & align; + + return offset; + } + } + } + + /** + * Implementation of {@link TupleTableSlot TupleTableSlot} for + * {@code NullableDatum} layout, as used for PL routine arguments. + */ + static class NullableDatum extends TupleTableSlotImpl + { + NullableDatum(TupleDescriptor tupleDesc, ByteBuffer values) + { + super(null, tupleDesc, values, null); + } + + @Override + protected Accessor selectAccessor( + boolean byValue, short length) + { + return forDeformed(byValue, length); + } + + @Override + protected boolean isNull(int idx) + { + return 0 != m_values.get( + idx * SIZEOF_NullableDatum + OFFSET_NullableDatum_isnull); + } + + @Override + protected int toOffset(int idx) + { + return idx * SIZEOF_NullableDatum + OFFSET_NullableDatum_value; + } + + @Override + public RegClass relation() + { + return RegClass.CLASSID.invalid(); + } + } + + @Override + public RegClass relation() + { + int tableOid; + + if ( NOCONSTANT == OFFSET_TTS_TABLEOID ) + throw notyet("table Oid from TupleTableSlot in PostgreSQL < 12"); + + tableOid = m_tts.getInt(OFFSET_TTS_TABLEOID); + return staticFormObjectId(RegClass.CLASSID, tableOid); + } + + @Override + public TupleDescriptor descriptor() + { + return m_tupdesc; + } + + ByteBuffer values() + { + return m_values; + } + + void store_heaptuple(long ht, boolean shouldFree) + { + doInPG(() -> _store_heaptuple(m_tts, ht, shouldFree)); + } + + private static native void _getsomeattrs(ByteBuffer tts, int idx); + + private static native ByteBuffer _mapHeapTuple(long nativeAddress); + + private static native void _store_heaptuple( + ByteBuffer tts, long ht, boolean shouldFree); + + @Override + public T get(Attribute att, As adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public long get(Attribute att, AsLong adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public double get(Attribute att, AsDouble adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + 
int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public int get(Attribute att, AsInt adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public float get(Attribute att, AsFloat adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public short get(Attribute att, AsShort adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public char get(Attribute att, AsChar adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public byte get(Attribute att, AsByte adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public boolean get(Attribute att, AsBoolean adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public T get(int idx, As adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public long get(int idx, AsLong adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public double get(int idx, AsDouble adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public int get(int idx, AsInt adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public float get(int idx, AsFloat adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public short get(int idx, AsShort adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public char get(int idx, AsChar adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public byte get(int idx, AsByte adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = 
toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public boolean get(int idx, AsBoolean adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + private static class HTChunkState + extends DualState.BBHeapFreeTuple + { + private HTChunkState( + TupleTableSlotImpl referent, Lifespan span, ByteBuffer ht) + { + super(referent, span, ht); + } + } + + private static class BBOnlyState + extends DualState.SingleGuardedBB + { + private BBOnlyState( + TupleTableSlotImpl referent, Lifespan span, ByteBuffer ht) + { + super(referent, span, ht); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/ArrayAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ArrayAdapter.java new file mode 100644 index 000000000..746f2b1dd --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ArrayAdapter.java @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.lang.reflect.Type; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; +import java.nio.IntBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.util.List; +import static java.util.Objects.requireNonNull; + +import java.util.stream.IntStream; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.Contract; + +import org.postgresql.pljava.adt.Array.AsFlatList; +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegClass; +import org.postgresql.pljava.model.RegType; +import static org.postgresql.pljava.model.RegType.ANYARRAY; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.of; +import static org.postgresql.pljava.pg.DatumUtils.indexedTupleSlot; +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_ndim; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_ndim; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_elemtype; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_elemtype; +import static + org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_dataoffset; +import static + org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_dataoffset; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_DIMS; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_DIM; +import static org.postgresql.pljava.pg.ModelConstants.VARHDRSZ; + +import static org.postgresql.pljava.pg.ModelConstants.MAXIMUM_ALIGNOF; + +/* + * The representation details are found in include/utils/array.h + */ + +/** + * Ancestor of adapters that can map a PostgreSQL array to some representation + * {@literal }. 
+ * @param Java type to represent the entire array. + */ +public class ArrayAdapter extends Adapter.Array +{ + private static final Configuration s_config; + + /** + * An {@code ArrayAdapter} that maps any PostgreSQL array with element type + * compatible with {@link TextAdapter TextAdapter} to flat (disregarding the + * PostgreSQL array's dimensionality) {@code List} of {@code String}, + * with any null elements mapped to Java null. + */ + public static final + ArrayAdapter> FLAT_STRING_LIST_INSTANCE; + + public static final + ArrayAdapter TYPE_OBTAINING_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(ArrayAdapter.class, Via.DATUM)); + + s_config = config; + + FLAT_STRING_LIST_INSTANCE = new ArrayAdapter<>( + TextAdapter.INSTANCE, AsFlatList.of(AsFlatList::nullsIncludedCopy)); + + TYPE_OBTAINING_INSTANCE = new ArrayAdapter( + Opaque.INSTANCE, new ElementTypeContract()); + } + + /** + * Constructs an array adapter given an adapter that returns a reference + * type {@literal } for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.As element, Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code long} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsLong element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code double} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsDouble element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code int} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsInt element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code float} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsFloat element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code short} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsShort element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code char} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsChar element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code byte} for the element type, and a corresponding array + * contract producing {@literal }. 
+ */ + public ArrayAdapter( + Adapter.AsByte element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code boolean} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsBoolean element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + ArrayAdapter( + Adapter.As element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsLong element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsDouble element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsInt element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsFloat element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsShort element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsChar element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsByte element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsBoolean element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + /** + * Whether this adapter can be applied to the given PostgreSQL type. + *

    + * If not overridden, simply requires that pgType is an array + * type and that its declared element type is acceptable to {@code canFetch} + * of the configured element adapter. + */ + @Override + public boolean canFetch(RegType pgType) + { + RegType elementType = pgType.element(); + if ( elementType.isValid() && m_elementAdapter.canFetch(elementType) ) + return true; + return + ANYARRAY == pgType && Opaque.INSTANCE == m_elementAdapter; + } + + /** + * Returns the result of applying the configured element adapter and + * {@link Contract.Array array contract} to the contents of the array + * in. + */ + public T fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer().order(nativeOrder()); + + assert 4 == SIZEOF_ArrayType_ndim : "ArrayType.ndim size change"; + int nDims = bb.getInt(OFFSET_ArrayType_ndim); + + assert 4 == SIZEOF_ArrayType_elemtype + : "ArrayType.elemtype size change"; + RegType elementType = + of(RegType.CLASSID, bb.getInt(OFFSET_ArrayType_elemtype)); + + if ( ! m_elementAdapter.canFetch(elementType) ) + throw new IllegalArgumentException(String.format( + "cannot fetch array element of type %s using %s", + elementType, m_elementAdapter)); + + assert 4 == SIZEOF_ArrayType_dataoffset + : "ArrayType.dataoffset size change"; + int dataOffset = bb.getInt(OFFSET_ArrayType_dataoffset); + + boolean hasNulls = 0 != dataOffset; + + int dimsOffset = OFFSET_ArrayType_DIMS; + int dimsBoundsLength = 2 * nDims * SIZEOF_ArrayType_DIM; + + assert 4 == SIZEOF_ArrayType_DIM : "ArrayType dim size change"; + IntBuffer dimsAndBounds = + mapFixedLength(bb, dimsOffset, dimsBoundsLength).asIntBuffer(); + + int nItems = 0 == nDims ? 0 : + IntStream.range(0, nDims).map(dimsAndBounds::get) + .reduce(1, Math::multiplyExact); + + ByteBuffer nulls; + + if ( hasNulls ) + { + int nullsOffset = dimsOffset + dimsBoundsLength; + int nullsLength = (nItems + 7) / 8; + nulls = mapFixedLength(bb, nullsOffset, nullsLength); + /* + * In the with-nulls case, PostgreSQL has supplied dataOffset. + * But it includes VARHDRSZ, and a VarlenaWrapper doesn't + * include that first word. + */ + dataOffset -= VARHDRSZ; + } + else + { + nulls = null; + /* + * In the no-nulls case, computing dataOffset is up to us. + */ + dataOffset = dimsOffset + dimsBoundsLength; + dataOffset += + - bb.alignmentOffset(dataOffset, MAXIMUM_ALIGNOF) + & (MAXIMUM_ALIGNOF - 1); + } + + ByteBuffer values = + mapFixedLength(bb, dataOffset, bb.capacity() - dataOffset); + + TupleTableSlot.Indexed tti = + indexedTupleSlot(elementType, nItems, nulls, values); + + int[] dimsBoundsArray = new int [ dimsAndBounds.capacity() ]; + dimsAndBounds.get(dimsBoundsArray); + + /* + * The accessible constructors ensured that m_elementAdapter and + * m_contract have compatible parameterized types. They were stored + * as raw types to avoid having extra type parameters on array + * adapters that are of no interest to code that makes use of them. + */ + @SuppressWarnings("unchecked") + T result = (T)m_contract.construct( + nDims, dimsBoundsArray, m_elementAdapter, tti); + + return result; + } + finally + { + in.unpin(); + in.close(); + } + } + + /** + * A contract that cannot retrieve any element, but returns the array's + * internally-recorded element type. 
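As a usage sketch only, the flat-list instance defined earlier in this file could be applied through TupleTableSlot as below. These adapter classes live in PL/Java's internal implementation rather than the exported API, and slot and att (a column whose element type TextAdapter accepts) are assumed to be obtained elsewhere.

    import java.sql.SQLException;
    import java.util.List;

    import org.postgresql.pljava.model.Attribute;
    import org.postgresql.pljava.model.TupleTableSlot;
    import org.postgresql.pljava.pg.adt.ArrayAdapter;

    public class ArrayFetchSketch
    {
        /** Fetch a text[] column as one flat List, nulls included. */
        static List<String> textArray(TupleTableSlot slot, Attribute att)
        throws SQLException
        {
            return slot.get(att, ArrayAdapter.FLAT_STRING_LIST_INSTANCE);
        }
    }
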
+ */ + private static class ElementTypeContract + implements Contract.Array> + { + @Override + public RegType construct( + int nDims, int[] dimsAndBounds, Adapter.As adapter, + TupleTableSlot.Indexed slot) + throws SQLException + { + return slot.descriptor().get(0).type(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/ByteaAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ByteaAdapter.java new file mode 100644 index 000000000..b0fbed3f6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ByteaAdapter.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.InputStream; +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL {@code bytea}. + */ +public abstract class ByteaAdapter extends Adapter.Container +{ + private ByteaAdapter() // no instances + { + } + + public static final Bytes ARRAY_INSTANCE; + public static final Stream STREAM_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Bytes.class, Via.DATUM), + configure(Stream.class, Via.DATUM) + }); + + ARRAY_INSTANCE = new Bytes(configs[0]); + STREAM_INSTANCE = new Stream(configs[1]); + } + + /** + * Adapter producing a Java byte array. + */ + public static class Bytes extends Adapter.As + { + private Bytes(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.BYTEA == pgType; + } + + public byte[] fetch(Attribute a, Datum.Input in) + throws SQLException + { + in.pin(); + try + { + ByteBuffer b = in.buffer(); + byte[] array = new byte [ b.limit() ]; + // Java >= 13: b.get(0, array) + b.rewind().get(array); + return array; + } + finally + { + in.unpin(); + } + } + } + + /** + * Adapter producing an {@code InputStream}. + */ + public static class Stream extends Adapter.As + { + private Stream(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.BYTEA == pgType; + } + + public InputStream fetch(Attribute a, Datum.Input in) + throws SQLException + { + return in.inputStream(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/DateTimeAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/DateTimeAdapter.java new file mode 100644 index 000000000..408828fbf --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/DateTimeAdapter.java @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.OffsetTime; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Datetime; +import org.postgresql.pljava.adt.Timespan; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.model.SlotTester.Visible; // temporary for test jig + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * PostgreSQL date, time, timestamp, and interval types, available in various + * representations by implementing the corresponding functional interfaces + * to construct them. + */ +public abstract class DateTimeAdapter extends Adapter.Container +{ + private DateTimeAdapter() // no instances + { + } + + private static final Configuration[] s_configs; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Date.class, Via.INT32SX), + configure( Time.class, Via.INT64SX), + configure( TimeTZ.class, Via.DATUM ), + configure( Timestamp.class, Via.INT64SX), + configure(TimestampTZ.class, Via.INT64SX), + configure( Interval.class, Via.DATUM ) + }); + + s_configs = configs; + } + + /** + * Instances of the date/time/timestamp adapters using the JSR310 + * {@code java.time} types. + *

    + * A holder interface so these won't be instantiated unless wanted. + */ + public interface JSR310 extends Visible + { + Date DATE_INSTANCE = + new Date<>(Datetime.Date.AsLocalDate.INSTANCE); + + Time TIME_INSTANCE = + new Time<>(Datetime.Time.AsLocalTime.INSTANCE); + + TimeTZ TIMETZ_INSTANCE = + new TimeTZ<>(Datetime.TimeTZ.AsOffsetTime.INSTANCE); + + Timestamp TIMESTAMP_INSTANCE = + new Timestamp<>(Datetime.Timestamp.AsLocalDateTime.INSTANCE); + + TimestampTZ TIMESTAMPTZ_INSTANCE = + new TimestampTZ<>(Datetime.TimestampTZ.AsOffsetDateTime.INSTANCE); + + /* + * See org.postgresql.pljava.adt.Timespan.Interval for why a reference + * implementation for that type is missing here. + */ + } + + /** + * Adapter for the {@code DATE} type to the functional interface + * {@link Datetime.Date Datetime.Date}. + */ + public static class Date extends Adapter.As + { + private Datetime.Date m_ctor; + public Date(Datetime.Date ctor) + { + super(ctor, null, s_configs[0]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.DATE == pgType; + } + + public T fetch(Attribute a, int in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code TIME} type to the functional interface + * {@link Datetime.Time Datetime.Time}. + */ + public static class Time extends Adapter.As + { + private Datetime.Time m_ctor; + public Time(Datetime.Time ctor) + { + super(ctor, null, s_configs[1]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIME == pgType; + } + + public T fetch(Attribute a, long in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code TIME WITH TIME ZONE} type to the functional + * interface {@link Datetime.TimeTZ Datetime.TimeTZ}. + */ + public static class TimeTZ extends Adapter.As + { + private Datetime.TimeTZ m_ctor; + public TimeTZ(Datetime.TimeTZ ctor) + { + super(ctor, null, s_configs[2]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIMETZ == pgType; + } + + public T fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + long microsecondsSincePostgresEpoch = bb.getLong(); + int secondsWestOfPrimeMeridian = bb.getInt(); + return m_ctor.construct( + microsecondsSincePostgresEpoch, secondsWestOfPrimeMeridian); + } + finally + { + in.unpin(); + in.close(); + } + } + } + + /** + * Adapter for the {@code TIMESTAMP} type to the functional + * interface {@link Datetime.Timestamp Datetime.Timestamp}. + */ + public static class Timestamp extends Adapter.As + { + private Datetime.Timestamp m_ctor; + public Timestamp(Datetime.Timestamp ctor) + { + super(ctor, null, s_configs[3]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIMESTAMP == pgType; + } + + public T fetch(Attribute a, long in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code TIMESTAMP WITH TIME ZONE} type to the functional + * interface {@link Datetime.TimestampTZ Datetime.TimestampTZ}. 
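A usage sketch for the JSR310 holder above; the class name is illustrative, and slot and the attributes are assumed to come from elsewhere.

    import java.sql.SQLException;
    import java.time.LocalDate;
    import java.time.OffsetDateTime;

    import org.postgresql.pljava.model.Attribute;
    import org.postgresql.pljava.model.TupleTableSlot;
    import static org.postgresql.pljava.pg.adt.DateTimeAdapter.JSR310.DATE_INSTANCE;
    import static org.postgresql.pljava.pg.adt.DateTimeAdapter.JSR310.TIMESTAMPTZ_INSTANCE;

    public class DateTimeFetchSketch
    {
        static LocalDate date(TupleTableSlot slot, Attribute att)
        throws SQLException
        {
            return slot.get(att, DATE_INSTANCE);
        }

        static OffsetDateTime timestamptz(TupleTableSlot slot, Attribute att)
        throws SQLException
        {
            return slot.get(att, TIMESTAMPTZ_INSTANCE);
        }
    }
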
+ */ + public static class TimestampTZ extends Adapter.As + { + private Datetime.TimestampTZ m_ctor; + public TimestampTZ(Datetime.TimestampTZ ctor) + { + super(ctor, null, s_configs[4]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIMESTAMPTZ == pgType; + } + + public T fetch(Attribute a, long in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code INTERVAL} type to the functional + * interface {@link Timespan.Interval Timespan.Interval}. + */ + public static class Interval extends Adapter.As + { + private static final Simple + s_name_INTERVAL = Simple.fromJava("interval"); + + private static RegType s_intervalType; + + private Timespan.Interval m_ctor; + public Interval(Timespan.Interval ctor) + { + super(ctor, null, s_configs[5]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + /* + * There has to be some kind of rule for which data types deserve + * their own RegType constants. The date/time/timestamp ones all do + * because JDBC mentions them, but it doesn't mention interval. + * So just compare it by name here, unless the decision is made + * to have a RegType constant for it too. + */ + RegType intervalType = s_intervalType; + if ( null != intervalType ) // did we match the type and cache it? + return intervalType == pgType; + + if ( ! s_name_INTERVAL.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + + /* + * Hang onto this matching RegType for faster future checks. + * Because RegTypes are singletons, and reference writes can't + * be torn, this isn't evil as data races go. + */ + s_intervalType = pgType; + return true; + } + + public T fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + long microseconds = bb.getLong(); + int days = bb.getInt(); + int months = bb.getInt(); + return m_ctor.construct(microseconds, days, months); + } + finally + { + in.unpin(); + in.close(); + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/EncodingAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/EncodingAdapter.java new file mode 100644 index 000000000..d2df9b5da --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/EncodingAdapter.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.CharsetEncoding; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL character set encoding ({@code int4} in the catalogs) represented + * as {@code CharsetEncoding}. 
+ */ +public class EncodingAdapter extends Adapter.As +{ + public static final EncodingAdapter INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(EncodingAdapter.class, Via.INT32SX)); + + INSTANCE = new EncodingAdapter(config); + } + + EncodingAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT4 == pgType; + } + + public CharsetEncoding fetch(Attribute a, int in) + throws SQLException, IOException + { + return -1 == in ? CharsetEncoding.ANY : CharsetEncoding.fromOrdinal(in); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/GrantAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/GrantAdapter.java new file mode 100644 index 000000000..1848710da --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/GrantAdapter.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.util.List; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Array.AsFlatList; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.CatalogObject.Grant; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.pg.AclItem; + +/** + * PostgreSQL {@code aclitem} represented as {@link Grant Grant}. + */ +public class GrantAdapter extends Adapter.As +{ + public static final GrantAdapter INSTANCE; + + public static final ArrayAdapter> LIST_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(GrantAdapter.class, Via.DATUM)); + + INSTANCE = new GrantAdapter(config); + + LIST_INSTANCE = new ArrayAdapter<>(INSTANCE, + AsFlatList.of(AsFlatList::nullsIncludedCopy)); + } + + private GrantAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.ACLITEM == pgType; + } + + public Grant fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + in.pin(); + try + { + ByteBuffer b = in.buffer().order(nativeOrder()); + return new AclItem.NonRole(b); + } + finally + { + in.unpin(); + in.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/MoneyAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/MoneyAdapter.java new file mode 100644 index 000000000..4694a354e --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/MoneyAdapter.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Money; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Adapter for the {@code MONEY} type to the functional interface {@link Money}. + */ +public abstract class MoneyAdapter extends Adapter.As +{ + private static final Simple s_name_MONEY = Simple.fromJava("money"); + private static RegType s_moneyType; + private final Money m_ctor; + + @SuppressWarnings("removal") // JEP 411 + private static final Configuration s_config = + AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(MoneyAdapter.class, Via.INT64SX)); + + public MoneyAdapter(Money ctor) + { + super(ctor, null, s_config); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + /* + * There has to be some kind of rule for which data types deserve + * their own RegType constants. The date/time/timestamp ones all do + * because JDBC mentions them, but it doesn't mention interval. + * So just compare it by name here, unless the decision is made + * to have a RegType constant for it too. + */ + RegType moneyType = s_moneyType; + if ( null != moneyType ) // did we match the type and cache it? + return moneyType == pgType; + + if ( ! s_name_MONEY.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + + /* + * Hang onto this matching RegType for faster future checks. + * Because RegTypes are singletons, and reference writes can't + * be torn, this isn't evil as data races go. + */ + s_moneyType = pgType; + return true; + } + + public T fetch(Attribute a, long scaledToInteger) + throws IOException, SQLException + { + return m_ctor.construct(scaledToInteger); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/NameAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NameAdapter.java new file mode 100644 index 000000000..b06fa76c3 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NameAdapter.java @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + +import org.postgresql.pljava.model.RegType; + +import static org.postgresql.pljava.pg.DatumUtils.mapCString; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * PostgreSQL {@code name} type represented as + * {@code Lexicals.Identifier.Simple} or {@code Lexicals.Identifier.Operator}. + */ +public abstract class NameAdapter +extends Adapter.As +{ + public static final Simple SIMPLE_INSTANCE; + public static final Operator OPERATOR_INSTANCE; + public static final AsString AS_STRING_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Simple.class, Via.DATUM), + configure(Operator.class, Via.DATUM), + configure(AsString.class, Via.DATUM) + }); + + SIMPLE_INSTANCE = new Simple(configs[0]); + OPERATOR_INSTANCE = new Operator(configs[1]); + AS_STRING_INSTANCE = new AsString(configs[2]); + } + + NameAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.NAME == pgType; + } + + /** + * Adapter for the {@code name} type, returning an + * {@link Identifier.Simple Identifier.Simple}. + */ + public static class Simple extends NameAdapter + { + private Simple(Configuration c) + { + super(c); + } + + public Identifier.Simple fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return Identifier.Simple.fromCatalog(decoded(in)); + } + } + + /** + * Adapter for the {@code name} type, returning an + * {@link Identifier.Operator Identifier.Operator}. + */ + public static class Operator extends NameAdapter + { + private Operator(Configuration c) + { + super(c); + } + + public Identifier.Operator fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return Identifier.Operator.from(decoded(in)); + } + } + + /** + * Adapter for the {@code name} type, returning a Java {@code String}. + *
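A usage sketch contrasting the two result flavors declared above; helper names are illustrative, and slot and att (a name-typed column such as relname) come from elsewhere. The Identifier.Simple form keeps SQL identifier semantics, while AS_STRING_INSTANCE just yields the raw catalog string.

    import java.sql.SQLException;

    import org.postgresql.pljava.model.Attribute;
    import org.postgresql.pljava.model.TupleTableSlot;
    import org.postgresql.pljava.pg.adt.NameAdapter;
    import org.postgresql.pljava.sqlgen.Lexicals.Identifier;

    public class NameFetchSketch
    {
        static Identifier.Simple asIdentifier(TupleTableSlot slot, Attribute att)
        throws SQLException
        {
            return slot.get(att, NameAdapter.SIMPLE_INSTANCE);
        }

        static String asPlainString(TupleTableSlot slot, Attribute att)
        throws SQLException
        {
            return slot.get(att, NameAdapter.AS_STRING_INSTANCE);
        }
    }
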

    + * This may be convenient for some casual uses, but a Java string will not + * observe any of the peculiar case-sensitivity rules of SQL identifiers. + */ + public static class AsString extends Adapter.As + { + private AsString(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.NAME == pgType; + } + + public String fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return decoded(in); + } + } + + static final String decoded(Datum.Input in) throws SQLException, IOException + { + in.pin(); + try + { + ByteBuffer bnew = mapCString(in.buffer(), 0); + return SERVER_ENCODING.decode(bnew).toString(); + } + finally + { + in.unpin(); + in.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/NumericAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NumericAdapter.java new file mode 100644 index 000000000..35f871763 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NumericAdapter.java @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.math.BigDecimal; + +import java.nio.ShortBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Numeric; +import org.postgresql.pljava.adt.Numeric.Kind; +import org.postgresql.pljava.adt.Numeric.AsBigDecimal; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * Adapter for the {@code NUMERIC} type to the functional interface + * {@link Numeric}. + */ +public class NumericAdapter extends Adapter.As +{ + private final Numeric m_ctor; + + @SuppressWarnings("removal") // JEP 411 + private static final Configuration s_config = + AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(NumericAdapter.class, Via.DATUM)); + + public static final NumericAdapter BIGDECIMAL_INSTANCE = + new NumericAdapter<>(AsBigDecimal.INSTANCE); + + public NumericAdapter(Numeric ctor) + { + super(ctor, null, s_config); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.NUMERIC == pgType; + } + + public T fetch(Attribute a, Datum.Input in) throws SQLException + { + in.pin(); + try + { + ShortBuffer b = + in.buffer().order(nativeOrder()).asShortBuffer(); + + /* + * Magic numbers used below are not exposed in .h files, but + * only found in PostgreSQL's utils/adt/numeric.c. Most are used + * naked here, rather than named, if they aren't needed in many + * places and the usage is clear in context. Regression tests + * are the only way to confirm they are right anyway. 
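A worked sketch of just the short-format branch that follows, using the same masks; the method name is illustrative, and NaN, the infinities, and the long format are ignored here.

    public class NumericHeaderSketch
    {
        /** Decode the short-format header fields, mirroring the branch below. */
        static String decodeShort(short header)
        {
            assert 0 != (header & 0x8000) : "not the short format";
            boolean negative = 0 != (header & 0x2000);
            int displayScale = (header & 0x1F80) >>> 7;
            int weight = ((header & 0x007F) ^ 0x0040) - 0x0040; // sign-extend 7 bits
            return "negative=" + negative
                + " dscale=" + displayScale + " weight=" + weight;
        }

        public static void main(String[] args)
        {
            // 0x8081: short format, positive, dscale 1, weight 1; with
            // base-10000 digits {1, 2345, 6000} that denotes 12345.6
            System.out.println(decodeShort((short)0x8081));
        }
    }
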
+ */ + + short header = b.get(); + + boolean isShort = 0 != (header & 0x8000); + + Kind k; + + switch ( header & 0xF000 ) + { + case 0xC000: k = Kind.NAN; break; + case 0xD000: k = Kind.POSINFINITY; break; + case 0xF000: k = Kind.NEGINFINITY; break; + default: + int displayScale; + int weight; + + if ( isShort ) + { + k = 0 != (header & 0x2000) ? Kind.NEGATIVE : Kind.POSITIVE; + displayScale = (header & 0x1F80) >>> 7; + weight = ( (header & 0x007F) ^ 0x0040 ) - 0x0040;// sign ext + } + else + { + k = 0 != (header & 0x4000) ? Kind.NEGATIVE : Kind.POSITIVE; + displayScale = header & 0x3FFF; + weight = b.get(); + } + + short[] base10000Digits = new short [ b.remaining() ]; + b.get(base10000Digits); + + return m_ctor.construct( + k, displayScale, weight, base10000Digits); + } + + return m_ctor.construct(k, 0, 0, new short[0]); + } + finally + { + in.unpin(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/OidAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/OidAdapter.java new file mode 100644 index 000000000..23c39b58d --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/OidAdapter.java @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import static java.util.Arrays.stream; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.model.Attribute; + +import org.postgresql.pljava.model.*; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.of; + +/** + * PostgreSQL {@code oid} type represented as + * {@code CatalogObject} or one of its {@code Addressed} subtypes. 
+ */ +public class OidAdapter +extends Adapter.As +{ + public static final OidAdapter INSTANCE; + public static final Int4 INT4_INSTANCE; + public static final Addressed REGCLASS_INSTANCE; + public static final Addressed REGCOLLATION_INSTANCE; + public static final Addressed REGCONFIG_INSTANCE; + public static final Addressed REGDICTIONARY_INSTANCE; + public static final Addressed REGNAMESPACE_INSTANCE; + public static final Addressed REGOPERATOR_INSTANCE; + public static final Procedure REGPROCEDURE_INSTANCE; + public static final Addressed REGROLE_INSTANCE; + public static final Addressed REGTYPE_INSTANCE; + public static final Addressed CONSTRAINT_INSTANCE; + public static final Addressed DATABASE_INSTANCE; + public static final Addressed EXTENSION_INSTANCE; + public static final Addressed PLANG_INSTANCE; + public static final Addressed TRANSFORM_INSTANCE; + public static final Addressed AM_INSTANCE; + public static final Addressed TABLESPACE_INSTANCE; + public static final Addressed FDW_INSTANCE; + public static final Addressed SERVER_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure(OidAdapter.class, Via.INT32ZX), + configure( Int4.class, Via.INT32ZX), + configure( Addressed.class, Via.INT32ZX), + configure( Procedure.class, Via.INT32ZX) + }); + + INSTANCE = new OidAdapter<>(configs[0], null); + + INT4_INSTANCE = new Int4(configs[1]); + + REGCLASS_INSTANCE = new Addressed<>(configs[2], + RegClass.CLASSID, RegClass.class, RegType.REGCLASS); + + REGCOLLATION_INSTANCE = new Addressed<>(configs[2], + RegCollation.CLASSID, RegCollation.class, RegType.REGCOLLATION); + + REGCONFIG_INSTANCE = new Addressed<>(configs[2], + RegConfig.CLASSID, RegConfig.class, RegType.REGCONFIG); + + REGDICTIONARY_INSTANCE = new Addressed<>(configs[2], + RegDictionary.CLASSID, RegDictionary.class, RegType.REGDICTIONARY); + + REGNAMESPACE_INSTANCE = new Addressed<>(configs[2], + RegNamespace.CLASSID, RegNamespace.class, RegType.REGNAMESPACE); + + REGOPERATOR_INSTANCE = new Addressed<>(configs[2], + RegOperator.CLASSID, RegOperator.class, + RegType.REGOPER, RegType.REGOPERATOR); + + REGPROCEDURE_INSTANCE = new Procedure(configs[3]); + + REGROLE_INSTANCE = new Addressed<>(configs[2], + RegRole.CLASSID, RegRole.class, RegType.REGROLE); + + REGTYPE_INSTANCE = new Addressed<>(configs[2], + RegType.CLASSID, RegType.class, RegType.REGTYPE); + + CONSTRAINT_INSTANCE = new Addressed<>(configs[2], + Constraint.CLASSID, Constraint.class); + + DATABASE_INSTANCE = new Addressed<>(configs[2], + Database.CLASSID, Database.class); + + EXTENSION_INSTANCE = new Addressed<>(configs[2], + Extension.CLASSID, Extension.class); + + PLANG_INSTANCE = new Addressed<>(configs[2], + ProceduralLanguage.CLASSID, ProceduralLanguage.class); + + TRANSFORM_INSTANCE = new Addressed<>(configs[2], + Transform.CLASSID, Transform.class); + + AM_INSTANCE = new Addressed<>(configs[2], + AccessMethod.CLASSID, AccessMethod.class); + + TABLESPACE_INSTANCE = new Addressed<>(configs[2], + Tablespace.CLASSID, Tablespace.class); + + FDW_INSTANCE = new Addressed<>(configs[2], + ForeignDataWrapper.CLASSID, ForeignDataWrapper.class); + + SERVER_INSTANCE = new Addressed<>(configs[2], + ForeignServer.CLASSID, ForeignServer.class); + } + + /** + * Types for which the non-specific {@code OidAdapter} or {@code Int4} will + * allow itself to be applied. + *
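A usage sketch of the typed instances above; helper names are illustrative, and slot and the attribute come from elsewhere.

    import java.sql.SQLException;

    import org.postgresql.pljava.model.Attribute;
    import org.postgresql.pljava.model.RegClass;
    import org.postgresql.pljava.model.TupleTableSlot;
    import org.postgresql.pljava.pg.adt.OidAdapter;

    public class OidFetchSketch
    {
        /** A regclass (or plain oid) column as a RegClass catalog object. */
        static RegClass asRegClass(TupleTableSlot slot, Attribute att)
        throws SQLException
        {
            return slot.get(att, OidAdapter.REGCLASS_INSTANCE);
        }

        /** The same column as a bare int oid value. */
        static int asRawOid(TupleTableSlot slot, Attribute att)
        throws SQLException
        {
            return slot.get(att, OidAdapter.INT4_INSTANCE);
        }
    }
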

    + * Some halfhearted effort is put into ordering this with less commonly + * sought entries later. + */ + private static final RegType[] s_oidTypes = + { + RegType.OID, RegType.REGPROC, RegType.REGPROCEDURE, RegType.REGTYPE, + RegType.REGNAMESPACE, RegType.REGOPER, RegType.REGOPERATOR, + RegType.REGROLE, RegType.REGCLASS, RegType.REGCOLLATION, + RegType.REGCONFIG, RegType.REGDICTIONARY + }; + + private OidAdapter(Configuration c, Class witness) + { + super(c, null, witness); + } + + @Override + public boolean canFetch(RegType pgType) + { + for ( RegType t : s_oidTypes ) + if ( t == pgType ) + return true; + return false; + } + + public CatalogObject fetch(Attribute a, int in) + { + return of(in); + } + + /** + * Adapter for the {@code oid} type, returned as a primitive {@code int}. + */ + public static class Int4 extends Adapter.AsInt.Unsigned + { + private Int4(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + for ( RegType t : s_oidTypes ) + if ( t == pgType ) + return true; + return false; + } + + public int fetch(Attribute a, int in) + { + return in; + } + } + + /** + * Adapter for the {@code oid} type, able to return most of the + * {@link CatalogObject.Addressed CatalogObject.Addressed} subinterfaces. + */ + public static class Addressed> + extends OidAdapter + { + private final RegClass.Known m_classId; + private final RegType[] m_specificTypes; + + private Addressed( + Configuration c, RegClass.Known classId, Class witness, + RegType... specificTypes) + { + super(c, witness); + m_classId = classId; + m_specificTypes = stream(specificTypes) + .filter(RegType::isValid).toArray(RegType[]::new); + } + + @Override + public boolean canFetch(RegType pgType) + { + for ( RegType t : m_specificTypes ) + if ( t == pgType ) + return true; + return RegType.OID == pgType; + } + + public T fetch(Attribute a, int in) + { + return of(m_classId, in); + } + } + + /** + * A distinct adapter class is needed here because the parameterized + * {@code RegProcedure} type can't be indicated with a class literal + * argument to {@code Addressed}. + */ + public static class Procedure + extends OidAdapter> + { + private Procedure(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + if ( RegType.REGPROC == pgType || RegType.REGPROCEDURE == pgType ) + return true; + return RegType.OID == pgType; + } + + public RegProcedure fetch(Attribute a, int in) + { + return of(RegProcedure.CLASSID, in); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/Primitives.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Primitives.java new file mode 100644 index 000000000..35d70fd56 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Primitives.java @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL primitive numeric and boolean, as the corresponding Java + * primitive types. + */ +public abstract class Primitives extends Adapter.Container +{ + private Primitives() // no instances + { + } + + public static final Int8 INT8_INSTANCE; + public static final Int4 INT4_INSTANCE; + public static final Int2 INT2_INSTANCE; + /** + * The PostgreSQL type {@code "char"} (with the quotes, to distinguish it + * from the different, standard SQL type), an 8-bit signed value with no + * associated character encoding (though often used in PostgreSQL catalogs + * with ASCII letters as values). + */ + public static final Int1 INT1_INSTANCE; + public static final Float8 FLOAT8_INSTANCE; + public static final Float4 FLOAT4_INSTANCE; + public static final Boolean BOOLEAN_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Int8.class, Via.INT64SX), + configure( Int4.class, Via.INT32SX), + configure( Int2.class, Via.SHORT), + configure( Int1.class, Via.BYTE), + configure( Float8.class, Via.DOUBLE), + configure( Float4.class, Via.FLOAT), + configure(Boolean.class, Via.BOOLEAN) + }); + + INT8_INSTANCE = new Int8(configs[0]); + INT4_INSTANCE = new Int4(configs[1]); + INT2_INSTANCE = new Int2(configs[2]); + INT1_INSTANCE = new Int1(configs[3]); + FLOAT8_INSTANCE = new Float8(configs[4]); + FLOAT4_INSTANCE = new Float4(configs[5]); + BOOLEAN_INSTANCE = new Boolean(configs[6]); + } + + /** + * Adapter for the {@code int8} type. + */ + public static class Int8 extends Adapter.AsLong.Signed + { + private Int8(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT8 == pgType; + } + + public long fetch(Attribute a, long in) + { + return in; + } + } + + /** + * Adapter for the {@code int4} type. + */ + public static class Int4 extends Adapter.AsInt.Signed + { + private Int4(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT4 == pgType; + } + + public int fetch(Attribute a, int in) + { + return in; + } + } + + /** + * Adapter for the {@code int2} type. + */ + public static class Int2 extends Adapter.AsShort.Signed + { + private Int2(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT2 == pgType; + } + + public short fetch(Attribute a, short in) + { + return in; + } + } + + /** + * Adapter for the {@code "char"} type. + */ + public static class Int1 extends Adapter.AsByte.Signed + { + private Int1(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.CHAR == pgType; + } + + public byte fetch(Attribute a, byte in) + { + return in; + } + } + + /** + * Adapter for the {@code float8} type. 
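These adapters pair with the primitive get overloads of TupleTableSlot shown earlier in this patch, so no boxing is involved; a short sketch with illustrative helper names, slot and attributes obtained elsewhere.

    import java.sql.SQLException;

    import org.postgresql.pljava.model.Attribute;
    import org.postgresql.pljava.model.TupleTableSlot;
    import org.postgresql.pljava.pg.adt.Primitives;

    public class PrimitiveFetchSketch
    {
        static long int8(TupleTableSlot slot, Attribute att) throws SQLException
        {
            return slot.get(att, Primitives.INT8_INSTANCE); // primitive long, no Long
        }

        static double float8(TupleTableSlot slot, Attribute att) throws SQLException
        {
            return slot.get(att, Primitives.FLOAT8_INSTANCE);
        }
    }
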
+ */ + public static class Float8 extends Adapter.AsDouble + { + private Float8(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.FLOAT8 == pgType; + } + + public double fetch(Attribute a, double in) + { + return in; + } + } + + /** + * Adapter for the {@code float4} type. + */ + public static class Float4 extends Adapter.AsFloat + { + private Float4(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.FLOAT4 == pgType; + } + + public float fetch(Attribute a, float in) + { + return in; + } + } + + /** + * Adapter for the {@code boolean} type. + */ + public static class Boolean extends Adapter.AsBoolean + { + private Boolean(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.BOOL == pgType; + } + + public boolean fetch(Attribute a, boolean in) + { + return in; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/Service.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Service.java new file mode 100644 index 000000000..02f7682a0 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Service.java @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.lang.reflect.Type; + +import java.security.Permission; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import static java.util.Arrays.copyOf; +import static java.util.Objects.requireNonNull; + +import java.util.function.Consumer; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.Array; +import org.postgresql.pljava.Adapter.ArrayBuilder; +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.TypeWrapper; + +import org.postgresql.pljava.adt.spi.AbstractType.MultiArray; +import org.postgresql.pljava.adt.spi.AbstractType.MultiArray.Sized.Allocated; + +import org.postgresql.pljava.internal.Backend; + +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.TupleTableSlot.Indexed; + +/** + * Implementation of a service defined by {@link Adapter} for data types. + *
    + * Handles operations such as creating a properly-typed {@link ArrayAdapter} + * with dimensions and types computed from an adapter for the component type. + */ +public final class Service extends Adapter.Service +{ + @Override + protected Array + buildArrayAdapterImpl(ArrayBuilder builder, TypeWrapper w) + { + return staticBuildArrayAdapter( + builder, adapter(builder), multiArray(builder), requireNonNull(w)); + } + + @Override + protected Consumer permissionChecker() + { + return Backend.CHECKER; + } + + @Override + protected Array elementTypeAdapter() + { + return ArrayAdapter.TYPE_OBTAINING_INSTANCE; + } + + /** + * Functional interface representing the initial logic of multiarray + * creation, verifying that the dimensions match, and allocating the Java + * array using the sizes from the PostgreSQL array datum. + */ + @FunctionalInterface + private interface MultiArrayBuilder + { + Allocated + build(int nDims, int[] dimsAndBounds) throws SQLException; + } + + /** + * Instantiate an array adapter, given the builder, and the component + * adapter and the {@link MultiArray} representing the desired array shape, + * both extracted from the builder in the protected caller above. + * + * A {@link TypeWrapper} has been supplied, to be populated here with the + * computed type, and passed as the 'witness' to the appropriate + * {@code ArrayAdapter} constructor. + */ + private static Array staticBuildArrayAdapter( + ArrayBuilder builder, + Adapter componentAdapter, + MultiArray shape, + TypeWrapper w) + { + w.setWrappedType(shape.arrayType()); + + /* + * Build an 'init' lambda that closes over 'shape'. + */ + final MultiArrayBuilder init = (nDims, dimsAndBounds) -> + { + if ( shape.dimensions != nDims ) + throw new SQLDataException( + shape.dimensions + "-dimension array adapter " + + "applied to " + nDims + "-dimension value", "2202E"); + + return shape.size(copyOf(dimsAndBounds, nDims)).allocate(); + }; + + /* + * A lambda implementing the rest of the array contract (closed over + * the 'init' created above) has to be specialized to the component type + * (reference or one of the primitives) that its inner loop will have to + * contend with. That can be determined from the subclass of Adapter. 
+ */ + if ( componentAdapter instanceof AsLong ) + { + return new ArrayAdapter( + (AsLong)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsLong adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( long[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsDouble ) + { + return new ArrayAdapter( + (AsDouble)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsDouble adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( double[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsInt ) + { + return new ArrayAdapter( + (AsInt)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsInt adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( int[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsFloat ) + { + return new ArrayAdapter( + (AsFloat)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsFloat adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( float[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsShort ) + { + return new ArrayAdapter( + (AsShort)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsShort adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( short[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsChar ) + { + return new ArrayAdapter( + (AsChar)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsChar adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( char[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsByte ) + { + return new ArrayAdapter( + (AsByte)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsByte adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( byte[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsBoolean ) + { + return new ArrayAdapter( + (AsBoolean)componentAdapter, w, + (int nDims, int[] dimsAndBounds, 
AsBoolean adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( boolean[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof As ) + { + @SuppressWarnings("unchecked") + As erasedComponent = (As)componentAdapter; + + return new ArrayAdapter( + erasedComponent, w, + (int nDims, int[] dimsAndBounds, As adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( Object[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + throw new AssertionError("unhandled type building array adapter"); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/TextAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/TextAdapter.java new file mode 100644 index 000000000..089657e66 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/TextAdapter.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL {@code text}, {@code varchar}, and similar types represented as + * Java {@code String}. + */ +public class TextAdapter extends Adapter.As +{ + public static final TextAdapter INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(TextAdapter.class, Via.DATUM)); + + INSTANCE = new TextAdapter(config); + } + + private TextAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + if ( RegType.TEXT == pgType || RegType.CSTRING == pgType ) + return true; + + pgType = pgType.withoutModifier(); + + return RegType.VARCHAR == pgType + || RegType.BPCHAR == pgType; + + /* [comment re: typmod copied from upstream utils/adt/varchar.c:] + * For largely historical reasons, the typmod is VARHDRSZ plus the number + * of characters; there is enough client-side code that knows about that + * that we'd better not change it. 
+ */ + } + + public String fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return SERVER_ENCODING.decode(in, /* close */ true).toString(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/UUIDAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/UUIDAdapter.java new file mode 100644 index 000000000..d1d47ddad --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/UUIDAdapter.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.BIG_ENDIAN; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.util.UUID; + +import org.postgresql.pljava.Adapter; + +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * PostgreSQL {@code uuid} type represented + * as {@code java.util.UUID}. + */ +public class UUIDAdapter extends Adapter.As +{ + public static final UUIDAdapter INSTANCE; + + private static final Simple s_name_UUID = Simple.fromJava("uuid"); + + private static RegType s_uuidType; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(UUIDAdapter.class, Via.DATUM)); + + INSTANCE = new UUIDAdapter(config); + } + + UUIDAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + /* + * Compare by name and namespace rather than requiring RegType to have + * a static field for the UUID type; more popular ones, sure, but a line + * has to be drawn somewhere. + */ + RegType uuidType = s_uuidType; + if ( null != uuidType ) // have we matched it before and cached it? + return uuidType == pgType; + + if ( ! s_name_UUID.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + + /* + * Hang onto this matching RegType for faster future checks. + * Because RegTypes are singletons, and reference writes can't + * be torn, this isn't evil as data races go. + */ + s_uuidType = pgType; + return true; + } + + public UUID fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + /* + * The storage is laid out byte by byte in the order PostgreSQL + * prints them (irrespective of architecture). Java's UUID type + * prints the MSB first. 
+ */ + bb.order(BIG_ENDIAN); + long high64 = bb.getLong(); + long low64 = bb.getLong(); + return new UUID(high64, low64); + } + finally + { + in.unpin(); + in.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/XMLAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XMLAdapter.java new file mode 100644 index 000000000..7899f00e5 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XMLAdapter.java @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.InputStream; +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.jdbc.SQLXMLImpl; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL {@code xml} type represented as {@code java.sql.SQLXML}. + */ +public class XMLAdapter extends Adapter.As +{ + public static final XMLAdapter INSTANCE; + public static final XMLAdapter SYNTHETIC_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure(XMLAdapter.class, Via.DATUM), + configure(Synthetic.class, Via.DATUM) + }); + + INSTANCE = new XMLAdapter(configs[0]); + SYNTHETIC_INSTANCE = new Synthetic(configs[1]); + } + + XMLAdapter(Configuration c) + { + super(c, null, null); + } + + /* + * This preserves the convention, since SQLXML came to PL/Java 1.5.1, that + * you can use the SQLXML API over text values (such as in a database built + * without the XML type, though who would do that nowadays?). + */ + @Override + public boolean canFetch(RegType pgType) + { + return RegType.XML == pgType + || RegType.TEXT == pgType; + } + + public + SQLXML fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return SQLXMLImpl.newReadable(in, a.type(), false); + } + + /** + * Adapter for use when the PostgreSQL type is not actually XML, but + * to be synthetically rendered as XML (such as {@code pg_node_tree}). + *
    + * This is, for now, a very thin wrapper over + * {@code SQLXMLImpl.newReadable}, which (so far) is still where the + * type-specific rendering logic gets chosen, but that can be refactored + * eventually. + */ + public static class Synthetic extends XMLAdapter + { + Synthetic(Configuration c) + { + super(c); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.PG_NODE_TREE == pgType; + } + + @Override + public + SQLXML fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return SQLXMLImpl.newReadable(in, a.type(), true); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/XidAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XidAdapter.java new file mode 100644 index 000000000..204067a4d --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XidAdapter.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Internal; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * PostgreSQL {@code cid}, {@code tid}, {@code xid}, and {@code xid8} types. + */ +public abstract class XidAdapter extends Adapter.Container +{ + private XidAdapter() // no instances + { + } + + private static final Configuration s_tid_config; + + public static final CidXid CID_INSTANCE; + public static final CidXid XID_INSTANCE; + public static final Xid8 XID8_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( CidXid.class, Via.INT32ZX), + configure( Xid8.class, Via.INT64ZX), + configure( Tid.class, Via.DATUM ) + }); + + CID_INSTANCE = new CidXid(configs[0], "cid"); + XID_INSTANCE = new CidXid(configs[0], "xid"); + XID8_INSTANCE = new Xid8(configs[1]); + + s_tid_config = configs[2]; + } + + /** + * Adapter for the {@code cid} or {@code xid} type, returned as + * a primitive {@code int}. + */ + public static class CidXid extends Adapter.AsInt.Unsigned + { + private final Simple m_typeName; + private RegType m_type; + + private CidXid(Configuration c, String typeName) + { + super(c, null); + m_typeName = Simple.fromJava(typeName); + } + + @Override + public boolean canFetch(RegType pgType) + { + RegType myType = m_type; + if ( null != myType ) + return myType == pgType; + if ( ! m_typeName.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + /* + * Reference writes are atomic and RegTypes are singletons, + * so this race isn't evil. 
+ */ + m_type = pgType; + return true; + } + + public int fetch(Attribute a, int in) + { + return in; + } + } + + /** + * Adapter for the {@code xid8} type, returned as a primitive {@code long}. + */ + public static class Xid8 extends Adapter.AsLong.Unsigned + { + private static final Simple s_typeName = Simple.fromJava("xid8"); + private static RegType s_type; + + private Xid8(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + RegType myType = s_type; + if ( null != myType ) + return myType == pgType; + if ( ! s_typeName.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + /* + * Reference writes are atomic and RegTypes are singletons, + * so this race isn't evil. + */ + s_type = pgType; + return true; + } + + public long fetch(Attribute a, long in) + { + return in; + } + } + + /** + * Adapter for the {@code tid} type using the functional interface + * {@link Internal.Tid Internal.Tid}. + */ + public static class Tid extends Adapter.As + { + private static final Simple s_typeName = Simple.fromJava("tid"); + private static RegType s_type; + private Internal.Tid m_ctor; + + public Tid(Configuration c, Internal.Tid ctor) + { + super(ctor, null, c); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + RegType myType = s_type; + if ( null != myType ) + return myType == pgType; + if ( ! s_typeName.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + /* + * Reference writes are atomic and RegTypes are singletons, + * so this race isn't evil. + */ + s_type = pgType; + return true; + } + + public T fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + /* + * The following read could be unaligned; the C code declares + * BlockIdData trickily to allow it to be short-aligned. + * Java ByteBuffers will break up unaligned accesses as needed. + */ + int blockId = bb.getInt(); + short offsetNumber = bb.getShort(); + return m_ctor.construct(blockId, offsetNumber); + } + finally + { + in.unpin(); + in.close(); + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/package-info.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/package-info.java new file mode 100644 index 000000000..02e2c7cdf --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Built-in implementations of {@link Adapter Adapter} for common PostgreSQL + * data types. + * + * @author Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import org.postgresql.pljava.Adapter; diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/package-info.java b/pljava/src/main/java/org/postgresql/pljava/pg/package-info.java new file mode 100644 index 000000000..0b6109730 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/package-info.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Package that provides the running-directly-in-PG-backend implementations + * for the API in {@link org.postgresql.pljava.model}. + * + * @author Chapman Flack + */ +package org.postgresql.pljava.pg;
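
Usage sketch (not part of the patch): the adapters introduced above are consumed through the TupleTableSlot API in org.postgresql.pljava.model. The following minimal Java sketch assumes a TupleTableSlot.Indexed obtained elsewhere (for example through the SlotTester test jig referenced in Adapter.java), relies on the get(index, adapter) overloads exercised by the array lambdas in Service.java above, and guesses at the checked exceptions declared by get; the class and method names here (AdapterUsageSketch, readElement) are invented for illustration only.

/*
 * Illustrative sketch only; see the hedging notes above. Shows a primitive
 * int4 fetch (no boxing) next to a reference-typed text fetch, each guarded
 * by the adapter's canFetch test against the element's declared type.
 */
import java.io.IOException;
import java.sql.SQLException;

import org.postgresql.pljava.model.RegType;
import org.postgresql.pljava.model.TupleTableSlot;

import org.postgresql.pljava.pg.adt.Primitives;
import org.postgresql.pljava.pg.adt.TextAdapter;

public class AdapterUsageSketch
{
    static void readElement(TupleTableSlot.Indexed slot, int i, RegType elementType)
        throws SQLException, IOException // assumed; adapt to the actual signature of get
    {
        if ( Primitives.INT4_INSTANCE.canFetch(elementType) )
        {
            // primitive-returning overload: no Integer boxing on the fetch path
            int v = slot.get(i, Primitives.INT4_INSTANCE);
            System.out.println("int4 element: " + v);
        }
        else if ( TextAdapter.INSTANCE.canFetch(elementType) )
        {
            // reference-returning overload, decoded in the server encoding
            String s = slot.get(i, TextAdapter.INSTANCE);
            System.out.println("text element: " + s);
        }
    }
}

The point of the distinct AsInt/AsLong/AsDouble (and so on) adapter hierarchies used throughout this patch is exactly the first branch above: a value can travel from the PostgreSQL datum to Java code as a primitive, without passing through a boxed wrapper object.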