diff --git a/compas_python_utils/cosmic_integration/ClassCOMPAS.py b/compas_python_utils/cosmic_integration/ClassCOMPAS.py
index 76ac552b3..ef15200f4 100644
--- a/compas_python_utils/cosmic_integration/ClassCOMPAS.py
+++ b/compas_python_utils/cosmic_integration/ClassCOMPAS.py
@@ -2,7 +2,11 @@
 import numpy as np
 import h5py as h5
 import os
-from . import totalMassEvolvedPerZ as MPZ
+import sys
+# Get the COMPAS_ROOT_DIR var, and add the cosmic_integration directory to the path
+compas_root_dir = os.getenv('COMPAS_ROOT_DIR')
+sys.path.append(os.path.join(compas_root_dir, 'compas_python_utils/cosmic_integration'))
+import totalMassEvolvedPerZ as MPZ


 class COMPASData(object):
@@ -31,12 +35,15 @@ def __init__(
         self.mass2 = None  # Msun
         self.DCOmask = None
         self.allTypesMask = None
-        self.BBHmask = None
-        self.DNSmask = None
+        self.BHBHmask = None
+        self.NSNSmask = None
         self.BHNSmask = None
+        self.WDWDmask = None
+        self.NSWDmask = None
+        self.WDBHmask = None
         self.CHE_mask = None
-        self.CHE_BBHmask = None
-        self.NonCHE_BBHmask = None
+        self.CHE_BHBHmask = None
+        self.NonCHE_BHBHmask = None
         self.initialZ = None
         self.sw_weights = None
         self.n_systems = None
@@ -63,9 +70,9 @@ def __init__(
         print("    and optionally self.setGridAndMassEvolved() if using a metallicity grid")

     def setCOMPASDCOmask(
-        self, types="BBH", withinHubbleTime=True, pessimistic=True, noRLOFafterCEE=True
+        self, types="BHBH", withinHubbleTime=True, pessimistic=True, noRLOFafterCEE=True
     ):
-        # By default, we mask for BBHs that merge within a Hubble time, assuming
+        # By default, we mask for BHBHs that merge within a Hubble time, assuming
         # the pessimistic CEE prescription (HG donors cannot survive a CEE) and
         # not allowing immediate RLOF post-CEE
@@ -73,14 +80,14 @@ def setCOMPASDCOmask(
             self.get_COMPAS_variables("BSE_Double_Compact_Objects", ["Stellar_Type(1)", "Stellar_Type(2)", "Merges_Hubble_Time", "SEED"])
         dco_seeds = dco_seeds.flatten()

-        if types == "CHE_BBH" or types == "NON_CHE_BBH":
+        if types == "CHE_BHBH" or types == "NON_CHE_BHBH":
             stellar_type_1_zams, stellar_type_2_zams, che_ms_1, che_ms_2, sys_seeds = \
                 self.get_COMPAS_variables("BSE_System_Parameters", ["Stellar_Type@ZAMS(1)", "Stellar_Type@ZAMS(2)", "CH_on_MS(1)", "CH_on_MS(2)", "SEED"])
             che_mask = np.logical_and.reduce((stellar_type_1_zams == 16, stellar_type_2_zams == 16, che_ms_1 == True, che_ms_2 == True))
             che_seeds = sys_seeds[()][che_mask]

-        self.CHE_mask = np.in1d(dco_seeds, che_seeds) if types == "CHE_BBH" or types == "NON_CHE_BBH" else np.repeat(False, len(dco_seeds))
+        self.CHE_mask = np.in1d(dco_seeds, che_seeds) if types == "CHE_BHBH" or types == "NON_CHE_BHBH" else np.repeat(False, len(dco_seeds))

         # if user wants to mask on Hubble time use the flag, otherwise just set all to True, use astype(bool) to set masks to bool type
         hubble_mask = hubble_flag.astype(bool) if withinHubbleTime else np.repeat(True, len(dco_seeds))
@@ -88,12 +95,18 @@ def setCOMPASDCOmask(
         # mask on stellar types (where 14=BH and 13=NS), BHNS can be BHNS or NSBH
         type_masks = {
             "all": np.repeat(True, len(dco_seeds)),
-            "BBH": np.logical_and(stellar_type_1 == 14, stellar_type_2 == 14),
-            "BHNS": np.logical_or(np.logical_and(stellar_type_1 == 14, stellar_type_2 == 13), np.logical_and(stellar_type_1 == 13, stellar_type_2 == 14)),
-            "BNS": np.logical_and(stellar_type_1 == 13, stellar_type_2 == 13),
+            "BHBH": np.logical_and(stellar_type_1 == 14, stellar_type_2 == 14),
+            "NSNS": np.logical_and(stellar_type_1 == 13, stellar_type_2 == 13),
+            "WDWD": np.logical_and(np.isin(stellar_type_1, [10, 11, 12]), np.isin(stellar_type_2, [10, 11, 12])),
+            "BHNS": np.logical_or(np.logical_and(stellar_type_1 == 13, stellar_type_2 == 14), np.logical_and(stellar_type_1 == 14, stellar_type_2 == 13)),
+            "NSWD": np.logical_or(np.logical_and(np.isin(stellar_type_1, [10, 11, 12]), stellar_type_2 == 13),
+                                  np.logical_and(np.isin(stellar_type_2, [10, 11, 12]), stellar_type_1 == 13)),
+            "WDBH": np.logical_or(np.logical_and(np.isin(stellar_type_1, [10, 11, 12]), stellar_type_2 == 14),
+                                  np.logical_and(np.isin(stellar_type_2, [10, 11, 12]), stellar_type_1 == 14)),
         }
-        type_masks["CHE_BBH"] = np.logical_and(self.CHE_mask, type_masks["BBH"]) if types == "CHE_BBH" else np.repeat(False, len(dco_seeds))
-        type_masks["NON_CHE_BBH"] = np.logical_and(np.logical_not(self.CHE_mask), type_masks["BBH"]) if types == "NON_CHE_BBH" else np.repeat(True, len(dco_seeds))
+
+        type_masks["CHE_BHBH"] = np.logical_and(self.CHE_mask, type_masks["BHBH"]) if types == "CHE_BHBH" else np.repeat(False, len(dco_seeds))
+        type_masks["NON_CHE_BHBH"] = np.logical_and(np.logical_not(self.CHE_mask), type_masks["BHBH"]) if types == "NON_CHE_BHBH" else np.repeat(True, len(dco_seeds))

         # if the user wants to mask RLOF or optimistic CEs
         if noRLOFafterCEE or pessimistic:
@@ -124,11 +137,14 @@ def setCOMPASDCOmask(
         # create a mask for each dco type supplied
         self.DCOmask = type_masks[types] * hubble_mask * rlof_mask * pessimistic_mask
-        self.BBHmask = type_masks["BBH"] * hubble_mask * rlof_mask * pessimistic_mask
+        self.BHBHmask = type_masks["BHBH"] * hubble_mask * rlof_mask * pessimistic_mask
+        self.NSNSmask = type_masks["NSNS"] * hubble_mask * rlof_mask * pessimistic_mask
+        self.WDWDmask = type_masks["WDWD"] * hubble_mask * rlof_mask * pessimistic_mask
         self.BHNSmask = type_masks["BHNS"] * hubble_mask * rlof_mask * pessimistic_mask
-        self.DNSmask = type_masks["BNS"] * hubble_mask * rlof_mask * pessimistic_mask
-        self.CHE_BBHmask = type_masks["CHE_BBH"] * hubble_mask * rlof_mask * pessimistic_mask
-        self.NonCHE_BBHmask = type_masks["NON_CHE_BBH"] * hubble_mask * rlof_mask * pessimistic_mask
+        self.NSWDmask = type_masks["NSWD"] * hubble_mask * rlof_mask * pessimistic_mask
+        self.WDBHmask = type_masks["WDBH"] * hubble_mask * rlof_mask * pessimistic_mask
+        self.CHE_BHBHmask = type_masks["CHE_BHBH"] * hubble_mask * rlof_mask * pessimistic_mask
+        self.NonCHE_BHBHmask = type_masks["NON_CHE_BHBH"] * hubble_mask * rlof_mask * pessimistic_mask
         self.allTypesMask = type_masks["all"] * hubble_mask * rlof_mask * pessimistic_mask
         self.optimisticmask = pessimistic_mask
@@ -158,6 +174,9 @@ def setCOMPASData(self):
         primary_masses, secondary_masses, formation_times, coalescence_times, dco_seeds = \
             self.get_COMPAS_variables("BSE_Double_Compact_Objects", ["Mass(1)", "Mass(2)", "Time", "Coalescence_Time", "SEED"])

+        # Raise an error if the DCO table is empty
+        if len(primary_masses) == 0:
+            raise ValueError("BSE_Double_Compact_Objects is empty!")
         initial_seeds, initial_Z = self.get_COMPAS_variables("BSE_System_Parameters", ["SEED", "Metallicity@ZAMS(1)"])

@@ -173,6 +192,10 @@ def setCOMPASData(self):
         self.mass1 = primary_masses[self.DCOmask]
         self.mass2 = secondary_masses[self.DCOmask]

+        # Check that there are some systems of interest in your DCO table (i.e. len(primary_masses[self.DCOmask]) > 0)
+        if len(self.mass1) == 0:
+            raise ValueError("No DCOs found with the current mask. Please check your DCO table, or change your mask settings.")
+
         # Stuff of data I don't need for the integral
         # but I might be too lazy to read in myself
         # and often use. Might turn it off for memory efficiency
diff --git a/compas_python_utils/cosmic_integration/FastCosmicIntegration.py b/compas_python_utils/cosmic_integration/FastCosmicIntegration.py
index 85c6a30d8..8da39e2c5 100644
--- a/compas_python_utils/cosmic_integration/FastCosmicIntegration.py
+++ b/compas_python_utils/cosmic_integration/FastCosmicIntegration.py
@@ -1,18 +1,24 @@
 import numpy as np
 import h5py as h5
 import os
+import sys
 import time
 import matplotlib.pyplot as plt
 import scipy
 from scipy.interpolate import interp1d
 from scipy.stats import norm as NormDist
-from compas_python_utils.cosmic_integration import ClassCOMPAS
-from compas_python_utils.cosmic_integration import selection_effects
 import warnings
 import astropy.units as u
 import argparse
 import importlib
-from compas_python_utils.cosmic_integration.cosmology import get_cosmology
+
+# Get the COMPAS_ROOT_DIR var, and add the cosmic_integration directory to the path
+compas_root_dir = os.getenv('COMPAS_ROOT_DIR')
+sys.path.append(os.path.join(compas_root_dir, 'compas_python_utils/cosmic_integration'))
+import ClassCOMPAS
+from cosmology import get_cosmology
+import selection_effects
+

 def calculate_redshift_related_params(max_redshift=10.0, max_redshift_detection=1.0, redshift_step=0.001, z_first_SF = 10.0, cosmology=None):
     """
@@ -212,7 +218,7 @@ def find_formation_and_merger_rates(n_binaries, redshifts, times, time_first_SF,
             merger_rate[i, :first_too_early_index - 1] = formation_rate[i, z_of_formation_index]
     return formation_rate, merger_rate

-def compute_snr_and_detection_grids(sensitivity="O1", snr_threshold=8.0, Mc_max=300.0, Mc_step=0.1,
+def compute_snr_and_detection_grids(dco_type, sensitivity="O1", snr_threshold=8.0, Mc_max=300.0, Mc_step=0.1,
                                     eta_max=0.25, eta_step=0.01, snr_max=1000.0, snr_step=0.1):
     """
     Compute a grid of SNRs and detection probabilities for a range of masses and SNRs
@@ -238,6 +244,10 @@ def compute_snr_and_detection_grids(sensitivity="O1", snr_threshold=8.0, Mc_max=
         snr_grid_at_1Mpc               --> [2D float array] The snr of a binary with masses (Mc, eta) at a distance of 1 Mpc
         detection_probability_from_snr --> [list of floats] A list of detection probabilities for different SNRs
     """
+    # If the DCO type includes a WD, warn the user, since we currently only support LVK sensitivities
+    if dco_type in ["WDWD", "NSWD", "WDBH"]:
+        warnings.warn("!! Detected rate is not computed since DCO type {} does not work with LVK sensitivity {}".format(dco_type, sensitivity))
+
     # get interpolator given sensitivity
     interpolator = selection_effects.SNRinterpolator(sensitivity)

@@ -310,7 +320,7 @@ def find_detection_probability(Mc, eta, redshifts, distances, n_redshifts_detect

     return detection_probability

-def find_detection_rate(path, dco_type="BBH", merger_output_filename=None, weight_column=None,
+def find_detection_rate(path, dco_type="BHBH", merger_output_filename=None, weight_column=None,
                         merges_hubble_time=True, pessimistic_CEE=True, no_RLOF_after_CEE=True,
                         max_redshift=10.0, max_redshift_detection=1.0, redshift_step=0.001, z_first_SF = 10,
                         use_sampled_mass_ranges=True, m1_min=5 * u.Msun, m1_max=150 * u.Msun, m2_min=0.1 * u.Msun, fbin=0.7,
@@ -332,7 +342,7 @@ def find_detection_rate(path, dco_type="BBH", merger_output_filename=None, weigh
        == Arguments for finding and masking COMPAS file ==
        ===================================================
        path                   --> [string] Path to the COMPAS data file that contains the output
-       dco_type               --> [string] Which DCO type to calculate rates for: one of ["all", "BBH", "BHNS", "BNS"]
+       dco_type               --> [string] Which DCO type to calculate rates for: one of ["all", "BHBH", "NSNS", "WDWD", "BHNS", "NSWD", "WDBH"]
        merger_output_filename --> [string] Optional name of output file to store merging DCOs (do not create the extra output if None)
        weight_column          --> [string] Name of column in "DoubleCompactObjects" file that contains adaptive sampling weights
                                   (Leave this as None if you have unweighted samples)
@@ -393,7 +403,7 @@ def find_detection_rate(path, dco_type="BBH", merger_output_filename=None, weigh
     # assert that input will not produce errors
     assert max_redshift_detection <= max_redshift, "Maximum detection redshift cannot be below maximum redshift"
     assert m1_min <= m1_max, "Minimum sampled primary mass cannot be above maximum sampled primary mass"
-    assert np.logical_and(fbin >= 0.0, fbin <= 1.0), "Binary fraction must be between 0 and 1"
+    assert fbin is None or (0.0 <= fbin <= 1.0), "Binary fraction must be between 0 and 1, or None to vary with mass"
     assert Mc_step < Mc_max, "Chirp mass step size must be less than maximum chirp mass"
     assert eta_step < eta_max, "Symmetric mass ratio step size must be less than maximum symmetric mass ratio"
     assert snr_step < snr_max, "SNR step size must be less than maximum SNR"
@@ -469,7 +479,7 @@ def find_detection_rate(path, dco_type="BBH", merger_output_filename=None, weigh
                                                                       COMPAS.delayTimes, COMPAS.sw_weights)

     # create lookup tables for the SNR at 1Mpc as a function of the masses and the probability of detection as a function of SNR
-    snr_grid_at_1Mpc, detection_probability_from_snr = compute_snr_and_detection_grids(sensitivity, snr_threshold, Mc_max, Mc_step,
+    snr_grid_at_1Mpc, detection_probability_from_snr = compute_snr_and_detection_grids(dco_type, sensitivity, snr_threshold, Mc_max, Mc_step,
                                                                     eta_max, eta_step, snr_max, snr_step)

     # use lookup tables to find the probability of detecting each binary at each redshift
@@ -529,6 +539,8 @@ def append_rates(path, detection_rate, formation_rate, merger_rate, redshifts, C
     print('shape redshifts', np.shape(redshifts))
     print('shape COMPAS.sw_weights', np.shape(COMPAS.sw_weights) )
     print('COMPAS.DCOmask', COMPAS.DCOmask, ' was set for dco_type', dco_type)
+    if dco_type == 'all':
+        print('Note that rates are calculated for ALL systems in the DCO table; this could include WDWD')
     print('shape COMPAS.DCOmask', np.shape(COMPAS.DCOmask) )

 #################################################
@@ -550,7 +562,6 @@ def append_rates(path, detection_rate, formation_rate, merger_rate, redshifts, C
     else:
         print(new_rate_group, 'exists, we will overwrite the data')

-
     #################################################
     # Bin rates by redshifts
     #################################################
@@ -579,7 +590,7 @@ def append_rates(path, detection_rate, formation_rate, merger_rate, redshifts, C
         N_dco_in_z_bin = (merger_rate[:,:] * fine_shell_volumes[:])
         print('fine_shell_volumes', fine_shell_volumes)

-        # The number of merging BBHs that need a weight
+        # The number of merging DCO systems that need a weight
         N_dco = len(merger_rate[:,0])

    ####################
@@ -609,7 +620,7 @@ def append_rates(path, detection_rate, formation_rate, merger_rate, redshifts, C
             detection_index = z_index if z_index < n_redshifts_detection else n_redshifts_detection
         print('You will only save data up to redshift ', maxz, ', i.e. index', z_index)

-        save_redshifts = redshifts
+        save_redshifts = redshifts[:z_index]
         save_merger_rate = merger_rate[:,:z_index]
         save_detection_rate = detection_rate[:,:detection_index]

@@ -619,7 +630,8 @@ def append_rates(path, detection_rate, formation_rate, merger_rate, redshifts, C
     # Write the rates as a separate dataset
     # re-arrange your list of rate parameters
     DCO_to_rate_mask = COMPAS.DCOmask #save this bool for easy conversion between BSE_Double_Compact_Objects, and CI weights
-    rate_data_list = [DCO['SEED'][DCO_to_rate_mask], DCO_to_rate_mask , save_redshifts, save_merger_rate, merger_rate[:,0], save_detection_rate]
+    DCO_seeds = h_new['BSE_Double_Compact_Objects']['SEED'][DCO_to_rate_mask]  # Get the DCO seeds
+    rate_data_list = [DCO_seeds, DCO_to_rate_mask, save_redshifts, save_merger_rate, merger_rate[:,0], save_detection_rate]
     rate_list_names = ['SEED', 'DCOmask', 'redshifts', 'merger_rate','merger_rate_z0', 'detection_rate'+sensitivity]
     for i, data in enumerate(rate_data_list):
         print('Adding rate info of shape', np.shape(data))
@@ -760,20 +772,29 @@ def plot_rates(save_dir, formation_rate, merger_rate, detection_rate, redshifts,
     else:
         plt.close()

-
+# To allow f_binary to be None or a float
+def none_or_float(value):
+    return None if value.lower() == "none" else float(value)

 def parse_cli_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--path", dest='path', help="Path to the COMPAS file that contains the output", type=str, default="COMPAS_Output.h5")

-    # For what DCO would you like the rate? options: ALL, BHBH, BHNS NSNS
+
+    # For what DCO would you like the rate? options: all, BHBH, NSNS, WDWD, BHNS, NSWD, WDBH
     parser.add_argument("--dco_type", dest='dco_type',
-                        help="Which DCO type you used to calculate rates, one of: ['all', 'BBH', 'BHNS', 'BNS'] ",
-                        type=str, default="BBH")
+                        help="Which DCO type you used to calculate rates, one of: ['all', 'BHBH', 'NSNS', 'WDWD', 'BHNS', 'NSWD', 'WDBH'] ",
+                        type=str, default="BHBH")
     parser.add_argument("--weight", dest='weight_column',
                         help="Name of column w AIS sampling weights, i.e. 'mixture_weight' (leave as None for unweighted samples) ",
                         type=str, default=None)
-
+    parser.add_argument("--keep_pessimistic_CEE", dest='remove_pessimistic_CEE',
+                        help="keep_pessimistic_CEE will set remove_pessimistic_CEE to False. The default behaviour (remove_pessimistic_CEE == True) will mask binaries that experience a CEE while on the HG",
+                        action='store_false', default=True)
+    parser.add_argument("--keepRLOF_postCE", dest='remove_RLOF_after_CEE',
+                        help="keepRLOF_postCE will set remove_RLOF_after_CEE to False. The default behaviour (remove_RLOF_after_CEE == True) will mask binaries that have immediate RLOF after a CEE",
+                        action='store_false', default=True)
+
     # Options for the redshift evolution and detector sensitivity
     parser.add_argument("--maxz", dest='max_redshift', help="Maximum redshift to use in array", type=float, default=10)
     parser.add_argument("--zSF", dest='z_first_SF', help="redshift of first star formation", type=float, default=10)
@@ -792,7 +813,7 @@ def parse_cli_args():
                         default=150.)
     parser.add_argument("--m2min", dest='m2_min', help="Minimum secondary mass sampled by COMPAS", type=float,
                         default=0.1)
-    parser.add_argument("--fbin", dest='fbin', help="Binary fraction used by COMPAS", type=float, default=0.7)
+    parser.add_argument("--fbin", dest='fbin', help="Binary fraction used by COMPAS; if None, f_bin varies with mass", type=none_or_float, default=0.7)

     # Parameters determining dP/dZ and SFR(z), default options from Neijssel 2019
     parser.add_argument("--mu0", dest='mu0', help="mean metallicity at redshift 0", type=float, default=0.035)
@@ -847,6 +868,8 @@ def main():
         args.path,
         dco_type=args.dco_type,
         weight_column=args.weight_column,
+        pessimistic_CEE=args.remove_pessimistic_CEE,
+        no_RLOF_after_CEE=args.remove_RLOF_after_CEE,
         max_redshift=args.max_redshift,
         max_redshift_detection=args.max_redshift_detection,
         redshift_step=args.redshift_step,
diff --git a/compas_python_utils/cosmic_integration/totalMassEvolvedPerZ.py b/compas_python_utils/cosmic_integration/totalMassEvolvedPerZ.py
index 8d1317d9c..47e1a59b9 100644
--- a/compas_python_utils/cosmic_integration/totalMassEvolvedPerZ.py
+++ b/compas_python_utils/cosmic_integration/totalMassEvolvedPerZ.py
@@ -52,57 +52,71 @@ def IMF(m, m1=0.01, m2=0.08, m3=0.5, m4=200.0, a12=0.3, a23=1.3, a34=2.3):


-def get_COMPAS_fraction(m1_low, m1_upp, m2_low, f_bin, mass_ratio_pdf_function=lambda q: 1,
-                        m1=0.01, m2=0.08, m3=0.5, m4=200.0, a12=0.3, a23=1.3, a34=2.3):
-    """Calculate the fraction of mass in a COMPAS population relative to the total Universal population. This
-    can be used to normalise the rates of objects from COMPAS simulations.
+def get_COMPAS_fraction(m1_low, m1_upp, m2_low, f_bin=None,
+                        mass_ratio_pdf_function=lambda q: 1,
+                        m1=0.01, m2=0.08, m3=0.5, m4=200.0,
+                        a12=0.3, a23=1.3, a34=2.3):
+    """
+    Calculate the fraction of mass in a COMPAS population relative to the total Universal population.
+    Can be used to normalise the rates of objects from COMPAS simulations.

     Parameters
     ----------
-    m1_low : `float`
-        Lower limit on the sampled primary mass
-    m1_upp : `float`
-        Upper limit on the sampled primary mass
-    m2_low : `float`
-        Lower limit on the sampled secondary mass
-    f_bin : `float`
-        Binary fraction
-    mass_ratio_pdf_function : `function`, optional
-        Function to calculate the mass ratio PDF, by default a uniform mass ratio distribution
-    mi, aij : `float`
-        Settings for the IMF choice, see `IMF` for details, by default follows Kroupa (2001)
-
-    Returns
-    -------
-    fraction
-        The fraction of mass in a COMPAS population relative to the total Universal population
-    """
-    # first, for normalisation purposes, we can find the integral with no COMPAS cuts
-    def full_integral(mass, m1, m2, m3, m4, a12, a23, a34):
-        primary_mass = IMF(mass, m1, m2, m3, m4, a12, a23, a34) * mass
-
-        # find the expected companion mass given the mass ratio pdf function
-        expected_secondary_mass = quad(lambda q: q * mass_ratio_pdf_function(q), 0, 1)[0] * primary_mass
-
-        single_stars = (1 - f_bin) * primary_mass
-        binary_stars = f_bin * (primary_mass + expected_secondary_mass)
-        return single_stars + binary_stars
-    full_mass = quad(full_integral, m1, m4, args=(m1, m2, m3, m4, a12, a23, a34))[0]
+    m1_low, m1_upp : float
+        Primary mass cuts in the COMPAS simulation
+    m2_low : float
+        Secondary mass cutoff
+    f_bin : float or None
+        Binary fraction. If None, use a stepwise mass-dependent binary fraction.
+    mass_ratio_pdf_function : function
+        PDF of the mass ratio q
+    mi, aij : float
+        IMF breakpoints and slopes
+    """
+    binary_bin_edges = [m1, 0.08, 0.5, 1, 10, m4]

-    # now we do a similar integral but for the COMPAS regime
-    def compas_integral(mass, m2_low, f_bin, m1, m2, m3, m4, a12, a23, a34):
-        # define the primary mass in the same way
-        primary_mass = IMF(mass, m1, m2, m3, m4, a12, a23, a34) * mass
-
-        # find the fraction that are below the m2 mass cut
-        f_below_m2low = quad(mass_ratio_pdf_function, 0, m2_low / mass)[0]
-
-        # expectation value of the secondary mass given the m2 cut and mass ratio pdf function
-        expected_secondary_mass = quad(lambda q: q * mass_ratio_pdf_function(q), m2_low / mass, 1)[0] * primary_mass
-
-        # return total mass of binary stars that have m2 above the cut
-        return f_bin * (1 - f_below_m2low) * (primary_mass + expected_secondary_mass)
-    compas_mass = quad(compas_integral, m1_low, m1_upp, args=(m2_low, f_bin, m1, m2, m3, m4, a12, a23, a34))[0]
+    def get_binary_fraction(mass):
+        binaryFractions = [0.1, 0.225, 0.5, 0.8, 1.0]
+        for i in range(len(binary_bin_edges) - 1):
+            if binary_bin_edges[i] <= mass < binary_bin_edges[i + 1]:
+                return binaryFractions[i]
+        return 0  # catch-all
+
+    def IMF_mass(mass):
+        return IMF(mass, m1, m2, m3, m4, a12, a23, a34) * mass
+
+    def integrand_full(mass, f_bin):
+        local_f_bin = get_binary_fraction(mass) if f_bin is None else f_bin
+        expected_q = quad(lambda q: q * mass_ratio_pdf_function(q), 0, 1)[0]
+        return (1 - local_f_bin + local_f_bin * (1 + expected_q)) * IMF_mass(mass)
+
+    def integrand_compas(mass, f_bin):
+        local_f_bin = get_binary_fraction(mass) if f_bin is None else f_bin
+        q_min = m2_low / mass
+        if q_min >= 1:
+            return 0  # No valid secondaries
+        f_q = quad(mass_ratio_pdf_function, q_min, 1)[0]
+        expected_q = quad(lambda q: q * mass_ratio_pdf_function(q), q_min, 1)[0]
+        return local_f_bin * f_q * (1 + expected_q) * IMF_mass(mass)
+
+    # split the integral at the binary fraction steps if f_bin is None (i.e. f_bin varies with mass as a step function)
+    def split_integral(func, a, b, f_bin):
+        total = 0
+        for edge_start, edge_end in zip(binary_bin_edges[:-1], binary_bin_edges[1:]):
+            left = max(a, edge_start)
+            right = min(b, edge_end)
+            if left < right:
+                result, _ = quad(func, left, right, args=(f_bin,))
+                total += result
+        return total
+
+    if f_bin is None:
+        full_mass = split_integral(integrand_full, m1, m4, f_bin)
+        compas_mass = split_integral(integrand_compas, m1_low, m1_upp, f_bin)
+    else:
+        full_mass = quad(integrand_full, m1, m4, args=(f_bin,))[0]
+        compas_mass = quad(integrand_compas, m1_low, m1_upp, args=(f_bin,))[0]
+
     return compas_mass / full_mass
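A short usage sketch of the new f_bin=None mode (illustrative only, not part of the patch; the limits below simply mirror the defaults used elsewhere in this patch, m1 in [5, 150] Msun and m2_low = 0.1 Msun):

from compas_python_utils.cosmic_integration.totalMassEvolvedPerZ import get_COMPAS_fraction

# fraction of all star-forming mass represented by the COMPAS-sampled binaries
frac_fixed = get_COMPAS_fraction(m1_low=5, m1_upp=150, m2_low=0.1, f_bin=0.7)   # constant binary fraction
frac_step = get_COMPAS_fraction(m1_low=5, m1_upp=150, m2_low=0.1, f_bin=None)   # mass-dependent step function
print(frac_fixed, frac_step)   # 1/fraction is the multiplicationFactor applied in totalMassEvolvedPerZ below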
@@ -125,17 +139,17 @@ def totalMassEvolvedPerZ(path, Mlower, Mupper, m2_low, binaryFraction, mass_rati
     """
     Calculate the total mass evolved per metallicity as a function of redshift in a COMPAS simulation.
     """
-    # calculate the fraction of mass in the COMPAS simulation vs. the real population without sample cuts
     fraction = get_COMPAS_fraction(m1_low=Mlower, m1_upp=Mupper, m2_low=m2_low, f_bin=binaryFraction,
                                    mass_ratio_pdf_function=mass_ratio_pdf_function,
                                    m1=m1, m2=m2, m3=m3, m4=m4, a12=a12, a23=a23, a34=a34)
     multiplicationFactor = 1 / fraction

+    # Warning: This is slow and error prone, especially if you sample metallicities smoothly
     # get the mass evolved for each metallicity bin and convert to a total mass using the fraction
     MassEvolvedPerZ = retrieveMassEvolvedPerZ(path)
-    totalMassEvolvedPerMetallicity = MassEvolvedPerZ / fraction
+    totalMassEvolvedPerMetallicity = MassEvolvedPerZ / fraction

     return multiplicationFactor, totalMassEvolvedPerMetallicity

@@ -147,7 +161,10 @@ def star_forming_mass_per_binary(
     """
     Calculate the total mass of stars formed per binary star formed within the COMPAS simulation.
     """
-    multiplicationFactor, _ = totalMassEvolvedPerZ(**locals())
+    fraction = get_COMPAS_fraction(m1_low=Mlower, m1_upp=Mupper, m2_low=m2_low,
+                                   f_bin=binaryFraction, mass_ratio_pdf_function=mass_ratio_pdf_function,
+                                   m1=m1, m2=m2, m3=m3, m4=m4,
+                                   a12=a12, a23=a23, a34=a34)

     # get the total mass in COMPAS and number of binaries
     with h5.File(path, 'r') as f:
@@ -157,7 +174,7 @@ def star_forming_mass_per_binary(
         n_binaries = len(m1s)
     total_star_forming_mass_in_COMPAS = sum(m1s) + sum(m2s)

-    total_star_forming_mass = total_star_forming_mass_in_COMPAS * multiplicationFactor
+    total_star_forming_mass = total_star_forming_mass_in_COMPAS / fraction
     return total_star_forming_mass / n_binaries

@@ -191,7 +208,13 @@ def draw_samples_from_kroupa_imf(
     return m1_samples[mask] , m2_samples[mask]


-def analytical_star_forming_mass_per_binary_using_kroupa_imf(
+
+
+
+##################################################
+# Old version of the analytical calculation
+##################################################
+def old_analytical_star_forming_mass_per_binary_using_kroupa_imf(
     m1_min, m1_max, m2_min, fbin=1., imf_mass_bounds=[0.01,0.08,0.5,200]
 ):
     """
@@ -215,4 +238,93 @@ def analytical_star_forming_mass_per_binary_using_kroupa_imf(
     fint = -alpha / 1.3 * (m1_max ** (-1.3) - m1_min ** (-1.3)) + alpha * m2_min / 2.3 * (m1_max ** (-2.3) - m1_min ** (-2.3))
     # mass represented by each binary simulated by COMPAS
     m_rep = (1/fint) * m_avg * (1.5 + (1-fbin)/fbin)
-    return m_rep
\ No newline at end of file
+    return m_rep
+
+
+###################################################
+def analytical_star_forming_mass_per_binary_using_kroupa_imf(
+    m1_min, m1_max, m2_min, fbin=1., imf_mass_bounds=[0.01,0.08,0.5,200]
+):
+    """
+    Analytical computation of the mass of stars formed per binary star formed within the
+    [m1_min, m1_max] and [m2_min, ..] range, using the Kroupa IMF:
+
+        p(M) \propto M^-0.3 for M between m1 and m2
+        p(M) \propto M^-1.3 for M between m2 and m3
+        p(M) = alpha * M^-2.3 for M between m3 and m4
+
+    m1_min, m1_max are the min and max sampled primary masses
+    m2_min is the min sampled secondary mass
+
+    This function further assumes a flat mass ratio distribution with qmin = m2_min/m1, and m2_max = m1_max
+    By Lieke, based on Ilya Mandel's derivation
+    """
+    # Kroupa IMF
+    m1, m2, m3, m4 = imf_mass_bounds
+    continuity_constants = [1./(m2*m3), 1./(m3), 1.0]
+    IMF_powers = [-0.3, -1.3, -2.3]
+
+    if m1_min < m3:
+        raise ValueError(f"This analytical derivation requires IMF break m3 < m1_min ({m3} !< {m1_min})")
+    if m1_min > m1_max:
+        raise ValueError(f"Minimum sampled primary mass cannot be above maximum sampled primary mass: m1_min ({m1_min} !< m1_max {m1_max})")
+    if m1_max > m4:
+        raise ValueError(f"Maximum sampled primary mass cannot be above maximum mass of Kroupa IMF: m1_max ({m1_max} !< m4 {m4})")
+
+    # normalize the IMF over the complete mass range:
+    alpha = (-(m4**(-1.3) - m3**(-1.3))/1.3 - (m3**(-0.3) - m2**(-0.3))/(m3*0.3) + (m2**0.7 - m1**0.7)/(m2*m3*0.7))**(-1)
+
+    # we want to compute M_stellar_sys_in_universe / N_binaries_in_COMPAS
+    # = N_binaries_in_universe/N_binaries_in_COMPAS * N_stellar_sys_in_universe/N_binaries_in_universe * M_stellar_sys_in_universe/N_stellar_sys_in_universe
+    # = 1/fint * 1/fbin * average mass of a stellar system in the Universe
+
+    # fint = N_binaries_in_COMPAS/N_binaries_in_universe: the fraction of binaries that COMPAS simulates
+    fint = -alpha / 1.3 * (m1_max ** (-1.3) - m1_min ** (-1.3)) + alpha * m2_min / 2.3 * (m1_max ** (-2.3) - m1_min ** (-2.3))
+
+    # Next, N_stellar_sys_in_universe/N_binaries_in_universe * M_stellar_sys_in_universe/N_stellar_sys_in_universe
+    # N_binaries_in_universe/N_stellar_sys_in_universe = the binary fraction, so this contributes a factor 1/fbin
+    # fbin edges and values are chosen to approximately follow Figure 1 from Offner et al. (2023)
+    binary_bin_edges = [m1, 0.08, 0.5, 1, 10, m4]
+    if fbin is None:
+        # use a binary fraction that varies with mass
+        binaryFractions = [0.1, 0.225, 0.5, 0.8, 1.0]
+    else:
+        # otherwise use a constant binary fraction
+        binaryFractions = [fbin] * 5
+
+    # M_stellar_sys_in_universe/N_stellar_sys_in_universe = the average mass of a stellar system in the Universe.
+    # We are computing 1/fbin * M_stellar_sys_in_universe/N_stellar_sys_in_universe; skipping steps, this leads to:
+    #     int_A^B (1/fb(m1) + 0.5) m1 P(m1) dm1
+    # This is a double piecewise integral, i.e. pieces over the binary fraction bins and the IMF mass bins.
+    piece_wise_integral = 0
+
+    # For every binary fraction bin
+    for i in range(len(binary_bin_edges) - 1):
+        fbin_i = binaryFractions[i]  # Binary fraction for this mass range
+
+        # And every piece of the Kroupa IMF
+        for j in range(len(imf_mass_bounds) - 1):
+            exponent = IMF_powers[j]  # IMF exponent for these masses
+
+            # Check if the binary fraction bin overlaps with the IMF mass bin
+            if binary_bin_edges[i + 1] <= imf_mass_bounds[j] or binary_bin_edges[i] >= imf_mass_bounds[j + 1]:
+                continue  # No overlap
+
+            # Integrate over the overlapping mass range
+            m_start = max(binary_bin_edges[i], imf_mass_bounds[j])
+            m_end = min(binary_bin_edges[i + 1], imf_mass_bounds[j + 1])
+
+            # Compute the definite integral:
+            integral = (m_end**(exponent + 2) - m_start**(exponent + 2)) / (exponent + 2) * continuity_constants[j]
+
+            # Compute the sum term
+            sum_term = (1/fbin_i + 0.5) * integral
+            piece_wise_integral += sum_term
+
+    # combining them:
+    Average_mass_stellar_sys_per_fbin = alpha * piece_wise_integral
+
+    # Now compute the average mass per binary in COMPAS: M_stellar_sys_in_universe / N_binaries_in_COMPAS
+    M_sf_Univ_per_N_binary_COMPAS = (1/fint) * Average_mass_stellar_sys_per_fbin
+
+    return M_sf_Univ_per_N_binary_COMPAS
\ No newline at end of file
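For a constant binary fraction, the step-function machinery above should collapse back to the old closed form ((1/fbin + 0.5) times the mean system mass), so a quick cross-check sketch (an illustration, not part of the patch or the test suite):

from compas_python_utils.cosmic_integration.totalMassEvolvedPerZ import (
    old_analytical_star_forming_mass_per_binary_using_kroupa_imf as old_version,
    analytical_star_forming_mass_per_binary_using_kroupa_imf as new_version,
)

m_old = old_version(m1_min=5, m1_max=150, m2_min=0.1, fbin=0.7)
m_new = new_version(m1_min=5, m1_max=150, m2_min=0.1, fbin=0.7)
m_var = new_version(m1_min=5, m1_max=150, m2_min=0.1, fbin=None)  # mass-dependent step fbin
print(m_old, m_new, m_var)
# m_old and m_new should agree; m_var is expected to be larger, since low-mass stars are mostly single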
diff --git a/py_tests/conftest.py b/py_tests/conftest.py
index 53053e2ed..956f97f78 100644
--- a/py_tests/conftest.py
+++ b/py_tests/conftest.py
@@ -7,6 +7,10 @@
 from compas_python_utils.cosmic_integration.binned_cosmic_integrator.bbh_population import \
     generate_mock_bbh_population_file

+# Test values used in test_total_mass_evolved_per_z, defined in py_tests/test_values.py
+from py_tests.test_values import MAKE_PLOTS, M1_MIN, M1_MAX, M2_MIN, F_BIN
+
+
 HERE = os.path.dirname(__file__)
 TEST_CONFIG_DIR = os.path.join(HERE, "test_data")
 TEST_BASH = os.path.join(TEST_CONFIG_DIR, "run.sh")
@@ -60,5 +64,8 @@ def fake_compas_output(tmpdir) -> str:
     fname = f"{tmpdir}/COMPAS_mock_output.h5"
     generate_mock_bbh_population_file(
         filename=fname,
+        m1_min=M1_MIN,
+        m1_max=M1_MAX,
+        m2_min=M2_MIN
     )
     return fname
\ No newline at end of file
diff --git a/py_tests/test_total_mass_evolved_per_z.py b/py_tests/test_total_mass_evolved_per_z.py
index 8f77ec4ed..c27fc831b 100644
--- a/py_tests/test_total_mass_evolved_per_z.py
+++ b/py_tests/test_total_mass_evolved_per_z.py
@@ -5,16 +5,16 @@
 from compas_python_utils.cosmic_integration.binned_cosmic_integrator.bbh_population import \
     generate_mock_bbh_population_file
 import numpy as np
+
+from py_tests.conftest import test_archive_dir, fake_compas_output
+
 import matplotlib.pyplot as plt
 import h5py as h5
 import pytest

-MAKE_PLOTS = False
-
-M1_MIN = 5
-M1_MAX = 150
-M2_MIN = 0.1
+# Test values defined in py_tests/test_values.py
+from py_tests.test_values import MAKE_PLOTS, M1_MIN, M1_MAX, M2_MIN, F_BIN


 def test_imf(test_archive_dir):
@@ -65,36 +65,41 @@ def test_analytical_vs_numerical_star_forming_mass_per_binary(fake_compas_output
     m1_max = M1_MAX
     m1_min = M1_MIN
     m2_min = M2_MIN
-    fbin = 1
+    fbin = F_BIN

-    numerical = star_forming_mass_per_binary(fake_compas_output, m1_min, m1_max, m2_min, fbin)
     analytical = analytical_star_forming_mass_per_binary_using_kroupa_imf(m1_min, m1_max, m2_min, fbin)
-
+    numerical = star_forming_mass_per_binary(fake_compas_output, m1_min, m1_max, m2_min, fbin)
+
     assert numerical > 0
     assert analytical > 0
     assert np.isclose(numerical, analytical, rtol=1)

     if MAKE_PLOTS:
         fig = plot_star_forming_mass_per_binary_comparison(tmpdir, analytical, m1_min, m1_max, m2_min, fbin)
-        fig.savefig(f"{test_archive_dir}/analytical_vs_numerical.png")
+        fig.savefig(f"{test_archive_dir}/analytical_vs_numerical_var.png")


 def plot_star_forming_mass_per_binary_comparison(
         tmpdir, analytical, m1_min, m1_max, m2_min, fbin, nreps=5, nsamps=5
 ):
-    plt.axhline(analytical, color='tab:blue', label="analytical", ls='--')
+    # Analytical values
+    plt.axhline(analytical, color='tab:blue', label=f"analytical fbin = {fbin}", ls='--')
+
+    # Compute the numerical values
     n_samps = np.geomspace(1e3, 5e4, nsamps)
     numerical_vals = []
     for _ in range(nreps):
         vals = np.zeros(len(n_samps))
         for i, n in enumerate(n_samps):
             fname = f"{tmpdir}/test_{i}.h5"
-            generate_mock_bbh_population_file(tmpdir, n_systems=int(n))
+            generate_mock_bbh_population_file(filename=fname, n_systems=int(n),
+                                              m1_min=m1_min, m1_max=m1_max, m2_min=m2_min)
             vals[i] = (star_forming_mass_per_binary(fname, m1_min, m1_max, m2_min, fbin))
         numerical_vals.append(vals)

-    # plot the upper and lower bounds of the numerical values
+    # plot the upper and lower bounds of the numerical values
     numerical_vals = np.array(numerical_vals)
     lower = np.percentile(numerical_vals, 5, axis=0)
     upper = np.percentile(numerical_vals, 95, axis=0)
@@ -104,8 +109,9 @@ def plot_star_forming_mass_per_binary_comparison(
     )
     plt.plot(n_samps, np.median(numerical_vals, axis=0), color='tab:orange', label="numerical")
     plt.xscale("log")
-    plt.ylabel("Star forming mass per binary [M$_{\odot}$]")
+    plt.ylabel(r"Star forming mass per binary [M$_{\odot}$]")
     plt.xlabel("Number of samples")
+    plt.ylim(bottom=10)
     plt.xlim(min(n_samps), max(n_samps))
     plt.legend()
     return plt.gcf()
diff --git a/py_tests/test_values.py b/py_tests/test_values.py
new file mode 100644
index 000000000..f38ef0c28
--- /dev/null
+++ b/py_tests/test_values.py
@@ -0,0 +1,6 @@
+# Test values used in test_total_mass_evolved_per_z.py
+MAKE_PLOTS = True
+M1_MIN = 5
+M1_MAX = 150
+M2_MIN = 0.1
+F_BIN = None  # None = variable f_bin, otherwise a fixed value
\ No newline at end of file