lilit.likelihood
```python
import pickle
import os

import matplotlib.pyplot as plt
import numpy as np
from cobaya.likelihood import Likelihood

from .functions import (
    get_keys,
    get_Gauss_keys,
    cov_filling,
    sigma,
    inv_sigma,
    CAMBres2dict,
)


class LiLit(Likelihood):

    """Class defining the Likelihood for LiteBIRD (LiLit).

    The most relevant LiteBIRD study cases (T, E, B) are already tested and working within LiLit, so if you work with those you should not need to look into the actual definition of the likelihood function and can promptly start running your MCMCs. Still, you should provide the likelihood with a file containing proper LiteBIRD noise power spectra, since LiLit implements a simple inverse-noise weighting only as a placeholder for something more realistic. As for lensing, LiLit needs you to pass the reconstruction noise, since its computation is not coded; there is no placeholder for lensing.

    Parameters:
        name (str):
            The name for the likelihood, used in the output. It is necessary to pass it to LiLit (default: None).
        fields (list):
            List of fields in the data file (default: None).
        lmax (int or list):
            Maximum multipole to consider (default: None).
        like (str, optional):
            Type of likelihood to use (default: "exact"). Currently supports "exact" and "gaussian".
        lmin (int or list):
            Minimum multipole to consider (default: 2).
        cl_file (str, optional):
            Path to Cl file (default: None).
        nl_file (str, optional):
            Path to noise file (default: None).
        experiment (str, optional):
            Name of experiment (default: None).
        nside (int, optional):
            Nside of the map (default: None).
        r (float, optional):
            Tensor-to-scalar ratio (default: None).
        nt (float, optional):
            Tensor spectral tilt (default: None).
        pivot_t (float, optional):
            Pivot scale of the tensor primordial power spectrum (default: 0.01).
        fsky (float or list):
            Sky fraction (default: 1).
        sep (str, optional):
            Separator used in the data file (default: "").
        debug (bool, optional):
            If True, produces more verbose output (default: None).


    Attributes:
        fields (list):
            List of fields in the data file.
        n (int):
            Number of fields.
        keys (list):
            List of keywords for the dictionaries.
        gauss_keys (list):
            List of keywords for the Gaussian likelihood (4-point).
        sigma2 (np.ndarray):
            Array of covariances for the Gaussian likelihood case.
        lmax (int or list):
            Maximum multipole(s) to consider.
        lmaxs (dict):
            Dictionary of lmax values.
        fsky (float or list):
            Sky fraction(s).
        fskies (dict):
            Dictionary of fsky values.
        lmin (int or list):
            Minimum multipole(s) to consider.
        lmins (dict):
            Dictionary of lmin values.
        like (str):
            Type of likelihood to use.
        cl_file (str):
            Path to Cl file.
        fiduCLS (dict):
            Dictionary of fiducial Cls.
        noiseCLS (dict):
            Dictionary of noise Cls.
        fiduCOV (np.ndarray):
            Fiducial covariance matrix obtained from the corresponding dictionary.
        noiseCOV (np.ndarray):
            Noise covariance matrix obtained from the corresponding dictionary.
        data (np.ndarray):
            Data vector obtained by summing fiduCOV + noiseCOV.
        cobaCLS (dict):
            Dictionary of Cobaya Cls.
        cobaCOV (np.ndarray):
            Cobaya covariance matrix obtained from the corresponding dictionary.
        coba (np.ndarray):
            Cobaya vector obtained by summing cobaCOV + noiseCOV.
        nl_file (str):
            Path to noise file.
        experiment (str):
            Name of experiment.
        nside (int):
            Nside of the map.
        r (float):
            Tensor-to-scalar ratio.
        nt (float):
            Tensor spectral tilt.
        pivot_t (float):
            Pivot scale of the tensor primordial power spectrum.
        sep (str):
            Separator used in the data file.
        debug (bool):
            If True, produces more verbose output.
    """

    def __init__(
        self,
        name=None,
        fields=None,
        lmax=None,
        like="exact",
        lmin=2,
        cl_file=None,
        nl_file=None,
        experiment=None,
        nside=None,
        r=None,
        nt=None,
        pivot_t=0.01,
        fsky=1,
        sep="",
        debug=None,
    ):
        # Check that the user has provided the name of the likelihood
        assert (
            name is not None
        ), "You must provide the name of the likelihood (e.g. 'BB' or 'TTTEEE')"
        # Check that the user has provided the fields
        assert (
            fields is not None
        ), "You must provide the fields (e.g. 'b' or ['t', 'e'])"
        # Check that the user has provided the maximum multipole
        assert lmax is not None, "You must provide the lmax (e.g. 300)"

        self.fields = fields
        self.n = len(fields)
        self.lmin = lmin
        self.like = like
        self.sep = sep
        self.cl_file = cl_file
        self.nl_file = nl_file
        self.experiment = experiment
        if self.experiment is not None:
            # Check that the user has provided the nside if an experiment is used
            assert nside is not None, "You must provide an nside to compute the noise"
            self.nside = nside
        self.debug = debug
        self.keys = get_keys(fields=self.fields, debug=self.debug)
        if "bb" in self.keys:
            # Check that the user has provided the tensor-to-scalar ratio if a BB likelihood is used
            assert (
                r is not None
            ), "You must provide the tensor-to-scalar ratio r for the fiducial production (the default pivot scale is 0.01 Mpc^-1)"
            self.r = r
            self.nt = nt
            self.pivot_t = pivot_t

        self.set_lmin_lmax_fsky(lmin, lmax, fsky)

        Likelihood.__init__(self, name=name)

    def set_lmin_lmax_fsky(self, lmin, lmax, fsky):
        """Take the lmin, lmax and fsky parameters and set the corresponding attributes.

        Sets the minimum multipole, the maximum multipole and the sky fraction, automatically handling both single values and lists of values. Note that the lmin, lmax and fsky of the cross-correlations are set to the geometric mean of the values of the two fields involved. This approximation has been tested and found to be accurate, at least as long as the masks of the two fields largely overlap.

        Parameters:
            lmin (int or list):
                Value or list of values of lmin.
            lmax (int or list):
                Value or list of values of lmax.
            fsky (float or list):
                Value or list of values of fsky.
        """

        self.lmins = {}
        self.lmaxs = {}
        self.fskies = {}

        if isinstance(lmin, list):
            assert (
                len(lmin) == self.n
            ), "If you provide multiple lmin, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    # This approximation gains some extra multipoles in the
                    # cross-correlation for which the SNR is still good.
                    self.lmins[key] = int(np.ceil(np.sqrt(lmin[i] * lmin[j])))
                    self.lmins[key[::-1]] = int(np.ceil(np.sqrt(lmin[i] * lmin[j])))
            self.lmin = min(lmin)
        else:
            self.lmin = lmin

        if isinstance(lmax, list):
            assert (
                len(lmax) == self.n
            ), "If you provide multiple lmax, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    # This approximation gains some extra multipoles in the
                    # cross-correlation for which the SNR is still good.
                    self.lmaxs[key] = int(np.floor(np.sqrt(lmax[i] * lmax[j])))
                    self.lmaxs[key[::-1]] = int(np.floor(np.sqrt(lmax[i] * lmax[j])))
            self.lmax = max(lmax)
        else:
            self.lmax = lmax

        if isinstance(fsky, list):
            assert (
                len(fsky) == self.n
            ), "If you provide multiple fsky, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    # This approximation for the cross-correlation is not correct
                    # in the case of two very different masks (verified with simulations).
                    self.fskies[key] = np.sqrt(fsky[i] * fsky[j])
                    self.fskies[key[::-1]] = np.sqrt(fsky[i] * fsky[j])
            self.fsky = None
        else:
            self.fsky = fsky
        return

    def get_reduced_data(self, mat):
        """Find the reduced data, eliminating the singularity of the matrix.

        Cuts the rows and columns corresponding to zero diagonal values. Indeed, when the fields have different lmax or lmin, some of the covariance matrices will be singular.

        Parameters:
            mat (np.ndarray):
                The covariance matrix at a fixed multipole, possibly singular.
        """
        idx = np.where(np.diag(mat) == 0)[0]
        return np.delete(np.delete(mat, idx, axis=0), idx, axis=1)

    def prod_fidu(self):
        """Produce the fiducial spectra or read the input ones.

        If the user has not provided a Cl file, this function produces the fiducial power spectra starting from the CAMB inifile for Planck 2018. The extra keywords defined here maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt are set to the requested values; if you do not provide a tilt, it follows the standard single-field consistency relation. If instead you provide a custom file, that file is read and returned.
        """

        if self.cl_file is not None:
            if not self.cl_file.endswith(".pkl"):
                raise TypeError(
                    "The file provided is not a pickle file. You should provide a pickle file containing a dictionary with keys such as 'tt', 'ee', 'te', 'bb' and 'tb'."
                )
            with open(self.cl_file, "rb") as pickle_file:
                return pickle.load(pickle_file)

        try:
            import camb
        except ImportError:
            print("CAMB does not seem to be installed. Check the requirements.")
            raise

        path = os.path.dirname(os.path.abspath(__file__))
        planck_path = os.path.join(path, "planck_2018.ini")
        pars = camb.read_ini(planck_path)

        if "bb" in self.keys:  # If we want to include the tensor modes
            print(f"\nProducing fiducial spectra for r={self.r} and nt={self.nt}")
            pars.InitPower.set_params(
                As=2.100549e-9,
                ns=0.9660499,
                r=self.r,
                nt=self.nt,
                pivot_tensor=self.pivot_t,
                pivot_scalar=0.05,
                parameterization=2,
            )
            pars.WantTensors = True
            pars.Accuracy.AccurateBB = True
            pars.DoLensing = True

        if self.debug:
            print(pars)

        results = camb.get_results(pars)
        res = results.get_cmb_power_spectra(
            CMB_unit="muK",
            lmax=self.lmax,
            raw_cl=False,
        )
        return CAMBres2dict(res, self.keys)

    def prod_noise(self):
        """Produce the noise power spectra or read the input ones.

        If the user has not provided a noise file, this function produces the noise power spectra for a given experiment, inverse-noise weighting the white noise of each channel (TT, EE, BB). You may want to have a look at this procedure, since it is merely a placeholder: you should provide a more realistic file from which to read the noise spectra, given that inverse-noise weighting severely underestimates the amount of noise. If instead you provide a proper custom file, that file is read and returned.
        """
        if self.nl_file is not None:
            if not self.nl_file.endswith(".pkl"):
                raise TypeError(
                    "The file provided for the noise is not a pickle file. You should provide a pickle file containing a dictionary with keys such as 'tt', 'ee', 'te', 'bb' and 'tb'."
                )
            with open(self.nl_file, "rb") as pickle_file:
                return pickle.load(pickle_file)

        print(
            "***WARNING***: the inverse noise weighting performed here severely underestimates "
            "the actual noise level of LiteBIRD. You should provide an input "
            "noise power spectrum with a more realistic noise."
        )

        try:
            import healpy as hp
            import yaml
            from yaml.loader import SafeLoader
        except ImportError:
            print("yaml or healpy does not seem to be installed. Check the requirements.")
            raise

        assert (
            self.experiment is not None
        ), "You must specify the experiment you want to consider"
        print(f"\nComputing noise for {self.experiment}")

        path = os.path.dirname(os.path.abspath(__file__))
        experiments_path = os.path.join(path, "experiments.yaml")
        with open(experiments_path) as f:
            data = yaml.load(f, Loader=SafeLoader)

        instrument = data[self.experiment]

        fwhms = np.array(instrument["fwhm"])
        freqs = np.array(instrument["frequency"])
        depth_p = np.array(instrument["depth_p"])
        depth_i = np.array(instrument["depth_i"])

        # Convert the sensitivities to muK-rad
        depth_p /= hp.nside2resol(self.nside, arcmin=True)
        depth_i /= hp.nside2resol(self.nside, arcmin=True)
        depth_p *= np.sqrt(hp.nside2pixarea(self.nside, degrees=False))
        depth_i *= np.sqrt(hp.nside2pixarea(self.nside, degrees=False))

        n_freq = len(freqs)

        ell = np.arange(0, self.lmax + 1, 1)

        keys = ["tt", "ee", "bb"]

        # Gaussian beam width from the FWHM (arcmin -> radians)
        sigma_beam = np.radians(fwhms / 60.0) / np.sqrt(8.0 * np.log(2.0))
        sigma2 = sigma_beam**2

        # Inverse beam window function for each channel
        g = np.exp(ell * (ell + 1) * sigma2[:, np.newaxis])

        pol_factor = np.array(
            [np.zeros(sigma2.shape), 2 * sigma2, 2 * sigma2, sigma2],
        )
        pol_factor = np.exp(pol_factor)

        # Calculate the Gaussian beam function for each polarization
        G = []
        for i, arr in enumerate(pol_factor):
            G.append(g * arr[:, np.newaxis])
        g = np.array(G)

        res = {key: np.zeros((n_freq, self.lmax + 1)) for key in keys}

        # Inverse-noise weights of the single channels
        res["tt"] = 1 / (g[0, :, :] * depth_i[:, np.newaxis] ** 2)
        res["ee"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
        res["bb"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)

        res["tt"] = ell * (ell + 1) / (np.sum(res["tt"], axis=0)) / 2 / np.pi
        res["ee"] = ell * (ell + 1) / (np.sum(res["ee"], axis=0)) / 2 / np.pi
        res["bb"] = ell * (ell + 1) / (np.sum(res["bb"], axis=0)) / 2 / np.pi

        res["tt"][:2] = [0, 0]
        res["ee"][:2] = [0, 0]
        res["bb"][:2] = [0, 0]

        return res

    def initialize(self):
        """Initializes the fiducial spectra and the noise power spectra."""
        self.fiduCLS = self.prod_fidu()
        self.noiseCLS = self.prod_noise()

        self.fiduCOV = cov_filling(
            self.fields, self.lmin, self.lmax, self.fiduCLS, self.lmins, self.lmaxs
        )
        self.noiseCOV = cov_filling(
            self.fields, self.lmin, self.lmax, self.noiseCLS, self.lmins, self.lmaxs
        )

        if self.debug:
            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")

            print("\nPrinting the first few values to check that it starts from 0...")
            field = list(self.fiduCLS.keys())[1]
            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
            field = list(self.noiseCLS.keys())[1]
            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")

        self.data = (
            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        if self.like == "gaussian":
            self.gauss_keys = get_Gauss_keys(n=self.n, keys=self.keys, debug=self.debug)
            sigma2 = sigma(
                self.n,
                self.lmin,
                self.lmax,
                self.gauss_keys,
                self.fiduCLS,
                self.noiseCLS,
                self.fsky,
                self.fskies,
            )
            self.sigma2 = inv_sigma(self.lmin, self.lmax, sigma2)

    def get_requirements(self):
        """Defines the requirements of the likelihood, specifying the quantities calculated by a theory code that are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
        requirements = {}
        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
        if self.debug:
            requirements["CAMBdata"] = None
            print(
                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
            )
        return requirements

    def data_vector(self, cov):
        """Get the data vector from the covariance matrix.

        Extracts the data vector necessary for the Gaussian case. Note that null values are cut, since some entries may be null when the fields have different lmax values.

        Parameters:
            cov (np.ndarray):
                The covariance matrix at a fixed multipole, possibly containing null entries.
        """

        upper_triang = cov[np.triu_indices(self.n)]
        # Get the indices of the null elements
        idx = np.where(upper_triang == 0)[0]
        # Remove the corresponding entries
        upper_triang = np.delete(upper_triang, idx, axis=0)

        return upper_triang, idx

    def chi_exact(self, i=0):
        """Computes the chi-square term for the exact likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        if self.n != 1:
            # The data arrays start at lmin, so index i corresponds to ell = lmin + i
            ell = np.arange(self.lmin, self.lmax + 1, 1)
            coba = self.coba[:, :, i]
            data = self.data[:, :, i]
            # If the determinant is null, we need to reduce the covariance matrix
            if np.linalg.det(coba) == 0:
                data = self.get_reduced_data(data)
                coba = self.get_reduced_data(coba)
            M = np.linalg.solve(coba, data)
            return (2 * ell[i] + 1) * (
                np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
            )
        else:
            ell = np.arange(self.lmin, self.lmax + 1, 1)
            M = self.data / self.coba
            return (2 * ell + 1) * (M - np.log(np.abs(M)) - 1)

    def chi_gaussian(self, i=0):
        """Computes the chi-square term for the Gaussian likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        if self.n != 1:
            coba, idx = self.data_vector(self.coba[:, :, i])
            data, _ = self.data_vector(self.data[:, :, i])
            COV = np.delete(self.sigma2[:, :, i], idx, axis=0)
            COV = np.delete(COV, idx, axis=1)
            return (coba - data) @ COV @ (coba - data)
        else:
            coba = self.coba[0, 0, :]
            data = self.data[0, 0, :]
            res = (coba - data) * self.sigma2 * (coba - data)
            return res

    def compute_chi_part(self, i=0):
        """Chooses which chi-square term to compute.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        if self.like == "exact":
            return self.chi_exact(i)
        elif self.like == "gaussian":
            return self.chi_gaussian(i)
        else:
            print("You requested something different from 'exact' or 'gaussian'!")
            return

    def log_likelihood(self):
        """Computes the log likelihood."""
        # Get the array of multipoles
        ell = np.arange(self.lmin, self.lmax + 1, 1)
        # Compute the log likelihood for each multipole
        if self.n != 1:
            logp_ℓ = np.zeros(ell.shape)
            for i in range(0, self.lmax + 1 - self.lmin):
                logp_ℓ[i] = -0.5 * self.compute_chi_part(i)
        else:
            logp_ℓ = -0.5 * self.compute_chi_part()
        # Sum the log likelihood over multipoles
        return np.sum(logp_ℓ)

    def logp(self, **params_values):
        """Gets the log likelihood and passes it to Cobaya to carry on the MCMC process."""
        if self.debug:
            CAMBdata = self.provider.get_CAMBdata()
            pars = CAMBdata.Params
            print(pars)

        # Get the Cls from Cobaya
        self.cobaCLS = self.provider.get_Cl(ell_factor=True)
        ell = np.arange(0, self.lmax + 1, 1)
        for key, value in self.cobaCLS.items():
            # Bring the lensing spectra to the same convention as the others
            if key == "pp":
                value[2 : self.lmax + 1] = (
                    value[2 : self.lmax + 1] / (ell * (ell + 1))[2:]
                )
            elif "p" in key:
                value[2 : self.lmax + 1] = (
                    value[2 : self.lmax + 1] / np.sqrt(ell * (ell + 1))[2:]
                )
            self.cobaCLS[key] = value[: self.lmax + 1]

        if self.debug:
            print(f"Keys of Cobaya CLs ---> {self.cobaCLS.keys()}")

            field = list(self.cobaCLS.keys())[1]
            print("\nPrinting the first few values to check that it starts from 0...")
            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLS[field][0:5]}")

        # Fill the covariance matrix with the Cls from Cobaya
        self.cobaCOV = cov_filling(
            self.fields, self.lmin, self.lmax, self.cobaCLS, self.lmins, self.lmaxs
        )

        if self.debug:
            ell = np.arange(0, self.lmax + 1, 1)
            obs1 = 0
            obs2 = 0
            plt.plot(ell, self.fiduCOV[obs1, obs2, :], label="Fiducial CLs")
            plt.plot(ell, self.cobaCOV[obs1, obs2, :], label="Cobaya CLs", ls="--")
            plt.plot(ell, self.noiseCOV[obs1, obs2, :], label="Noise CLs")
            plt.loglog()
            plt.xlim(2, None)
            plt.legend()
            plt.show()

        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
        self.coba = (
            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        # Compute the likelihood
        logp = self.log_likelihood()

        if self.debug:
            print(f"Log-posterior --> {logp}")
            exit()

        return logp


__all__ = ["LiLit"]

__docformat__ = "google"
__pdoc__ = {}
__pdoc__[
    "Likelihood"
] = "Likelihood class from Cobaya, refer to Cobaya documentation for more information."
```
```python
class LiLit(Likelihood)
```
Class defining the Likelihood for LiteBIRD (LiLit).
The most relevant LiteBIRD study cases (T, E, B) are already tested and working within LiLit, so if you work with those you should not need to look into the actual definition of the likelihood function and can promptly start running your MCMCs. Still, you should provide the likelihood with a file containing proper LiteBIRD noise power spectra, since LiLit implements a simple inverse-noise weighting only as a placeholder for something more realistic. As for lensing, LiLit needs you to pass the reconstruction noise, since its computation is not coded; there is no placeholder for lensing.
Arguments:
- name (str): The name for the likelihood, used in the output. It is necessary to pass it to LiLit (default: None).
- fields (list): List of fields in the data file (default: None).
- lmax (int or list): Maximum multipole to consider (default: None).
- like (str, optional): Type of likelihood to use (default: "exact"). Currently supports "exact" and "gaussian".
- lmin (int or list): Minimum multipole to consider (default: 2).
- cl_file (str, optional): Path to Cl file (default: None).
- nl_file (str, optional): Path to noise file (default: None).
- experiment (str, optional): Name of experiment (default: None).
- nside (int, optional): Nside of the map (default: None).
- r (float, optional): Tensor-to-scalar ratio (default: None).
- nt (float, optional): Tensor spectral tilt (default: None).
- pivot_t (float, optional): Pivot scale of the tensor primordial power spectrum (default: 0.01).
- fsky (float or list): Sky fraction (default: 1).
- sep (str, optional): Separator used in the data file (default: "").
- debug (bool, optional): If True, produces more verbose output (default: None).
Attributes:
- fields (list): List of fields in the data file.
- n (int): Number of fields.
- keys (list): List of keywords for the dictionaries.
- gauss_keys (list): List of keywords for the Gaussian likelihood (4-point).
- sigma2 (np.ndarray): Array of covariances for the Gaussian likelihood case.
- lmax (int or list): Maximum multipole(s) to consider.
- lmaxs (dict): Dictionary of lmax values.
- fsky (float or list): Sky fraction(s).
- fskies (dict): Dictionary of fsky values.
- lmin (int or list): Minimum multipole(s) to consider.
- lmins (dict): Dictionary of lmin values.
- like (str): Type of likelihood to use.
- cl_file (str): Path to Cl file.
- fiduCLS (dict): Dictionary of fiducial Cls.
- noiseCLS (dict): Dictionary of noise Cls.
- fiduCOV (np.ndarray): Fiducial covariance matrix obtained from the corresponding dictionary.
- noiseCOV (np.ndarray): Noise covariance matrix obtained from the corresponding dictionary.
- data (np.ndarray): Data vector obtained by summing fiduCOV + noiseCOV.
- cobaCLS (dict): Dictionary of Cobaya Cls.
- cobaCOV (np.ndarray): Cobaya covariance matrix obtained from the corresponding dictionary.
- coba (np.ndarray): Cobaya vector obtained by summing cobaCOV + noiseCOV.
- nl_file (str): Path to noise file.
- experiment (str): Name of experiment.
- nside (int): Nside of the map.
- r (float): Tensor-to-scalar ratio.
- nt (float): Tensor spectral tilt.
- pivot_t (float): Pivot scale of the tensor primordial power spectrum.
- sep (str): Separator used in the data file.
- debug (bool): If True, produces more verbose output.
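To make the parameters above concrete, here is a minimal sketch of how LiLit might be instantiated and passed to Cobaya. The noise file name, priors and sampler settings are hypothetical placeholders; check the Cobaya documentation for the exact way your version accepts likelihood instances.

```python
from cobaya.run import run
from lilit.likelihood import LiLit

# Exact likelihood on B-modes up to lmax=500, with a fiducial r of 0.01.
# "noise.pkl" is a hypothetical pickled dictionary of noise spectra (see prod_noise below).
bb_like = LiLit(
    name="BB",
    fields=["b"],
    like="exact",
    lmax=500,
    r=0.01,
    nl_file="noise.pkl",
    fsky=0.6,
)

info = {
    "likelihood": {"BB": bb_like},
    "theory": {"camb": None},
    "params": {"r": {"prior": {"min": 0.0, "max": 0.1}, "latex": "r"}},  # hypothetical prior
    "sampler": {"mcmc": None},
}

# updated_info, sampler = run(info)
```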
```python
def __init__(self, name=None, fields=None, lmax=None, like="exact", lmin=2,
             cl_file=None, nl_file=None, experiment=None, nside=None, r=None,
             nt=None, pivot_t=0.01, fsky=1, sep="", debug=None)
```
```python
def set_lmin_lmax_fsky(self, lmin, lmax, fsky)
```
Take the lmin, lmax and fsky parameters and set the corresponding attributes.
Sets the minimum multipole, the maximum multipole and the sky fraction, automatically handling both single values and lists of values. Note that the lmin, lmax and fsky of the cross-correlations are set to the geometric mean of the values of the two fields involved. This approximation has been tested and found to be accurate, at least as long as the masks of the two fields largely overlap.
Arguments:
- lmin (int or list): Value or list of values of lmin.
- lmax (int or list): Value or list of values of lmax.
- fsky (float or list): Value or list of values of fsky.
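As a concrete illustration of the geometric-mean rule described above (a sketch; the field values are arbitrary):

```python
import numpy as np

# Per-field cuts, e.g. for fields ['t', 'e']:
lmax_t, lmax_e = 1500, 1000
fsky_t, fsky_e = 0.8, 0.6

# Cross-correlation cuts, as set by set_lmin_lmax_fsky:
lmax_te = int(np.floor(np.sqrt(lmax_t * lmax_e)))  # 1224, above min(lmax) = 1000
fsky_te = np.sqrt(fsky_t * fsky_e)                 # ~0.693
```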
```python
def get_reduced_data(self, mat)
```
Find the reduced data, eliminating the singularity of the matrix.
Cuts the rows and columns corresponding to zero diagonal values. Indeed, when the fields have different lmax or lmin, some of the covariance matrices will be singular.
Arguments:
- mat (np.ndarray): The covariance matrix at a fixed multipole, possibly singular.
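For intuition, a self-contained sketch of the reduction on a toy 3x3 block whose third field carries no data at this multipole:

```python
import numpy as np

mat = np.array([[4.0, 1.0, 0.0],
                [1.0, 3.0, 0.0],
                [0.0, 0.0, 0.0]])  # singular: zero on the third diagonal entry

idx = np.where(np.diag(mat) == 0)[0]  # array([2])
reduced = np.delete(np.delete(mat, idx, axis=0), idx, axis=1)
print(reduced)  # [[4. 1.]
                #  [1. 3.]] -- the invertible 2x2 block
```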
```python
def prod_fidu(self)
```
Produce fiducial spectra or read the input ones.
If the user has not provided a Cl file, this function produces the fiducial power spectra starting from the CAMB inifile for Planck 2018. The extra keywords defined here maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt are set to the requested values; if you do not provide a tilt, it follows the standard single-field consistency relation. If instead you provide a custom file, that file is read and returned.
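If you provide your own cl_file, it must be a pickled dictionary of spectra indexed from ell = 0. A minimal sketch of writing such a file (the zero arrays are placeholders, not physical spectra; note that LiLit internally produces spectra with raw_cl=False, i.e. including the ell(ell+1)/2pi factor):

```python
import pickle

import numpy as np

lmax = 500
cls = {
    "tt": np.zeros(lmax + 1),  # replace with your fiducial spectra
    "ee": np.zeros(lmax + 1),
    "te": np.zeros(lmax + 1),
    "bb": np.zeros(lmax + 1),
}

with open("fiducial_cls.pkl", "wb") as f:
    pickle.dump(cls, f)
```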
```python
def prod_noise(self)
```
Produce noise power spectra or read the input ones.
If the user has not provided a noise file, this function produces the noise power spectra for a given experiment, inverse-noise weighting the white noise of each channel (TT, EE, BB). You may want to have a look at this procedure, since it is merely a placeholder: you should provide a more realistic file from which to read the noise spectra, given that inverse-noise weighting severely underestimates the amount of noise. If instead you provide a proper custom file, that file is read and returned.
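In formulas, the placeholder noise model combines the channels as follows: with map depth $\sigma_c$ (converted to $\mu K \cdot \mathrm{rad}$) and Gaussian beam width $\sigma_{B,c} = \theta^{\mathrm{FWHM}}_c / \sqrt{8 \ln 2}$, the co-added noise reads

$$
N_\ell = \frac{\ell(\ell+1)}{2\pi}\left[\sum_{c} \sigma_c^{-2}\, e^{-\ell(\ell+1)\,\sigma_{B,c}^{2}}\right]^{-1},
$$

where the sum runs over the channels listed in experiments.yaml; the intensity depths enter TT and the polarization depths enter EE and BB (with the extra constant beam factor encoded in pol_factor), and the $\ell(\ell+1)/2\pi$ prefactor matches the convention of the fiducial spectra (raw_cl=False).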
    def initialize(self):
        """Initializes the fiducial spectra and the noise power spectra."""
        self.fiduCLS = self.prod_fidu()
        self.noiseCLS = self.prod_noise()

        self.fiduCOV = cov_filling(
            self.fields, self.lmin, self.lmax, self.fiduCLS, self.lmins, self.lmaxs
        )
        self.noiseCOV = cov_filling(
            self.fields, self.lmin, self.lmax, self.noiseCLS, self.lmins, self.lmaxs
        )

        if self.debug:
            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")

            print("\nPrinting the first few values to check that it starts from 0...")
            field = list(self.fiduCLS.keys())[1]
            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
            field = list(self.noiseCLS.keys())[1]
            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")

        # The "data" vector is the fiducial signal plus noise, cut to [lmin, lmax]
        self.data = (
            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        # For the Gaussian likelihood, precompute the inverse covariance of the Cls
        if self.like == "gaussian":
            self.gauss_keys = get_Gauss_keys(n=self.n, keys=self.keys, debug=self.debug)
            sigma2 = sigma(
                self.n,
                self.lmin,
                self.lmax,
                self.gauss_keys,
                self.fiduCLS,
                self.noiseCLS,
                self.fsky,
                self.fskies,
            )
            self.sigma2 = inv_sigma(self.lmin, self.lmax, sigma2)
Initializes the fiducial spectra and the noise power spectra.
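Note that initialize is called by Cobaya when the likelihood component is set up, so the fiducial and noise spectra are ready before the first evaluation. A minimal sketch of constructing LiLit (the parameter values are placeholders, and the top-level import assumes the package exposes the class):

    from lilit import LiLit  # assuming the class is exposed at package level

    # Hypothetical B-mode-only setup: fiducial r = 0.01, multipoles up to 500,
    # noise read from the (placeholder) pickle file sketched above.
    likelihood = LiLit(
        name="BB",
        fields=["b"],
        like="exact",
        lmax=500,
        r=0.01,
        nl_file="noise_litebird.pkl",
    )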
    def get_requirements(self):
        """Defines the requirements of the likelihood, specifying which quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
        requirements = {}
        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
        if self.debug:
            requirements["CAMBdata"] = None
            print(
                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
            )
        return requirements
Defines the requirements of the likelihood, specifying which quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing.
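For concreteness, here is what the returned dictionary would look like for a hypothetical T+E setup with lmax=1500, assuming self.keys == ["tt", "te", "ee"]:

    # Hypothetical output of get_requirements() for fields T and E:
    {"Cl": {"tt": 1500, "te": 1500, "ee": 1500}}
    # With debug enabled, the dictionary also contains "CAMBdata": None.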
    def data_vector(self, cov):
        """Get the data vector from the covariance matrix.

        Extracts the data vector necessary for the Gaussian case. Note that null values are cut out, since some entries may be null when the fields have different values of lmax.

        Parameters:
            cov (np.ndarray):
                A ndarray containing the covariance matrices, some of which may be null.
        """
        # Take the upper triangle of the covariance matrix
        upper_triang = cov[np.triu_indices(self.n)]
        # Get the indices of the null elements
        idx = np.where(upper_triang == 0)[0]
        # Remove the corresponding entries
        upper_triang = np.delete(upper_triang, idx, axis=0)

        return upper_triang, idx
Get the data vector from the covariance matrix.
Extracts the data vector necessary for the Gaussian case. Note that null values are cut out, since some entries may be null when the fields have different values of lmax.
Arguments:
- cov (np.ndarray): A ndarray containing the covariance matrices, some of which may be null.
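As a standalone toy illustration of this extraction (the 3x3 matrix is made up; the zero entries mimic field combinations that are null beyond their lmax):

    import numpy as np

    n = 3
    cov = np.array(
        [
            [1.0, 0.2, 0.0],
            [0.2, 2.0, 0.0],
            [0.0, 0.0, 0.0],
        ]
    )

    upper_triang = cov[np.triu_indices(n)]  # [1.0, 0.2, 0.0, 2.0, 0.0, 0.0]
    idx = np.where(upper_triang == 0)[0]    # indices of the null entries
    vector = np.delete(upper_triang, idx)   # [1.0, 0.2, 2.0]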
    def chi_exact(self, i=0):
        """Computes the proper chi-square term for the exact likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        # Index i counts multipoles starting from lmin, so ell[i] = lmin + i
        ell = np.arange(self.lmin, self.lmax + 1, 1)
        if self.n != 1:
            coba = self.coba[:, :, i]
            data = self.data[:, :, i]
            # If the determinant is null, reduce the covariance matrix
            if np.linalg.det(coba) == 0:
                data = self.get_reduced_data(data)
                coba = self.get_reduced_data(coba)
            M = np.linalg.solve(coba, data)
            return (2 * ell[i] + 1) * (
                np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
            )
        else:
            M = self.data / self.coba
            return (2 * ell + 1) * (M - np.log(np.abs(M)) - 1)
Computes the proper chi-square term for the exact likelihood case.
Arguments:
- i (int, optional): ell index if needed. Defaults to 0.
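This is the standard exact-likelihood shape, chi2_ell = (2ell + 1) [Tr(M) - ln det M - n] with M the inverse theory covariance times the data covariance. A quick standalone check with made-up 2x2 matrices (not part of the class):

    import numpy as np

    ell = 80  # arbitrary multipole, purely illustrative
    data = np.array([[2.0, 0.3], [0.3, 1.5]])  # made-up "observed" covariance
    coba = np.array([[1.8, 0.2], [0.2, 1.6]])  # made-up theory covariance

    M = np.linalg.solve(coba, data)  # M = coba^-1 @ data
    chi2 = (2 * ell + 1) * (np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0])
    # chi2 vanishes when coba == data, as expected for this statistic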
    def chi_gaussian(self, i=0):
        """Computes the proper chi-square term for the Gaussian likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        if self.n != 1:
            coba, idx = self.data_vector(self.coba[:, :, i])
            data, _ = self.data_vector(self.data[:, :, i])
            # Remove the null entries from the inverse covariance as well
            COV = np.delete(self.sigma2[:, :, i], idx, axis=0)
            COV = np.delete(COV, idx, axis=1)
            return (coba - data) @ COV @ (coba - data)
        else:
            coba = self.coba[0, 0, :]
            data = self.data[0, 0, :]
            return (coba - data) * self.sigma2 * (coba - data)
Computes the proper chi-square term for the Gaussian likelihood case.
Arguments:
- i (int, optional): ell index if needed. Defaults to 0.
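This is the usual Gaussian quadratic form (x - mu)^T Sigma^-1 (x - mu), evaluated per multipole. A standalone toy check with made-up numbers:

    import numpy as np

    coba = np.array([1.1, 0.4])  # made-up "theory" vector at one multipole
    data = np.array([1.0, 0.5])  # made-up "data" vector
    inv_cov = np.linalg.inv(np.diag([0.02, 0.05]))  # made-up covariance

    chi2 = (coba - data) @ inv_cov @ (coba - data)  # 0.01/0.02 + 0.01/0.05 = 0.7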
    def compute_chi_part(self, i=0):
        """Chooses which chi-square term to compute.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        if self.like == "exact":
            return self.chi_exact(i)
        elif self.like == "gaussian":
            return self.chi_gaussian(i)
        else:
            raise ValueError(
                "You requested something different from 'exact' or 'gaussian'!"
            )
Chooses which chi-square term to compute.
Arguments:
- i (int, optional): ell index if needed. Defaults to 0.
    def log_likelihood(self):
        """Computes the log likelihood."""
        # Get the array of multipoles
        ell = np.arange(self.lmin, self.lmax + 1, 1)
        # Compute the log likelihood for each multipole
        if self.n != 1:
            logp_ℓ = np.zeros(ell.shape)
            for i in range(0, self.lmax + 1 - self.lmin):
                logp_ℓ[i] = -0.5 * self.compute_chi_part(i)
        else:
            logp_ℓ = -0.5 * self.compute_chi_part()
        # Sum the log likelihood over multipoles
        return np.sum(logp_ℓ)
Computes the log likelihood.
    def logp(self, **params_values):
        """Gets the log likelihood and passes it to Cobaya to carry on the MCMC process."""
        if self.debug:
            CAMBdata = self.provider.get_CAMBdata()
            pars = CAMBdata.Params
            print(pars)

        # Get the Cls from Cobaya; with ell_factor=True they come as
        # D_ell = ell(ell+1)C_ell/(2pi), matching the convention used above
        self.cobaCLS = self.provider.get_Cl(ell_factor=True)
        ell = np.arange(0, self.lmax + 1, 1)
        # Undo the extra ell factors that Cobaya applies to the lensing spectra
        for key, value in self.cobaCLS.items():
            if key == "pp":
                value[2 : self.lmax + 1] = (
                    value[2 : self.lmax + 1] / (ell * (ell + 1))[2:]
                )
            elif "p" in key:
                value[2 : self.lmax + 1] = (
                    value[2 : self.lmax + 1] / np.sqrt(ell * (ell + 1))[2:]
                )
            self.cobaCLS[key] = value[: self.lmax + 1]

        if self.debug:
            print(f"Keys of Cobaya CLs ---> {self.cobaCLS.keys()}")

            field = list(self.cobaCLS.keys())[1]
            print("\nPrinting the first few values to check that it starts from 0...")
            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLS[field][0:5]}")

        # Fill the covariance matrix with the Cls from Cobaya
        self.cobaCOV = cov_filling(
            self.fields, self.lmin, self.lmax, self.cobaCLS, self.lmins, self.lmaxs
        )

        if self.debug:
            ell = np.arange(0, self.lmax + 1, 1)
            obs1 = 0
            obs2 = 0
            plt.plot(ell, self.fiduCOV[obs1, obs2, :], label="Fiducial CLs")
            plt.plot(ell, self.cobaCOV[obs1, obs2, :], label="Cobaya CLs", ls="--")
            plt.plot(ell, self.noiseCOV[obs1, obs2, :], label="Noise CLs")
            plt.loglog()
            plt.xlim(2, None)
            plt.legend()
            plt.show()

        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
        self.coba = (
            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        # Compute the likelihood
        logp = self.log_likelihood()

        if self.debug:
            print(f"Log-likelihood --> {logp}")
            # In debug mode, stop after a single evaluation
            exit()

        return logp
Gets the log likelihood and passes it to Cobaya to carry on the MCMC process.
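For context, here is a hedged sketch of driving LiLit with Cobaya; the likelihood settings, prior, and theory block are hypothetical placeholders (check the LiLit and Cobaya documentation for validated configurations):

    from cobaya.run import run
    from lilit import LiLit  # assuming the class is exposed at package level

    info = {
        "likelihood": {
            # Hypothetical B-mode likelihood instance; all settings are placeholders
            "BB": LiLit(name="BB", fields=["b"], like="exact", lmax=500, r=0.01),
        },
        "params": {
            # Hypothetical prior on the tensor-to-scalar ratio
            "r": {"prior": {"min": 0.0, "max": 0.1}, "latex": "r"},
        },
        "theory": {"camb": None},
        "sampler": {"mcmc": None},
    }

    updated_info, sampler = run(info)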
Inherited Members
- cobaya.likelihood.Likelihood
  - marginal
  - calculate
  - wait
- cobaya.theory.Theory
  - must_provide
  - initialize_with_params
  - initialize_with_provider
  - get_param
  - get_result
  - get_can_provide_methods
  - get_can_provide
  - get_can_provide_params
  - get_can_support_params
  - get_allow_agnostic
  - input_params_extra
  - set_cache_size
  - check_cache_and_compute
  - get_current_derived
  - get_provider
  - get_helper_theories
  - update_for_helper_theories
  - get_attr_list_with_helpers
  - get_speed
  - set_measured_speed
- cobaya.component.CobayaComponent
  - set_timing_on
  - get_name
  - close
  - set_instance_defaults
  - get_version
  - has_version
  - get_kind
  - compare_versions
- cobaya.log.HasLogger
  - set_logger
  - is_debug
  - mpi_warning
  - mpi_info
  - mpi_debug
- cobaya.component.HasDefaults
  - get_qualified_names
  - get_qualified_class_name
  - get_class_path
  - get_file_base_name
  - get_root_file_name
  - get_yaml_file
  - get_desc
  - get_bibtex
  - get_associated_file_content
  - get_class_options
  - get_defaults
  - get_annotations
- cobaya.likelihood.LikelihoodInterface
  - current_logp