Skip to content

Photon List

photon_list

PhotonList

Create the photon lists associated with the observation of a cluster, using an interpolation of spectra in temperature and abundance

Source code in src/xifu_cluster_sim/photon_list.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
class PhotonList:
    """
    Create the photon lists associated
    with the observation of a cluster, using
    an interpolation of spectra in temperature
    and abundance.

    Workflow: select a sub-pointing of the cluster grid
    (``select_sub_pointing``), generate photons per cell
    (``add_photons_interpTZ_new``, driven either per line of sight or via the
    multiprocessing helpers), then write the result to SIMPUT files
    (``write_photon_list_to_simputs``).
    """
    def __init__(self,
                 cluster,
                 E_bounds,
                 Z_scale,
                 T_scale,
                 interp_spec_table,
                 exposure = 125e3,
                 exposure_overhead = 10,
                 eff_area = 2e4,
                 fov_radius_pix = 26.5):
        """
        Load cluster object and define parameters of observation

        Parameters:
            cluster (object): cluster object
            E_bounds (np.array): Bounds in energy used to sample photons
            Z_scale (np.array) : Abundance scale used for interpolated grid
            T_scale (np.array) : Temperature scale used for interpolated grid
            interp_spec_table (np.array) : Table of spectra to interpolate from,
                indexed as interp_spec_table[indZ][indT]
            exposure (float) : Exposure in seconds
            exposure_overhead (float) : Multiplying factor for the exposure for oversampling of photon list
            eff_area (float) : Approximate total effective area in cm$^2$
            fov_radius_pix (float) : Radius of the circular field of view, in pixels

        """
        self.cluster = cluster
        self.E_bounds = E_bounds
        # Energy bin centers, midway between consecutive bounds
        self.E = (E_bounds[1:]+E_bounds[:-1])/2
        self.T_scale = T_scale
        self.Z_scale = Z_scale
        self.interp_spec_table = interp_spec_table
        self.exposure = exposure
        self.exposure_overhead = exposure_overhead
        # Total number of cells in the full input grid
        self.nb_cells = np.prod(cluster.grid.shape)
        # Bulk redshift of the cluster; per-cell redshifts are shifted
        # relative to this in add_photons_interpTZ_new
        self.cluster_z = cluster.cluster_z
        self.eff_area = eff_area
        self.fov_radius_pix = fov_radius_pix

    def select_sub_pointing(self,
                           x_offset=0,
                           y_offset=0,
                           pointing_shape = (58,58)):

        """
        Selects the portion of input grid that falls within selected pointing

        Parameters:
            x_offset (int): Offset along X, in pixels
            y_offset (int): Offset along Y, in pixels
            pointing_shape (tuple): Shape of pointing to extract from input grid

        """

        total_shape = self.cluster.grid.shape
        X,Y = np.meshgrid(np.arange(total_shape[0]),
                          np.arange(total_shape[1]))

        X = X.astype('float64')
        Y = Y.astype('float64')
        # Circular FOV selection of radius fov_radius_pix, centered on the
        # grid center plus the requested offsets.
        # NOTE(review): pointing_shape is accepted but never used here — the
        # selection is purely the circular mask below; confirm intent.
        mask = np.where(np.sqrt((X - total_shape[0]/2 - x_offset)**2 + (Y - total_shape[1]/2 - y_offset)**2) < self.fov_radius_pix)

        # Keep only the cells inside the FOV for all physical quantities
        self.ra = self.cluster.ra[mask]
        self.dec = self.cluster.dec[mask]
        self.norm = self.cluster.norm[mask]
        self.kT_cube = self.cluster.kT_cube[mask]
        self.Z_cube = self.cluster.Z_cube[mask]
        self.z_cube = self.cluster.z_cube[mask]

    def process_los(self,ra_los,dec_los,norm_los,kT_los, Z_los, z_los):
        """
        Process a single line of sight within the input data

        Generates photons for each cell along the line of sight and stacks
        the non-empty results into a single (4, n_photons) array.

        Parameters:
            ra_los (jnp.array): Right ascension
            dec_los (jnp.array): Declination
            norm_los (jnp.array): Norm of spectrum
            kT_los (jnp.array): Temperature
            Z_los (jnp.array): Abundance
            z_los (jnp.array): Redshift
        """

        res = []
        # One cell per depth index along the line of sight
        for k in range(self.cluster.grid.shape[-1]):
            ra = ra_los[k]
            dec = dec_los[k]
            norm = norm_los[k]
            kT = kT_los[k]
            Z = Z_los[k]
            z = z_los[k]

            res.append(self.add_photons_interpTZ_new(ra,
                                             dec,
                                             norm,
                                             kT,
                                             Z,
                                             z)
                      )

        # Cells that produced no photons return None and are dropped.
        # NOTE(review): np.hstack raises if *every* cell returned None —
        # presumably unreachable in practice; confirm.
        return np.hstack([res_item for res_item in res if res_item is not None])


    def worker_block(self,args_block):
        """
        Worker function that parallelizes the processing of a single line of sight over
        numthreads threads

        Parameters:
            args_block (list): block of arguments to give to process_los
        """

        results = []
        # Fan the lines of sight of this block out over numthreads threads
        with ThreadPoolExecutor(max_workers=self.numthreads) as executor:
            for r in executor.map(lambda args: self.process_los(*args), args_block):
                results.append(r)
        return results


    def chunkify(self,lst, n):
        """
        Convenience function to turn a list into n chunks of equal size

        Note: chunks are built by striding (round-robin), not by contiguous
        slices, so chunk i holds elements i, i+n, i+2n, ...

        Parameters:
            lst (list): list to chunkify
            n (int): number of chunks
        """
        return [lst[i::n] for i in range(n)]

    def get_args_list(self,
                      numprocs= 4,
                      numthreads  = 8):
        """
        Convenience function to get the arguments for the multiprocessing function.
        The computation is divided in multiprocessing and multithreading as it was found to
        be the best balance in speed and memory usage. For 32 cores, the best is 4 processes
        and 8 threads each.

        Parameters:
            numprocs (int): Number of processes
            numthreads (int): Number of threads

        """

        self.numprocs = numprocs
        self.numthreads = numthreads
        # Number of cells along each line of sight (last grid axis)
        los_len = self.cluster.grid.shape[-1]

        # One tuple of per-LOS arrays per line of sight; .copy() detaches each
        # row from the full array (presumably to keep pickling/IPC payloads
        # small — verify)
        args_list = [
                        (ra.copy(), dec.copy(), norm.copy(), kT.copy(), Z.copy(), z.copy())
                        for ra, dec, norm, kT, Z, z in zip(
                            np.array(self.ra.reshape(-1,los_len)),
                            np.array(self.dec.reshape(-1,los_len)),
                            np.array(self.norm.reshape(-1,los_len)),
                            np.array(self.kT_cube.reshape(-1,los_len)),
                            np.array(self.Z_cube.reshape(-1,los_len)),
                            np.array(self.z_cube.reshape(-1,los_len))
                        )
                    ]
        return self.chunkify(args_list, numprocs)


    def create_photon_list_multiproc_old(self,
                                    numproc=None):
        '''
        Create the photon list associated with the loaded cluster object

        Stores the resulting photon list in self.fluxes, self.energies,
        self.ras and self.decs.

        Parameters:
            numproc (int): Number of available cores for the parallel computation.
                NOTE(review): the default None raises a TypeError at the
                `numproc > 1` comparison below — callers must pass an int.
        '''

        n_cells = np.prod(self.ra.shape)

        # One argument tuple per cell (lazy zip; consumed once by the loop below)
        args_list = zip(
                        np.array(self.ra.flatten()[:n_cells]),
                        np.array(self.dec.flatten()[:n_cells]),
                        np.array(self.norm.flatten()[:n_cells]),
                        np.array(self.kT_cube.flatten()[:n_cells]),
                        np.array(self.Z_cube.flatten()[:n_cells]),
                        np.array(self.z_cube.flatten()[:n_cells])
                        )
        if numproc > 1 :
            batch_size = max(1, n_cells//(4*numproc))
        else :
            batch_size = 'auto'

        results = Parallel(n_jobs = numproc,
                           backend = "loky",
                          batch_size = batch_size)(
                                delayed(self.add_photons_interpTZ_new)(
                                    ra,
                                    dec,
                                    norm,
                                    kT,
                                    Z,
                                    z
                                    ) for ra, dec, norm, kT, Z, z in tqdm_notebook(args_list,
                                                                                   total = n_cells,
                                                                                   desc = 'Creating photon list'
                                                                                )
                                )

        # Drop cells that produced no photons
        results = [res_item for res_item in results if res_item is not None]

        # Rows of the stacked array: flux (erg), energy, RA, DEC
        photon_list = np.hstack(results)
        self.fluxes = photon_list[0]
        self.energies = photon_list[1]
        self.ras = photon_list[2]
        self.decs = photon_list[3]


    def add_photons_interpTZ_new(self,
                                 ra,
                                 dec,
                                 norm,
                                 temperature,
                                 abundance,
                                 redshift):
        '''
        Utilitary function used to generate photons from a given point
        in norm, temperature, abundance and redshift

        Returns a (4, nphotons) array of (flux_erg, energy, ra, dec), or
        None if the Poisson draw yields zero photons.

        Parameters:
            ra (float): RA of given point
            dec (float): DEC of given point
            norm (float): norm of given point
            temperature (float): temperature of given point
            abundance (float): abundance of given point
            redshift (float): redshift of given point
        '''
        emin=self.E_bounds[:-1]
        emax=self.E_bounds[1:]
        #Compute number of photons for this cell
        # Indices of the lower-left corner of the (T, Z) grid cell containing
        # this point (assumes temperature/abundance lie inside the scales)
        indT=np.digitize(temperature,self.T_scale)-1
        indZ=np.digitize(abundance,self.Z_scale)-1

        # Interpolate from table: bilinear interpolation in (T, Z)
        dT = temperature - self.T_scale[indT]
        deltaT = self.T_scale[indT+1] - self.T_scale[indT]
        dZ = abundance - self.Z_scale[indZ]
        deltaZ = self.Z_scale[indZ+1] - self.Z_scale[indZ]
        delta_fluxT = self.interp_spec_table[indZ][indT+1] - self.interp_spec_table[indZ][indT]
        delta_fluxZ = self.interp_spec_table[indZ+1][indT] - self.interp_spec_table[indZ][indT]
        # Cross term of the bilinear expansion
        delta_fluxTZ = self.interp_spec_table[indZ][indT] + self.interp_spec_table[indZ+1][indT+1] - self.interp_spec_table[indZ][indT+1] - self.interp_spec_table[indZ+1][indT]

        # Interpolated spectrum, scaled by the cell norm
        flux_inter=(delta_fluxT*dT/deltaT + delta_fluxZ*dZ/deltaZ + delta_fluxTZ*(dT/deltaT)*(dZ/deltaZ) + self.interp_spec_table[indZ][indT])*norm

        # Redshift: shift the energy grid by the cell redshift relative to the
        # bulk cluster redshift, then resample back onto the fixed grid self.E
        E_shift = self.E*(1+self.cluster_z)/(1+redshift)
        flux_inter = np.interp(self.E, E_shift, flux_inter)

        # Flux in ergs.cm-2 for the simput file
        # NOTE(review): np.trapz is called without x/dx, so the integration
        # assumes unit-width energy bins — confirm the binning makes this valid
        flux_erg = np.trapz(flux_inter*self.E)*units.keV.to(units.erg)

        # Flux of spectrum, in photons
        flux_spectrum = np.trapz(flux_inter)

        # Number of photons to draw (oversampled by exposure_overhead)
        nphotons = flux_spectrum * self.eff_area * self.exposure * self.exposure_overhead
        nphotons = np.random.poisson(nphotons)

        # If there are any photons to draw
        if nphotons:

            # CDF of spectrum
            flux_cdf = np.cumsum(flux_inter/np.sum(flux_inter))

            # Single draw of random numbers
            random_numbers = np.random.uniform(size=2*nphotons)

            # Use first draw to get indices distributed according to CDF of spectrum
            search_indexes = np.searchsorted(flux_cdf,random_numbers[:nphotons]*flux_cdf[-1])

            # Use second draw to get energies and each is uniformly drawn in its energy bin
            energy_list = emin[search_indexes] + random_numbers[nphotons:]*(emax[search_indexes]-emin[search_indexes])

            # Make into arrays: total flux is split evenly among the photons
            flux_erg_list = flux_erg*np.ones(nphotons)/nphotons
            ra_list = ra*np.ones(nphotons)
            dec_list = dec*np.ones(nphotons)

            return np.vstack((flux_erg_list, energy_list, ra_list, dec_list))

        else :
            return None

    def write_photon_list_to_simputs(self,
                                    photon_list,
                                    num_divisions=None,
                                    path = '/xifu/home/mola/xifu_cluster_sim/',
                                    name_format = 'ph_list'):
        '''
        Divide the photon list into equal divisions, and write each to a simput file.
        This subdivision is done in order to run multiple SIXTE processes in parallel.
        A folder "sixte_files" is created in the given path. Several folders called
        "part0", "part1", ... are created in "sixte_files".

        Parameters:
            photon_list (list): List of photons created, as rows
                (fluxes, energies, ras, decs)
            num_divisions (int): Number of divisions of the photon list
            path (str): Path for where to save the photon list
            name_format (str): Prefix to give to each photon list
        '''
        self.fluxes = photon_list[0]
        self.energies = photon_list[1]
        self.ras = photon_list[2]
        self.decs = photon_list[3]

        # Get indices for equal divisions
        indexes_phlist = np.linspace(0,self.ras.shape[0]-1, num_divisions+1).astype(int)

        if not os.path.exists(path + '/sixte_files/'):
            os.mkdir(path + '/sixte_files/')

        # Iterate for number of divisions
        for i in range(num_divisions):



            # Create sub folder for each simput
            if not os.path.exists(path + '/sixte_files/part{}/'.format(i)):
                os.mkdir(path + '/sixte_files/part{}/'.format(i))

            # Write simput; the FLUX column gets the summed flux of the slice
            self.write_single_simput_file(
                                        self.ras[indexes_phlist[i]:indexes_phlist[i+1]],
                                        self.decs[indexes_phlist[i]:indexes_phlist[i+1]],
                                        self.energies[indexes_phlist[i]:indexes_phlist[i+1]],
                                        np.sum(self.fluxes[indexes_phlist[i]:indexes_phlist[i+1]]),
                                        path + '/sixte_files/part{}/'.format(i) + name_format +'_{}.fits'.format(i),
                                        clobber=True
                                        )

    def write_single_simput_file(self,
                        ra,
                        dec,
                        energy,
                        flux,
                        simputfile,
                        clobber=False):
        """

        Write photons to a SIMPUT file that may be read by the SIXTE instrument
        simulator. See the SIMPUT definition for reference:
        http://hea-www.harvard.edu/heasarc/formats/simput-1.1.0.pdf

        Parameters:
            ra (np.array): The RA positions of the photons in degrees.
            dec (np.array): The Dec positions of the photons in degrees.
            energy (np.array): The energies of the photons in keV.
            flux (float): Total flux of photons in erg/s/cm**2 in the reference energy band.
            simputfile (str): Name of the simput file to write
            clobber (bool): Set to True to overwrite previous files.
        """

        # Photon list extension (PHLIST): one row per photon
        col1 = fits.Column(name='ENERGY', format='E', array=energy)
        col2 = fits.Column(name='RA', format='D', array=ra)
        col3 = fits.Column(name='DEC', format='D', array=dec)

        coldefs = fits.ColDefs([col1, col2, col3])

        # fits.new_table is deprecated; BinTableHDU.from_columns is the
        # modern equivalent
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        tbhdu.name = "PHLIST"

        # Mandatory SIMPUT v1.1.0 photon-list header keywords
        tbhdu.header["HDUCLASS"] = "HEASARC/SIMPUT"
        tbhdu.header["HDUCLAS1"] = "PHOTONS"
        tbhdu.header["HDUVERS"] = "1.1.0"
        tbhdu.header["EXTVER"] = 1
        tbhdu.header["REFRA"] = 0.0
        tbhdu.header["REFDEC"] = 0.0
        tbhdu.header["TUNIT1"] = "keV"
        tbhdu.header["TUNIT2"] = "deg"
        tbhdu.header["TUNIT3"] = "deg"

        #Construct source catalog: a single source whose spectrum and image
        # both point at the PHLIST extension
        col1 = fits.Column(name='SRC_ID', format='J', array=np.array([1]).astype("int32"))
        col2 = fits.Column(name='RA', format='D', array=np.array([0.0]))
        col3 = fits.Column(name='DEC', format='D', array=np.array([0.0]))
        col4 = fits.Column(name='E_MIN', format='D', array=np.array([float(self.E_bounds[0])]))
        col5 = fits.Column(name='E_MAX', format='D', array=np.array([float(self.E_bounds[-1])]))
        col6 = fits.Column(name='FLUX', format='D', array=np.array([flux]))
        col7 = fits.Column(name='SPECTRUM', format='80A',
                             array=np.array(["[PHLIST,1]"]))
        col8 = fits.Column(name='IMAGE', format='80A',
                             array=np.array(["[PHLIST,1]"]))

        coldefs = fits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8])

        # fits.new_table is deprecated; BinTableHDU.from_columns is the
        # modern equivalent
        wrhdu = fits.BinTableHDU.from_columns(coldefs)
        wrhdu.name = "SRC_CAT"

        # Mandatory SIMPUT v1.1.0 source-catalog header keywords
        wrhdu.header["HDUCLASS"] = "HEASARC"
        wrhdu.header["HDUCLAS1"] = "SIMPUT"
        wrhdu.header["HDUCLAS2"] = "SRC_CAT"
        wrhdu.header["HDUVERS"] = "1.1.0"
        wrhdu.header["RADECSYS"] = "FK5"
        wrhdu.header["EQUINOX"] = 2000.0
        wrhdu.header["TUNIT2"] = "deg"
        wrhdu.header["TUNIT3"] = "deg"
        wrhdu.header["TUNIT4"] = "keV"
        wrhdu.header["TUNIT5"] = "keV"
        wrhdu.header["TUNIT6"] = "erg/s/cm**2"

        primary_hdu = fits.PrimaryHDU()
        thdulist = fits.HDUList([primary_hdu, wrhdu,tbhdu])
        thdulist.writeto(simputfile,overwrite=clobber)
__init__(cluster, E_bounds, Z_scale, T_scale, interp_spec_table, exposure=125000.0, exposure_overhead=10, eff_area=20000.0, fov_radius_pix=26.5)

Load cluster object and define parameters of observation

Parameters:

Name Type Description Default
cluster object

cluster object

required
E_bounds array

Bounds in energy used to sample photons

required
Z_scale np.array)

Abundance scale used for interpolated grid

required
T_scale np.array)

Temperature scale used for interpolated grid

required
interp_spec_table np.array)

Table of spectra to interpolate from

required
exposure float)

Exposure in seconds

125000.0
exposure_overhead float)

Multiplying factor for the exposure for oversampling of photon list

10
eff_area float)

Approximate total effective area in cm\(^2\)

20000.0
Source code in src/xifu_cluster_sim/photon_list.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
def __init__(self, 
             cluster, 
             E_bounds, 
             Z_scale, 
             T_scale, 
             interp_spec_table,
             exposure = 125e3,
             exposure_overhead = 10,
             eff_area = 2e4,
             fov_radius_pix = 26.5):
    """
    Load cluster object and define parameters of observation

    Parameters:
        cluster (object): cluster object
        E_bounds (np.array): Bounds in energy used to sample photons
        Z_scale (np.array) : Abundance scale used for interpolated grid
        T_scale (np.array) : Temperature scale used for interpolated grid
        interp_spec_table (np.array) : Table of spectra to interpolate from
        exposure (float) : Exposure in seconds
        exposure_overhead (float) : Multiplying factor for the exposure for oversampling of photon list
        eff_area (float) : Approximate total effective area in cm$^2$

    """
    self.cluster = cluster
    self.E_bounds = E_bounds
    self.E = (E_bounds[1:]+E_bounds[:-1])/2
    self.T_scale = T_scale
    self.Z_scale = Z_scale
    self.interp_spec_table = interp_spec_table
    self.exposure = exposure
    self.exposure_overhead = exposure_overhead
    self.nb_cells = np.prod(cluster.grid.shape)
    self.cluster_z = cluster.cluster_z
    self.eff_area = eff_area
    self.fov_radius_pix = fov_radius_pix

add_photons_interpTZ_new(ra, dec, norm, temperature, abundance, redshift)

Utilitary function used to generate photons from a given point in norm, temperature, abundance and redshift

Parameters:

Name Type Description Default
ra float

RA of given point

required
dec float

DEC of given point

required
norm float

norm of given point

required
temperature float

temperature of given point

required
abundance float

abundance of given point

required
redshift float

redshift of given point

required
Source code in src/xifu_cluster_sim/photon_list.py
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
def add_photons_interpTZ_new(self,
                             ra,
                             dec,
                             norm,
                             temperature,
                             abundance,
                             redshift):
    '''
    Utilitary function used to generate photons from a given point 
    in norm, temperature, abundance and redshift

    Parameters:
        ra (float): RA of given point
        dec (float): DEC of given point
        norm (float): norm of given point
        temperature (float): norm of given point
        abundance (float): abundance of given point
        redshift (float): redshift of given  point
    '''
    emin=self.E_bounds[:-1]
    emax=self.E_bounds[1:]
    #Compute number of photons for this cell
    indT=np.digitize(temperature,self.T_scale)-1
    indZ=np.digitize(abundance,self.Z_scale)-1

    # Interpolate from table
    dT = temperature - self.T_scale[indT]
    deltaT = self.T_scale[indT+1] - self.T_scale[indT]
    dZ = abundance - self.Z_scale[indZ]
    deltaZ = self.Z_scale[indZ+1] - self.Z_scale[indZ]
    delta_fluxT = self.interp_spec_table[indZ][indT+1] - self.interp_spec_table[indZ][indT]
    delta_fluxZ = self.interp_spec_table[indZ+1][indT] - self.interp_spec_table[indZ][indT]
    delta_fluxTZ = self.interp_spec_table[indZ][indT] + self.interp_spec_table[indZ+1][indT+1] - self.interp_spec_table[indZ][indT+1] - self.interp_spec_table[indZ+1][indT]    

    # Interpolated spectrum
    flux_inter=(delta_fluxT*dT/deltaT + delta_fluxZ*dZ/deltaZ + delta_fluxTZ*(dT/deltaT)*(dZ/deltaZ) + self.interp_spec_table[indZ][indT])*norm

    # Redshift
    E_shift = self.E*(1+self.cluster_z)/(1+redshift)
    flux_inter = np.interp(self.E, E_shift, flux_inter)

    # Flux in ergs.cm-2 for the simput file
    flux_erg = np.trapz(flux_inter*self.E)*units.keV.to(units.erg)

    # Flux of spectrum, in photons
    flux_spectrum = np.trapz(flux_inter)

    # Number of photons to draw
    nphotons = flux_spectrum * self.eff_area * self.exposure * self.exposure_overhead
    nphotons = np.random.poisson(nphotons)        

    # If there are any photons to draw
    if nphotons:

        # CDF of spectrum
        flux_cdf = np.cumsum(flux_inter/np.sum(flux_inter))

        # Single draw of random numbers
        random_numbers = np.random.uniform(size=2*nphotons)

        # Use first draw to get indices distributed according to CDF of spectrum
        search_indexes = np.searchsorted(flux_cdf,random_numbers[:nphotons]*flux_cdf[-1])

        # Use second draw to get energies and each is uniformly drawn in its energy bin
        energy_list = emin[search_indexes] + random_numbers[nphotons:]*(emax[search_indexes]-emin[search_indexes])

        # Make into arrays
        flux_erg_list = flux_erg*np.ones(nphotons)/nphotons
        ra_list = ra*np.ones(nphotons)
        dec_list = dec*np.ones(nphotons)

        return np.vstack((flux_erg_list, energy_list, ra_list, dec_list))

    else : 
        return None

chunkify(lst, n)

Convenience function to turn a list into n chunks of equal size

Parameters:

Name Type Description Default
lst list

list to chunkify

required
n int

number of chunks

required
Source code in src/xifu_cluster_sim/photon_list.py
143
144
145
146
147
148
149
150
151
def chunkify(self,lst, n):
    """
    Convenience function to turn a list into n chunks of equal size

    Parameters:
        lst (list): list to chunkify
        n (int): number of chunks
    """
    return [lst[i::n] for i in range(n)]

create_photon_list_multiproc_old(numproc=None)

Create the photon list associated with the loaded cluster object

Parameters:

Name Type Description Default
numproc int

Number of available cores for the parallel computation

None
Source code in src/xifu_cluster_sim/photon_list.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
def create_photon_list_multiproc_old(self,
                                numproc=None):
    '''
    Create the photon list associated with the loaded cluster object 

    Parameters:
        numproc (int): Number of available cores for the parallel computation
    '''   

    n_cells = np.prod(self.ra.shape)

    args_list = zip(
                    np.array(self.ra.flatten()[:n_cells]),
                    np.array(self.dec.flatten()[:n_cells]),
                    np.array(self.norm.flatten()[:n_cells]),
                    np.array(self.kT_cube.flatten()[:n_cells]),
                    np.array(self.Z_cube.flatten()[:n_cells]),
                    np.array(self.z_cube.flatten()[:n_cells])
                    )
    if numproc > 1 :
        batch_size = max(1, n_cells//(4*numproc))
    else :
        batch_size = 'auto'                                                       

    results = Parallel(n_jobs = numproc, 
                       backend = "loky",
                      batch_size = batch_size)(
                            delayed(self.add_photons_interpTZ_new)(
                                ra,
                                dec,
                                norm,
                                kT,
                                Z,
                                z
                                ) for ra, dec, norm, kT, Z, z in tqdm_notebook(args_list, 
                                                                               total = n_cells, 
                                                                               desc = 'Creating photon list'
                                                                            )
                            )


    results = [res_item for res_item in results if res_item is not None] 

    photon_list = np.hstack(results)
    self.fluxes = photon_list[0]
    self.energies = photon_list[1]
    self.ras = photon_list[2]
    self.decs = photon_list[3]

get_args_list(numprocs=4, numthreads=8)

Convenience function to get the arguments for the multiprocessing function. The computation is divided in multiprocessing and multithreading as it was found to be the best balance in speed and memory usage. For 32 cores, the best is 4 processes and 8 threads each.

Parameters:

Name Type Description Default
numprocs int

Number of processes

4
numthreads int

Number of threads

8
Source code in src/xifu_cluster_sim/photon_list.py
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
def get_args_list(self,
                  numprocs= 4,
                  numthreads  = 8):
    """
    Convenience function to get the arguments for the multiprocessing function.
    The computation is divided in multiprocessing and multithreading as it was found to
    be the best balance in speed and memory usage. For 32 cores, the best is 4 processes
    and 8 threads each.

    Parameters:
        numprocs (int): Number of processes
        numthreads (int): Number of threads

    """

    self.numprocs = numprocs
    self.numthreads = numthreads
    los_len = self.cluster.grid.shape[-1]

    args_list = [
                    (ra.copy(), dec.copy(), norm.copy(), kT.copy(), Z.copy(), z.copy())
                    for ra, dec, norm, kT, Z, z in zip(
                        np.array(self.ra.reshape(-1,los_len)),
                        np.array(self.dec.reshape(-1,los_len)),
                        np.array(self.norm.reshape(-1,los_len)),
                        np.array(self.kT_cube.reshape(-1,los_len)),
                        np.array(self.Z_cube.reshape(-1,los_len)),
                        np.array(self.z_cube.reshape(-1,los_len))
                    )
                ]
    return self.chunkify(args_list, numprocs)

process_los(ra_los, dec_los, norm_los, kT_los, Z_los, z_los)

Process a single line of sight within the input data

Parameters:

Name Type Description Default
ra_los array

Right ascension

required
dec_los array

Declination

required
norm_los array

Norm of spectrum

required
kT_los array

Temperature

required
Z_los array

Abundance

required
z_los array

Redshift

required
Source code in src/xifu_cluster_sim/photon_list.py
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
def process_los(self,ra_los,dec_los,norm_los,kT_los, Z_los, z_los):
    """
    Process a single line of sight within the input data


    Parameters:
        ra_los (jnp.array): Right ascension
        dec_los (jnp.array): Declination
        norm_los (jnp.array): Norm of spectrum
        kT_los (jnp.array): Temperature
        Z_los (jnp.array): Abundance
        z_los (jnp.array): Redshift
    """

    res = []
    for k in range(self.cluster.grid.shape[-1]):
        ra = ra_los[k]
        dec = dec_los[k]
        norm = norm_los[k]
        kT = kT_los[k]
        Z = Z_los[k]
        z = z_los[k]

        res.append(self.add_photons_interpTZ_new(ra, 
                                         dec, 
                                         norm, 
                                         kT, 
                                         Z, 
                                         z)
                  )

    return np.hstack([res_item for res_item in res if res_item is not None])

select_sub_pointing(x_offset=0, y_offset=0, pointing_shape=(58, 58))

Selects the portion of input grid that falls within selected pointing

Parameters:

Name Type Description Default
x_offset int

Offset along X, in pixels

0
y_offset int

Offset along Y, in pixels

0
pointing_shape tuple

Shape of pointing to extract from input grid

(58, 58)
Source code in src/xifu_cluster_sim/photon_list.py
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
def select_sub_pointing(self,
                       x_offset=0,
                       y_offset=0,
                       pointing_shape = (58,58)):

    """
    Selects the portion of input grid that falls within selected pointing.

    Keeps only the grid cells whose distance from the (offset) grid centre
    is below ``self.fov_radius_pix`` and stores the masked quantities on
    the instance.

    Parameters:
        x_offset (int): Offset along X, in pixels
        y_offset (int): Offset along Y, in pixels
        pointing_shape (tuple): Shape of pointing to extract from input grid
            (currently unused in the body; kept for interface compatibility)
    """

    total_shape = self.cluster.grid.shape

    # Pixel-coordinate planes spanning the full input grid.
    X, Y = np.meshgrid(np.arange(total_shape[0]),
                       np.arange(total_shape[1]))
    X = X.astype('float64')
    Y = Y.astype('float64')

    # Distance of every pixel from the grid centre, shifted by the offsets.
    dx = X - total_shape[0] / 2 - x_offset
    dy = Y - total_shape[1] / 2 - y_offset
    mask = np.where(np.sqrt(dx ** 2 + dy ** 2) < self.fov_radius_pix)

    self.ra = self.cluster.ra[mask]
    self.dec = self.cluster.dec[mask]
    self.norm = self.cluster.norm[mask]
    self.kT_cube = self.cluster.kT_cube[mask]
    self.Z_cube = self.cluster.Z_cube[mask]
    self.z_cube = self.cluster.z_cube[mask]

worker_block(args_block)

Worker function that parallelizes the processing of a single line of sight over numthreads threads

Parameters:

Name Type Description Default
args_block list

block of arguments to give to process_los

required
Source code in src/xifu_cluster_sim/photon_list.py
127
128
129
130
131
132
133
134
135
136
137
138
139
140
def worker_block(self, args_block):
    """
    Worker function that parallelizes the processing of a single line of
    sight over numthreads threads.

    Parameters:
        args_block (list): block of arguments to give to process_los

    Returns:
        list: one ``process_los`` result per entry of ``args_block``,
        in input order.
    """

    unpack_and_run = lambda packed_args: self.process_los(*packed_args)
    with ThreadPoolExecutor(max_workers=self.numthreads) as executor:
        return list(executor.map(unpack_and_run, args_block))

write_photon_list_to_simputs(photon_list, num_divisions=None, path='/xifu/home/mola/xifu_cluster_sim/', name_format='ph_list')

Divide the photon list into equal divisions, and write each to a simput file. This subdivision is done in order to run multiple SIXTE processes in parallel. A folder "sixte_files" is created in the given path. Several folders called "part_1", "part_2"... are created in "sixte_files".

Parameters:

Name Type Description Default
photon_list list

List of photons created

required
num_divisions float

Number of divisions of the photon list

None
path str

Path for where to save the photon list

'/xifu/home/mola/xifu_cluster_sim/'
name_format str

Prefix to give to each photon list

'ph_list'
Source code in src/xifu_cluster_sim/photon_list.py
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
def write_photon_list_to_simputs(self,
                                photon_list,
                                num_divisions=None,
                                path = '/xifu/home/mola/xifu_cluster_sim/',
                                name_format = 'ph_list'):
    '''
    Divide the photon list into (nearly) equal divisions, and write each
    to a simput file. This subdivision is done in order to run multiple
    SIXTE processes in parallel. A folder "sixte_files" is created in the
    given path, containing one sub-folder "part0", "part1", ... per
    division.

    Parameters:
        photon_list (list): List of photons created, as
            (fluxes, energies, ras, decs) arrays
        num_divisions (int): Number of divisions of the photon list;
            defaults to 1 (a single SIMPUT file)
        path (str): Path for where to save the photon list
        name_format (str): Prefix to give to each photon list
    '''
    self.fluxes = photon_list[0]
    self.energies = photon_list[1]
    self.ras = photon_list[2]
    self.decs = photon_list[3]

    # Fix: the previous default of None crashed on `None + 1` below.
    if num_divisions is None:
        num_divisions = 1

    # Boundaries of the equal divisions. Fix: the upper bound is the
    # array length (not length - 1); with half-open slices the old code
    # silently dropped the last photon of the list.
    indexes_phlist = np.linspace(0, self.ras.shape[0],
                                 num_divisions + 1).astype(int)

    # makedirs(exist_ok=True) avoids the check-then-create race and also
    # creates missing parent directories.
    os.makedirs(path + '/sixte_files/', exist_ok=True)

    # Iterate for number of divisions
    for i in range(num_divisions):

        # Create sub folder for each simput
        part_dir = path + '/sixte_files/part{}/'.format(i)
        os.makedirs(part_dir, exist_ok=True)

        start, stop = indexes_phlist[i], indexes_phlist[i + 1]

        # Write simput
        self.write_single_simput_file(
                                    self.ras[start:stop],
                                    self.decs[start:stop],
                                    self.energies[start:stop],
                                    np.sum(self.fluxes[start:stop]),
                                    part_dir + name_format + '_{}.fits'.format(i),
                                    clobber=True
                                    )

write_single_simput_file(ra, dec, energy, flux, simputfile, clobber=False)

Write photons to a SIMPUT file that may be read by the SIXTE instrument simulator. See the SIMPUT definition for reference: http://hea-www.harvard.edu/heasarc/formats/simput-1.1.0.pdf

Parameters:

Name Type Description Default
ra array

The RA positions of the photons in degrees.

required
dec array

The Dec positions of the photons in degrees.

required
energy array

The energies of the photons in keV.

required
flux float

Total flux of photons in erg/s/cm**2 in the reference energy band.

required
simputfile str

Name of the simput file to write

required
clobber bool

Set to True to overwrite previous files.

False
Source code in src/xifu_cluster_sim/photon_list.py
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
def write_single_simput_file(self,
                    ra, 
                    dec, 
                    energy, 
                    flux, 
                    simputfile, 
                    clobber=False):
    """
    Write photons to a SIMPUT file that may be read by the SIXTE
    instrument simulator. See the SIMPUT definition for reference:
    http://hea-www.harvard.edu/heasarc/formats/simput-1.1.0.pdf

    Parameters:
        ra (np.array): The RA positions of the photons in degrees.
        dec (np.array): The Dec positions of the photons in degrees.
        energy (np.array): The energies of the photons in keV. 
        flux (float): Total flux of photons in erg/s/cm**2 in the reference energy band.
        simputfile (str): Name of the simput file to write
        clobber (bool): Set to True to overwrite previous files.
    """

    # --- PHLIST extension: one row per photon ---------------------------
    phlist_cols = fits.ColDefs([
        fits.Column(name='ENERGY', format='E', array=energy),
        fits.Column(name='RA', format='D', array=ra),
        fits.Column(name='DEC', format='D', array=dec),
    ])
    tbhdu = fits.BinTableHDU.from_columns(phlist_cols)
    tbhdu.name = "PHLIST"

    for key, value in (("HDUCLASS", "HEASARC/SIMPUT"),
                       ("HDUCLAS1", "PHOTONS"),
                       ("HDUVERS", "1.1.0"),
                       ("EXTVER", 1),
                       ("REFRA", 0.0),
                       ("REFDEC", 0.0),
                       ("TUNIT1", "keV"),
                       ("TUNIT2", "deg"),
                       ("TUNIT3", "deg")):
        tbhdu.header[key] = value

    # --- SRC_CAT extension: single-source catalogue ---------------------
    src_cols = fits.ColDefs([
        fits.Column(name='SRC_ID', format='J', array=np.array([1]).astype("int32")),
        fits.Column(name='RA', format='D', array=np.array([0.0])),
        fits.Column(name='DEC', format='D', array=np.array([0.0])),
        fits.Column(name='E_MIN', format='D', array=np.array([float(self.E_bounds[0])])),
        fits.Column(name='E_MAX', format='D', array=np.array([float(self.E_bounds[-1])])),
        fits.Column(name='FLUX', format='D', array=np.array([flux])),
        fits.Column(name='SPECTRUM', format='80A',
                    array=np.array(["[PHLIST,1]"])),
        fits.Column(name='IMAGE', format='80A',
                    array=np.array(["[PHLIST,1]"])),
    ])
    wrhdu = fits.BinTableHDU.from_columns(src_cols)
    wrhdu.name = "SRC_CAT"

    for key, value in (("HDUCLASS", "HEASARC"),
                       ("HDUCLAS1", "SIMPUT"),
                       ("HDUCLAS2", "SRC_CAT"),
                       ("HDUVERS", "1.1.0"),
                       ("RADECSYS", "FK5"),
                       ("EQUINOX", 2000.0),
                       ("TUNIT2", "deg"),
                       ("TUNIT3", "deg"),
                       ("TUNIT4", "keV"),
                       ("TUNIT5", "keV"),
                       ("TUNIT6", "erg/s/cm**2")):
        wrhdu.header[key] = value

    # Primary HDU first, then the source catalogue, then the photon list.
    hdu_list = fits.HDUList([fits.PrimaryHDU(), wrhdu, tbhdu])
    hdu_list.writeto(simputfile, overwrite=clobber)

create_photon_list(ph_list_object, blocks, numproc)

Function to create the photon list. It has to be instantiated outside of the photon_list class.

Parameters:

Name Type Description Default
ph_list_object class

photon_list object

required
blocks list)

Chunkified list of arguments (ra, dec, norm, temp, abund, redshift)

required
numproc int)

Number of processes

required
Source code in src/xifu_cluster_sim/photon_list.py
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
def create_photon_list(ph_list_object,
                      blocks,
                      numproc, 
                      ):
    """
    Function to create the photon list. It has to be instantiated outside
    of the photon_list class.

    Parameters:
        ph_list_object (class): photon_list object
        blocks (list): Chunkified list of arguments (ra, dec, norm, temp, abund, redshift)
        numproc (int): Number of processes
    """

    # Each worker process also receives a global handle on the photon-list
    # object via the initializer.
    # NOTE(review): a lambda initializer is not picklable under the 'spawn'
    # start method — confirm this code only runs with 'fork'.
    share_object = lambda: globals().update({'ph_list': ph_list_object})

    with Pool(processes=numproc, initializer=share_object) as pool:
        nested = [block_result
                  for block_result in pool.imap(ph_list_object.worker_block, blocks)]

    # Flatten the per-block lists into one list of per-LOS arrays.
    flat = [los_result for block in nested for los_result in block]
    return np.hstack(flat)