import re
import math
import inspect

class YodaReader:
    def __setattr__(self, name, value):
        """Set an attribute, logging which function performed the assignment.

        Performance fix: the original used inspect.stack()[1], which builds
        the entire call stack (and reads source files) on EVERY attribute
        assignment — a large constant cost given how many setattr calls
        __init__ and the postprocessing steps perform.  Reading the caller's
        frame directly is equivalent and cheap.
        """
        frame = inspect.currentframe()
        caller_function = "<unknown>"
        # f_back is the frame that performed the assignment; it can be None
        # in exotic embedding scenarios, hence the guard.
        if frame is not None and frame.f_back is not None:
            caller_function = frame.f_back.f_code.co_name

        print(f"Attribute '{name}' is being set to '{type(value)}' by function '{caller_function}'")

        # Set the attribute normally
        super().__setattr__(name, value)

    def __init__(self, mapping, full_ew_flag, suffix="" , *dir_pathes):
        """Parse every mapped YODA file and attach its histograms as attributes.

        Args:
            mapping: dict of {key: file_path}.  Each ``*.yoda`` file is parsed
                twice (HISTO blocks and STXS blocks) and stored as
                ``histo_<key><suffix>`` / ``stxs_<key><suffix>``; for keys
                containing "bigstxs" the STXS list is stored as
                ``<key><suffix>`` instead.
            full_ew_flag: forwarded to every scale-postprocessing step (it
                selects AUX_VAR scale variations — see
                _construct_scales_indexes).
            suffix: appended to every generated attribute name.
            dir_pathes: not referenced in this method — TODO confirm it can
                be removed.
        """
        self.index_dict = {}
        self._suffix = suffix
        for key, file in mapping.items():
            # file_tokenizer returns (True, token) only for *.yoda files.
            parse_flag, _ = self.file_tokenizer(file)
            if parse_flag:
                print(f"Parsing file {file}")
                # Parsing histo
                histo = self.parse_file(file, "HISTO")
                
                # Parsing STXS
                stxs = self.parse_file(file, "STXS")

                # Parsing BIGSTXS (index 90)
                # bigstxs = self.parse_file(file, "BIGSTXS")
                # self.index_dict.update(ind_sample)
                
                # Updating HISTO params
                for hist in histo:
                    try:
                        hist.update(self.index_parser(hist["index"], "HISTO"))
                    except:
                        # NOTE(review): indexes that index_parser cannot handle
                        # are silently left without x_param/other_params.
                        pass
                # Updating STXS params
                for stx in stxs:
                    try:
                        stx.update(self.index_parser(stx["index"], "STXS"))
                    except:
                        pass
                # Updating BIGSTXS params
                # for stx in bigstxs:
                #     stx.update(self.index_parser(stx["index"], "BIGSTXS"))
                
                # NOTE(review): both branches store the HISTO list identically;
                # only the STXS attribute name differs for "bigstxs" keys.
                if "bigstxs" in key:
                    setattr(self, f"histo_{key}{suffix}", histo)
                    setattr(self, f"{key}{suffix}", stxs)
                else:
                    setattr(self, f"histo_{key}{suffix}", histo)
                    setattr(self, f"stxs_{key}{suffix}", stxs)
                # setattr(self, f"bigstxs_{key}", bigstxs)
        # Derived envelope attributes are computed once all raw data is attached.
        self._postprocess_general_scales(full_ew_flag)
        self._postprocess_general_scales_histo(full_ew_flag)
        self._postprocess_general_scales_with_stat(full_ew_flag)
        self._postprocess_scales()
        self._postprocess_scales_histo()
        #self._postprocess_scales_madh()
        self._postprocess_scales_histo_madh()
        #new_data_1 = list(filter(lambda x: "HISTO" in x["index"],
        #                         getattr(self, f"histo_sherpa_dipole{self._suffix}")))
        #setattr(self, f"histo_postprocessed_sherpa_dipole{suffix}", new_data_1)
        #new_data_1 = list(filter(lambda x: "HISTO" in x["index"],
         #                        getattr(self, f"histo_sherpa_dire{self._suffix}")))
        #setattr(self, f"histo_postprocessed_sherpa_dire{suffix}", new_data_1)

    def _postprocess_general_scales(self, full_ew_flag = False):
        """Build scale-variation envelopes for STXS-level attributes.

        For every attribute whose name contains "stxs" (excluding raw
        "_scale_" attributes and private names), plus madH7 scale attributes,
        and for each known STXS observable, the central value and its scale
        variations are combined per bin into
        (x_min, x_max, median, median - min, max - median)
        where median/min/max come from sorting (max, min, central).
        The result is stored as ``postprocessed_<atr><suffix>`` mapping
        observable index -> list of rows.  Attributes that fail (e.g. a
        missing observable) are reported and skipped.
        """
        attributes = list(filter(lambda x: "stxs" in x and not "_scale_" in x and x[0] != '_', dir(self)))
        attributes_madh = list(filter(lambda x: "scale_madH7" in x and x[0] != '_', dir(self)))
        attributes = attributes + attributes_madh
        # Observables to postprocess; the trailing newline matches raw indexes.
        indexes_to_scale = ['ptj1-STXS\n', 'ptj2-STXS\n', 'mjj-STXS\n',
                            'yjj-STXS\n', 'ptH-STXS\n', 'ptHjj-STXS\n',
                            'yj1-STXS\n', 'yj2-STXS\n', 'yH-STXS\n']

        for atr in attributes:
            try:
                data = getattr(self, atr)
                new_data = {}
                for target_index in indexes_to_scale:
                    scaled_data = []
                    target_indexes = self._construct_scales_indexes(target_index, atr, full_ew_flag)
                    target_data = list(map(lambda x: x["data"], filter(lambda x: x["index"] == target_index, data)))[0]
                    filtered_data = list(map(lambda x: x["data"], filter(lambda x: x["index"] in target_indexes, data)))
                    for i in range(len(target_data)):
                        variation_values = [d[i][2] for d in filtered_data]
                        max_value = max(variation_values)
                        min_value = min(variation_values)
                        # Bin edges are taken from the first variation histogram.
                        x_min = filtered_data[0][i][0]
                        x_max = filtered_data[0][i][1]
                        central_value = target_data[i][2]
                        envelope = sorted((max_value, min_value, central_value))
                        scaled_data.append((x_min, x_max, envelope[1],
                                            abs(envelope[1] - envelope[0]),
                                            abs(envelope[2] - envelope[1])))
                    new_data[target_index] = scaled_data
                # Bug fix: the original if/else stored to the same attribute
                # name in both branches — collapsed to a single setattr.
                setattr(self, f"postprocessed_{atr}{self._suffix}", new_data)
            except Exception as e:
                # Typo fixed in the message ("Couldn'[t" -> "Couldn't").
                print(f"Couldn't postprocess atr {atr}: {e}")

    def _postprocess_general_scales_histo(self, full_ew_flag = False):
        """Build scale-variation envelopes for histogram-level attributes.

        For each attribute whose name contains "histo_" (plus madH7 scale
        attributes) and for every un-bracketed "HISTO" index found on
        ``histo_pythia<suffix>``, the central curve and its scale variations
        are combined per bin into
        (x_min, x_max, central, max(central - min, 0), max(max - central, 0)),
        stored as ``postprocessed_<atr><suffix>`` (a list of
        {"index", "data"} dicts).

        NOTE(review): requires ``histo_pythia<suffix>`` to exist — raises
        AttributeError otherwise; confirm that attribute is always present.
        """
        attributes = list(filter(lambda x: "histo_" in x and not "_scale_" in x and x[0] != '_', dir(self)))
        attributes_madh = list(filter(lambda x: "scale_madH7" in x and x[0] != '_' and 'histo' in x and 'postprocessed' not in x,
                                      dir(self)))
        attributes = attributes + attributes_madh
        print(attributes)
        # Only un-bracketed HISTO indexes define the observables to process;
        # bracketed ones ([VARi], [MUR=...]) are the variations themselves.
        indexes_to_scale = list(filter(lambda x: "HISTO" in x and not "[" in x, map(lambda x: x["index"],
                                                                                    getattr(self, f"histo_pythia{self._suffix}"))))
        
        for atr in attributes:
            #try:
                # NOTE: the body keeps the extra indentation of the
                # commented-out try block; it is still a valid suite.
                print(f"Postprocessing {atr}")
                data = getattr(self, atr)
                new_data = []
                for target_index in indexes_to_scale:
                    scaled_data = []
                    
                    target_indexes = self._construct_scales_indexes(target_index, atr, full_ew_flag)
                    if "madH" in atr:
                        # madH7 variation names are read off the data itself:
                        # any bracketed index sharing the observable prefix.
                        target_indexes = list(map(lambda x: x["index"],
                                                filter(lambda x: target_index[0:-1] in x["index"] and '[' in x["index"],
                                                       data)))
                    
                    target_data = list(map(lambda x: x["data"], filter(lambda x: x["index"] == target_index, data)))[0]
                    filtered_data = list(map(lambda x: x["data"], filter(lambda x: x["index"] in target_indexes, data)))
                    for i in range(len(target_data)):
                        try:
                            max_value = max(list(map(lambda x: x[i][2], filtered_data)))
                        except:
                            # Dump context before aborting: typically means no
                            # variation histograms matched target_indexes.
                            print(filtered_data)
                            print(target_indexes)
                            raise Exception
                        min_value = min(list(map(lambda x: x[i][2], filtered_data)))
                        x_min = filtered_data[0][i][0]
                        x_max = filtered_data[0][i][1]
                        central_value = target_data[i][2]
                        envelope = sorted((max_value, min_value, central_value))
                        '''
                            envelope
                            (x_min, x_max, envelope[1],
                                            abs(envelope[1] - envelope[0]),
                                            abs(envelope[2] - envelope[1]))
                                            '''
                        scaled_data.append((x_min, x_max,
                                      central_value,
                                      central_value - min_value if central_value - min_value > 0 else 0,
                                      max_value - central_value if max_value - central_value > 0 else 0
                                      #sorted((t2[2], t3[2], t1[2]))[1],
                                      #sorted((t2[2], t3[2], t1[2]))[1] - sorted((t2[2], t3[2], t1[2]))[0],
                                      #sorted((t2[2], t3[2], t1[2]))[2] - sorted((t2[2], t3[2], t1[2]))[1]
                                      )
                                            )
                    new_data.append({"index": target_index, "data":scaled_data})
                # NOTE(review): both branches below are identical.
                if not "scale" in atr:
                    setattr(self, f"postprocessed_{atr}{self._suffix}", new_data)
                else:
                    setattr(self, f"postprocessed_{atr}{self._suffix}", new_data)
            #except Exception as e:
            #    print(f"Couldn'[t postprocess atr {atr}: {e}")

    def _postprocess_general_scales_with_stat(self, full_ew_flag = False):
        attributes = list(filter(lambda x: "stxs" in x and not "_scale_" in x, dir(self)))
        indexes_to_scale = ['ptj1-STXS\n',
        'ptj2-STXS\n',
        'mjj-STXS\n',
        'yjj-STXS\n',
        'ptH-STXS\n',
        'ptHjj-STXS\n',
        'yj1-STXS\n',
        'yj2-STXS\n',
        'yH-STXS\n']

        
        for atr in attributes:
            try:
                data = getattr(self, atr)
                new_data = {}
                for target_index in indexes_to_scale:
                    scaled_data = []

                    target_indexes = self._construct_scales_indexes(target_index, atr, full_ew_flag)
                    # print(target_indexes)
                    # print(target_indexes)
                    target_data = list(map(lambda x: x["data"], filter(lambda x: x["index"] == target_index, data)))[0]
                    filtered_data = list(map(lambda x: x["data"], filter(lambda x: x["index"] in target_indexes, data)))
                    for i in range(len(target_data)):

                        max_value = max(list(map(lambda x: x[i][2], filtered_data)))
                        min_value = min(list(map(lambda x: x[i][2], filtered_data)))
                        x_min = filtered_data[0][i][0]
                        x_max = filtered_data[0][i][1]
                        central_value = target_data[i][2]
                        stat_unc = target_data[i][3] #  TODO remake it with envelope as above
                        scaled_data.append((x_min, x_max, central_value,
                                            math.sqrt((central_value - min_value)**2 + stat_unc**2 / 4),
                                            math.sqrt((max_value - central_value)**2 + stat_unc**2 / 4)))
                    new_data[target_index] = scaled_data
                setattr(self, f"postprocessed_with_stat_{atr}{self._suffix}", new_data)
            except Exception as e:
                print(f"Couldn't postprocess atr {atr}: {e}")


    @staticmethod
    def _construct_scales_indexes(index: str, atr: str, full_ew_flag: bool = False):
        sherpa_scales =[
 '[MUR=0.5__MUF=0.5__LHAPDF=93300]\n',
 '[MUR=0.5__MUF=1__LHAPDF=93300]\n',
 '[MUR=1__MUF=0.5__LHAPDF=93300]\n',
 '[MUR=1__MUF=2__LHAPDF=93300]\n',
 '[MUR=2__MUF=1__LHAPDF=93300]\n',
 '[MUR=2__MUF=2__LHAPDF=93300]\n']
        madh_scales = ['[tag=              0 dyn=  10 muR=0.50000E+00 muF=0.20000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.10000E+01 muF=0.20000E+01]\n',
 '[tag=            600 dyn=  10 muR=0.50000E+00 muF=0.50000E+00]\n',
 '[tag=              0 dyn=  10 muR=0.10000E+01 muF=0.20000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.20000E+01 muF=0.20000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.10000E+01 muF=0.10000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.50000E+00 muF=0.20000E+01]\n',
 '[tag=            600 dyn=  10 muR=0.10000E+01 muF=0.50000E+00]\n',
 '[tag=            600 dyn=  10 muR=0.10000E+01 muF=0.20000E+01]\n',
 '[tag=            600 dyn=  10 muR=0.20000E+01 muF=0.50000E+00]\n',
 '[tag=              0 dyn=  10 muR=0.50000E+00 muF=0.10000E+01]\n',
 '[tag=            600 dyn=  10 muR=0.50000E+00 muF=0.20000E+01]\n',
 '[tag=              0 dyn=  10 muR=0.10000E+01 muF=0.10000E+01]\n',
 '[tag=              0 dyn=  10 muR=0.50000E+00 muF=0.50000E+00]\n',
 '[tag=            600 dyn=  10 muR=0.10000E+01 muF=0.10000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.20000E+01 muF=0.10000E+01]\n',
 '[tag=            600 dyn=  10 muR=0.20000E+01 muF=0.20000E+01]\n',
 '[tag=            600 dyn=  10 muR=0.20000E+01 muF=0.10000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.50000E+00 muF=0.50000E+00]\n',
 '[tag=              0 dyn=  10 muR=0.20000E+01 muF=0.20000E+01]\n',
 '[tag=            600 dyn=  10 muR=0.50000E+00 muF=0.10000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.20000E+01 muF=0.50000E+00]\n',
 '[tag=              0 dyn=  10 muR=0.10000E+01 muF=0.50000E+00]\n',
 '[tag=            602 dyn=  10 muR=0.10000E+01 muF=0.50000E+00]\n',
 '[tag=              0 dyn=  10 muR=0.20000E+01 muF=0.10000E+01]\n',
 '[tag=            602 dyn=  10 muR=0.50000E+00 muF=0.10000E+01]\n',
 '[tag=              0 dyn=  10 muR=0.20000E+01 muF=0.50000E+00]\n']

        if "powheg" in atr:
            scale_indexes = [index[0:-1]+ f"[W{i}]" + "\n" for i in range(1, 9)]
        elif ("pythia" in atr) or ("vincia" in atr and not full_ew_flag):
            scale_indexes = [index[0:-1]+ f"[VAR{i}]" + "\n" for i in range(1, 7)]
        elif ("vincia_pt_2" in atr and full_ew_flag):
            scale_indexes = [index[0:-1]+ f"[AUX_VAR{i}]" + "\n" for i in range(1, 7)]
        elif ("vincia" in atr and full_ew_flag):
            scale_indexes = [index[0:-1]+ f"[VAR{i}]" + "\n" for i in range(1, 7)]
        elif "sherpa" in atr:
            scale_indexes = [index[0:-1] + i for i in sherpa_scales]
        elif "scale_madH7" in atr:
            scale_indexes = [index[0:-1] + i for i in madh_scales]
        elif full_ew_flag and not "powheg" in atr:
            scale_indexes = [index[0:-1]+ f"[AUX_VAR{i}]" + "\n" for i in range(1, 7)]
        
        return scale_indexes

    
    def _postprocess_scales(self):
        """Combine min/max/central BIGSTXS scale variations into envelopes.

        For every tag found after "_scale_" in attribute names (madH
        excluded), reads ``bigstxs_{min,max,central}_scale_<tag>`` and stores
        ``bigstxs_postprocessed_<tag><suffix>`` where each bin becomes
        (x_min, x_max, median, median - lowest, highest - median)
        of the three candidate values.
        """
        scale_attrs = [a for a in dir(self) if "_scale_" in a and "madH" not in a]
        tags = {a.split("_scale_")[-1] for a in scale_attrs}

        for tag in tags:
            result_name = "bigstxs_postprocessed_" + tag

            lows = getattr(self, f"bigstxs_min_scale_{tag}")
            highs = getattr(self, f"bigstxs_max_scale_{tag}")
            centrals = getattr(self, f"bigstxs_central_scale_{tag}")

            combined = []
            for entry in centrals:
                lo_entry = [e for e in lows if e["index"] == entry["index"]][0]
                hi_entry = [e for e in highs if e["index"] == entry["index"]][0]
                rows = []
                for c, hi, lo in zip(entry['data'], hi_entry['data'], lo_entry['data']):
                    # Sort once instead of re-sorting per tuple element.
                    ordered = sorted((hi[2], lo[2], c[2]))
                    rows.append((c[0], c[1], ordered[1],
                                 ordered[1] - ordered[0],
                                 ordered[2] - ordered[1]))
                combined.append({
                    'index': entry['index'],
                    'data': rows,
                    'x_param': entry['x_param'],
                    'other_params': entry['other_params']
                })

            setattr(self, result_name + self._suffix, combined)

    def _postprocess_scales_madh(self):
        """Combine already-postprocessed BIGSTXS scale dicts into envelopes.

        Like _postprocess_scales, but the inputs are the
        ``postprocessed_stxs_bigstxs_{min,max,central}_scale_<tag>`` dicts
        (index -> rows), which are first converted to the list-of-dicts
        layout via _transform_to_data_list.  Stores
        ``bigstxs_postprocessed_<tag><suffix>`` as a list of
        {"index", "data"} dicts.
        """
        matching = [a for a in dir(self) if "postprocessed_stxs_bigstxs" in a]
        tags = {a.split("_scale_")[-1] for a in matching}

        for tag in tags:
            result_name = "bigstxs_postprocessed_" + tag
            lows = self._transform_to_data_list(getattr(self, f"postprocessed_stxs_bigstxs_min_scale_{tag}"))
            highs = self._transform_to_data_list(getattr(self, f"postprocessed_stxs_bigstxs_max_scale_{tag}"))
            centrals = self._transform_to_data_list(getattr(self, f"postprocessed_stxs_bigstxs_central_scale_{tag}"))

            combined = []
            for entry in centrals:
                lo_rows = [e["data"] for e in lows if e["index"] == entry["index"]][0]
                hi_rows = [e["data"] for e in highs if e["index"] == entry["index"]][0]
                rows = []
                for c, hi, lo in zip(entry['data'], hi_rows, lo_rows):
                    ordered = sorted((hi[2], lo[2], c[2]))
                    rows.append((c[0], c[1], ordered[1],
                                 ordered[1] - ordered[0],
                                 ordered[2] - ordered[1]))
                combined.append({'index': entry['index'], 'data': rows})

            setattr(self, result_name + self._suffix, combined)

    def _transform_to_data_list(self, data):
        """Convert an index->rows mapping to a list of {"index", "data"} dicts."""
        return [{"index": index, "data": rows} for index, rows in data.items()]

    def _postprocess_scales_histo(self):
        """Combine histogram-level min/max/central BIGSTXS scale variations.

        For every tag found after "_scale_" in attribute names (madH
        excluded), reads ``histo_bigstxs_{min,max,central}_scale_<tag>`` and
        builds per-bin rows of
        (x_min, x_max, central, max(central - min, 0), max(max - central, 0)).
        Only entries whose index contains "HISTO" are kept; the result is
        stored as ``histo_bigstxs_postprocessed_<tag><suffix>``.
        """
        scale_attrs = [a for a in dir(self) if "_scale_" in a and "madH" not in a]
        tags = {a.split("_scale_")[-1] for a in scale_attrs}

        for tag in tags:
            result_name = "histo_bigstxs_postprocessed_" + tag

            lows = getattr(self, f"histo_bigstxs_min_scale_{tag}")
            highs = getattr(self, f"histo_bigstxs_max_scale_{tag}")
            centrals = getattr(self, f"histo_bigstxs_central_scale_{tag}")

            combined = []
            for entry in centrals:
                lo_entry = [e for e in lows if e["index"] == entry["index"]][0]
                hi_entry = [e for e in highs if e["index"] == entry["index"]][0]
                rows = []
                for c, hi, lo in zip(entry['data'], hi_entry['data'], lo_entry['data']):
                    down_err = c[2] - lo[2]
                    up_err = hi[2] - c[2]
                    # Negative deviations are clamped to zero.
                    rows.append((c[0], c[1], c[2],
                                 down_err if down_err > 0 else 0,
                                 up_err if up_err > 0 else 0))
                combined.append({'index': entry['index'], 'data': rows})

            histo_only = [d for d in combined if "HISTO" in d["index"]]
            setattr(self, result_name + self._suffix, histo_only)

    def _postprocess_scales_histo_madh(self):
        """Combine postprocessed histogram-level BIGSTXS scale variations.

        Reads the ``postprocessed_histo_bigstxs_{min,max,central}_scale_<tag>``
        lists, builds per-bin rows of
        (x_min, x_max, central, max(central - min, 0), max(max - central, 0)),
        keeps only "HISTO" indexes and stores the result as
        ``histo_bigstxs_postprocessed_<tag>`` — note: NO suffix is appended
        here, unlike the sibling postprocessing methods.
        """
        matching = [a for a in dir(self) if "postprocessed_histo_bigstxs" in a]
        tags = {a.split("_scale_")[-1] for a in matching}
        print('\n\n')

        for tag in tags:
            result_name = "histo_bigstxs_postprocessed_" + tag

            lows = getattr(self, f"postprocessed_histo_bigstxs_min_scale_{tag}")
            highs = getattr(self, f"postprocessed_histo_bigstxs_max_scale_{tag}")
            centrals = getattr(self, f"postprocessed_histo_bigstxs_central_scale_{tag}")

            combined = []
            for entry in centrals:
                lo_entry = [e for e in lows if e["index"] == entry["index"]][0]
                hi_entry = [e for e in highs if e["index"] == entry["index"]][0]
                rows = []
                for c, hi, lo in zip(entry['data'], hi_entry['data'], lo_entry['data']):
                    down_err = c[2] - lo[2]
                    up_err = hi[2] - c[2]
                    # Negative deviations are clamped to zero.
                    rows.append((c[0], c[1], c[2],
                                 down_err if down_err > 0 else 0,
                                 up_err if up_err > 0 else 0))
                combined.append({'index': entry['index'], 'data': rows})

            histo_only = [d for d in combined if "HISTO" in d["index"]]
            setattr(self, result_name, histo_only)

    @staticmethod
    def file_tokenizer(file_path: str):
        """ Tokenize filename 
        
        Returns (True, token) if this file needs to be processed
        Returns (False, None) to skip this file
        
        .top files to be processed
        """
        flag = False
        value = None
        try:
            token, extension = tuple(file_path.split("/")[-1].split("."))
            if extension == "yoda":
                flag = True
                value = token.replace("-", "_")
        except:
            print(f"Couldn'[t tokenize file {file_path}")
        return flag, value

    @staticmethod
    def parse_file(filename, data_flag = "HISTO"):
        """Parse HISTO data from file.

        Also returns index mapping which connects integer index to physical index:
        {2 : MJJ-350-700-PTH-0-200-PTHJJ-0-25-DPHIJJ-0-PIov2}
        which is useful in MoCaNLO
        
        """
        from math import sqrt
        histo_data = {}
        with open(filename, 'r') as file:
            lines = file.readlines()
            for line in lines:
                pattern = r"^.*STXS index.*$"
                if data_flag in line and "END" not in line and "RAW" not in line:
                    index = line.split("/")[-1]
                    try:
                        if "1D" in lines[lines.index(line)+4]:
                            str_type = "Histo1D"
                            func = lambda x: (float(x.split()[0]), float(x.split()[1]),
                                              float(x.split()[2]) / (1000*(float(x.split()[1]) - float(x.split()[0]))),#/ float(x.split()[6]) if float(x.split()[6]) != 0 else 0, applying bin width
                                              float(x.split()[3]) / (1000*(float(x.split()[1]) - float(x.split()[0]))),
                                              float(x.split()[3]) / float(x.split()[6]) if float(x.split()[6]) != 0 else 0)
                            if "Area" in lines[lines.index(line)+7]:
                                lan = 13
                            else:
                                lan = 11
                        elif "2D" in lines[lines.index(line)+4]:
                            str_type = "Histo2D"
                            func = lambda x: (float(x.split()[0]), float(x.split()[1]),
                                              float(x.split()[2]) / (1000*(float(x.split()[1]) - float(x.split()[0]))),
                                              float(x.split()[3]) / (1000*(float(x.split()[1]) - float(x.split()[0]))),
                                              float(x.split()[4]) / (1000*(float(x.split()[1]) - float(x.split()[0]))),
                                              float(x.split()[4]) / float(x.split()[11]) if float(x.split()[11]) != 0 else 0,
                                              float(x.split()[5]) / float(x.split()[11]) if float(x.split()[11]) != 0 else 0,)
                            lan = 12
                        else:
                            continue
                    except:
                        break
                    data = []
                    for row in lines[lines.index(line)+lan:]:
                        # print(row)
                        if row.strip() and not row.startswith('#') and not "END" in row:
                            data.append(func(row))
                        else:
                            break
                    histo_data[index] = data
                elif re.match(pattern, line):
                    print(line)
                
        new_histo = []
    
        for key, value in histo_data.items():
            new_histo.append({"index": key, "data": value})
        return new_histo

    @staticmethod
    def index_parser(index: str, data_flag = "HISTO"):
        """Split a dash-separated index string into x parameter and cut ranges.

        Returns x_param and other params in the following format:
        {"x_param": x_param: str,
        "other_params": [{"name": name_of_param: str,
                        "min_val": min_val: str,
                        "max_val": max_val: str}]
        If other params have only one value, then min_value = param_value

        NOTE(review): an unrecognized data_flag leaves ``params_list`` unset
        and raises UnboundLocalError below — confirm only HISTO/STXS/BIGSTXS
        are ever passed.
        """
        
        x_param = index.split("-")[1]
        if data_flag == "HISTO":
            params_list = index.split("-")[2:]
        elif data_flag == "STXS":
            params_list = index.split("-")[1:]
        elif data_flag == "BIGSTXS":
            params_list = []
            x_param = index.split("-")[0]
        

        other_params_list = []
        # temp_param accumulates {"name", "min_val", "max_val"} for the
        # current parameter.  A token that fails float() (and is not a PI /
        # INFTY symbolic bound) is treated as the NAME of the next parameter:
        # the ValueError jumps to the except branch, which closes the
        # previous parameter and starts a new one.
        temp_param = {}
        for ind, param in enumerate(params_list):
            try:
                if "PI" in param or param == "INFTY":
                    param_value = param  # symbolic bounds pass through as-is
                else:
                    param_value = float(param)  # raises ValueError for name tokens
                # Filling min and max values
                if temp_param.get("min_val", None):
                    temp_param["max_val"] = param
                    # Checking if param is the last in the list
                    if ind == len(params_list) - 1:
                        other_params_list.append(temp_param)
                else:
                    temp_param["min_val"] = param
                    # Checking if param is the last in the list
                    if ind == len(params_list) - 1:
                        other_params_list.append(temp_param)
            except:
                # Name token: close the previous parameter, start a new one.
                other_params_list.append(temp_param)
                temp_param = {"name": param}
        try:
            # The first appended entry is the placeholder accumulated before
            # any name token appeared; drop it.
            other_params_list.pop(0)
        except:
            pass
        return {"x_param": x_param if data_flag == "HISTO" or data_flag == "BIGSTXS" else None, "other_params": other_params_list}