
added new codes for new figures

arefks 1 week ago
parent
commit
e49ce59efb

+ 75 - 0
code/Code4Figures/FA_Mapping.py

@@ -0,0 +1,75 @@
+import glob
+import os
+import pandas as pd
+import nibabel as nib
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib import rcParams
+
+# Set font properties
+rcParams['font.family'] = 'times new roman'
+rcParams['font.size'] = 8
+
+# Define file paths
+path = r"C:\Users\aswen\Desktop\TestingData\Aswendt_qc_rsfmri_plot\proc_data"
+csv_voting_path = r"C:\Users\aswen\Desktop\TestingData\Aswendt_qc_rsfmri_plot\QC\votings.csv"
+
+# Resolve the output directory relative to this script
+script_dir = os.path.dirname(__file__)  # Use this line if running from a script file
+out_path = os.path.join(script_dir, '..', 'figures', "PaperFigures", 'supplement_figure_6')
+if not os.path.exists(out_path):
+    os.makedirs(out_path)
+
+# Read CSV file for voting
+df_voting = pd.read_csv(csv_voting_path, delimiter=",")
+
+# Find DWI files
+SearchP = os.path.join(path, "**", "dwi", "*dwi.nii.gz")
+BetFiles = glob.glob(SearchP, recursive=True)
+
+for bb in BetFiles:
+    temp = os.path.dirname(bb)
+    SearchFA = os.path.join(temp, "**", "fa_flipped.nii.gz")
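+    # Skip subjects without a flipped FA map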
+    try:
+        FA = glob.glob(SearchFA, recursive=True)[0]
+    except IndexError:
+        continue
+    title = os.path.basename(bb).replace("_dwi.nii.gz","")
+    # Load DWI and FA images
+    dwi_img = nib.load(bb)
+    fa_img = nib.load(FA)
+
+    # Rotate images 90 degrees
+    dwi_data_rotated = np.rot90(dwi_img.get_fdata(), k=-1)
+    fa_data_rotated = np.rot90(fa_img.get_fdata(), k=-1)
+
+    # Plot images
+    fig, axes = plt.subplots(1, 2, figsize=(8/2.54, 6/2.54))  # 8 cm x 6 cm figure, converted to inches
+    fig.subplots_adjust(wspace=-0.1)  # Tighten the horizontal space between subplots
+    fig.suptitle(title)  # Add title
+    axes[0].imshow(dwi_data_rotated[:, :, dwi_data_rotated.shape[-2] // 2+5, dwi_data_rotated.shape[-1] // 2], cmap='gray')
+    axes[0].text(0.5, 0.05, 'DWI', color='white', fontweight='bold', ha='center', transform=axes[0].transAxes)
+    axes[0].axis('off')  # Turn off axes
+    
+    axes[1].imshow(np.fliplr(fa_data_rotated[:, :, fa_data_rotated.shape[-1] // 2+5]), cmap='gray')  # Flip left to right
+    axes[1].text(0.5, 0.05, 'FA Map', color='white', fontweight='bold', ha='center', transform=axes[1].transAxes)
+    axes[1].axis('off')  # Turn off axes
+
+    # Find corresponding entry in CSV_voting
+    df_bb_voting = df_voting[df_voting['Pathes'] == bb]
+    if not df_bb_voting.empty:
+        voting_value = df_bb_voting['Voting outliers (from 5)'].values[0]
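+        # Highlight in red when more than one of the five outlier votes flags the scan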
+        if voting_value > 1:
+            axes[0].text(5, 15, f'Majority Votes: {voting_value}', color='red', fontweight='bold')
+        else:
+            axes[0].text(5, 15, f'Majority Votes: {voting_value}', color='yellow', fontweight='bold')
+    else:
+        voting_value = 0
+        axes[0].text(5, 15, f'Majority Votes: {voting_value}', color='white', fontweight='bold')
+    
+    plt.savefig(os.path.join(out_path, os.path.basename(bb).replace(".nii.gz", "").replace("Underscore", "_") + ".svg"), transparent=True, bbox_inches='tight')
+    plt.show()

+ 1 - 1
code/Code4Figures/MutualInfoPlot.py

@@ -24,7 +24,7 @@ ax.plot(df['Shift (Voxel)'], df['Severe motion'], label='Severe motion', linewid
 ax.plot(df['Shift (Voxel)'], df['No motion'], label='No motion', linewidth=1, color='blue')  # Adjust the line width
 
 # Set axis labels
-ax.set_xlabel('Shift (Voxel)', **font_properties)
+ax.set_xlabel('Time Points (s)', **font_properties)
 ax.set_ylabel('Mutual information (a.u)', **font_properties)
 
 # Set axis ticks font and number of ticks

+ 38 - 88
code/Code4Figures/ResoPlotAll.py

@@ -2,66 +2,50 @@ import pandas as pd
 import numpy as np
 import matplotlib.pyplot as plt
 import seaborn as sns
-from scipy.stats import ttest_ind, f_oneway
 import glob
 import os
-from scipy.stats import ttest_ind
-from statsmodels.stats.multitest import multipletests
-import re
-
-#% Function to read CSV files and store them in a dictionary
 
 cm = 1/2.54  # centimeters in inches
+
 def read_csv_files(files):
     data_dict = {}
-    for ff,file in enumerate(files):
+    for ff, file in enumerate(files):
         df = pd.read_csv(file)    
-        # Extract the data type from the file name (assuming the file name contains "anatomical", "structural", or "functional")
-        data_type = "anatomical" if "anat" in file else ("structural" if "diff" in file else "functional")
-        data_dict[ff] = {data_type:df} 
-            
+        data_type = "anat" if "anat" in file else ("diff" if "diff" in file else "func")
+        data_dict[ff] = {data_type: df} 
     return data_dict
 
-
-# Function for statistical comparison and plotting
 def compare_and_plot(data, column_name, group_column):
-    sns.set_palette("Set2")  # Use colors for color-blind people
+    sns.set_palette("Set2")  
     plt.figure(figsize=(9*cm, 6*cm))
     sns.boxplot(x=group_column, y=column_name, data=data)
     plt.xlabel(group_column)
     plt.ylabel(column_name)
     plt.title(f"Statistical Comparison of {column_name} by {group_column}")
     plt.tight_layout()
-    #plt.savefig(f"{column_name}_by_{group_column}_boxplot.png")
     plt.show()
 
-#% List of CSV files for each data type
-#Path = r"C:\Users\aswen\Documents\Data\2023_Kalantari_AIDAqc\outputs\validation\QC_Plot_VoxelSizesAll"
-Path = r"C:\Users\aswen\Desktop\Code\Validation"
+Path = r"C:\Users\arefk\Desktop\Projects\AIDAqcOutput_of_all_Datasets"
 
 anatomical_files = glob.glob(os.path.join(Path,"*/*/*caculated_features_anat.csv"), recursive=True)
 structural_files = glob.glob(os.path.join(Path,"*/*/*caculated_features_diff.csv"), recursive=True)
 functional_files = glob.glob(os.path.join(Path,"*/*/*caculated_features_func.csv"), recursive=True)
 
-All_files = [anatomical_files,structural_files,functional_files]
+All_files = [anatomical_files, structural_files, functional_files]
 
-# Read the CSV files and store them in dictionaries
 anatomical_data = read_csv_files(anatomical_files)
 structural_data = read_csv_files(structural_files)
 functional_data = read_csv_files(functional_files)
-All_Data = [anatomical_data,structural_data,functional_data]
-
-All_type = ["anatomical","structural","functional"]
-#% data statistisc figure 7
 
-#features_to_compare = ["SNR Chang", "SNR Normal", "tSNR (Averaged Brain ROI)", "Displacement factor (std of Mutual information)"]
+All_Data = [anatomical_data, structural_data, functional_data]
+All_type = ["anat", "diff", "func"]
 
 features_to_compare = ["SpatRx", "SpatRy", "Slicethick"]
 
+fig, axes = plt.subplots(3, 3, figsize=(18*cm, 18*cm), dpi=300)
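+# 3x3 grid: one row per modality (anat, diff, func), one column per resolution feature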
 
-Data_of_selected_feature2 = pd.DataFrame()
-for dd,data in enumerate(All_Data):
-    for feature in features_to_compare:
+for i, data in enumerate(All_Data):
+    for j, feature in enumerate(features_to_compare):
         cc = 0
         temp = pd.DataFrame()
         Data_of_selected_feature = pd.DataFrame()
@@ -69,15 +53,12 @@ for dd,data in enumerate(All_Data):
 
         for key in data:
             try:
-                temp_data[feature] = data[key][All_type[dd]][feature]
+                temp_data[feature] = data[key][All_type[i]][feature]
             except KeyError:
                 continue
-            temp_data["Dataset"] = All_files[dd][cc].split(os.sep)[-3]
-            cc = cc +1
+            temp_data["Dataset"] = All_files[i][cc].split(os.sep)[-3]
+            cc = cc + 1
             Data_of_selected_feature = pd.concat([Data_of_selected_feature, temp_data], ignore_index=True)
-            #Data_of_selected_feature2 = pd.concat([Data_of_selected_feature2, temp_data], ignore_index=True)
-            
-            
             
         if not Data_of_selected_feature.empty:
             Data_of_selected_feature['sort'] = Data_of_selected_feature['Dataset'].str.extract('(\d+)', expand=True).astype(int)
@@ -86,7 +67,7 @@ for dd,data in enumerate(All_Data):
             if feature == "SNR Normal":
                 Data_of_selected_feature.rename(columns={"SNR Normal": "SNR-Standard (dB)"}, inplace=True)
                 feature = "SNR-Standard (dB)"
-            if feature == "SNR Chang":
+            elif feature == "SNR Chang":
                 Data_of_selected_feature.rename(columns={"SNR Chang": "SNR-Chang (dB)"}, inplace=True)
                 feature = "SNR-Chang (dB)"    
             elif feature == "tSNR (Averaged Brain ROI)":
@@ -96,59 +77,28 @@ for dd,data in enumerate(All_Data):
                 Data_of_selected_feature.rename(columns={"Displacement factor (std of Mutual information)": "Motion severity (A.U)"}, inplace=True)
                 feature = "Motion severity (A.U)"
             
-            
-            #Data_of_selected_feature2["Vol"] = Data_of_selected_feature2["SpatRx"]*Data_of_selected_feature2["SpatRy"]*Data_of_selected_feature2["Slicethick"]
-            
-            #Data_of_selected_feature = Data_of_selected_feature.sort_values("Dataset",ascending=False)
-            # creating boxplots
-            plt.figure(figsize=(6*cm,3.527*cm),dpi=300)
             sns.set_style('ticks')
-            sns.set(font='Times New Roman', font_scale=0.9,style=None)  # Set font to Times New Roman and font size to 9
+            sns.set(font='Times New Roman', font_scale=0.9,style=None)  
             palette = 'Set2'
-            ax = sns.violinplot(x="Dataset", y=feature, data=Data_of_selected_feature, hue="Dataset", dodge=False,
-                                palette=palette,
-                                scale="width", inner=None,linewidth=1)
-            patches = ax.patches
-            #legend_colors = [patch.get_facecolor() for patch in patches[:]]
-
-            xlim = ax.get_xlim()
-            ylim = ax.get_ylim()
-            for violin in ax.collections:
-                bbox = violin.get_paths()[0].get_extents()
-                x0, y0, width, height = bbox.bounds
-                violin.set_clip_path(plt.Rectangle((x0, y0), width / 2, height, transform=ax.transData))
-            
-            sns.boxplot(x="Dataset", y=feature, data=Data_of_selected_feature, saturation=1, showfliers=False,
-                        width=0.3, boxprops={'zorder': 3, 'facecolor': 'none'}, ax=ax, linewidth=1)
-            old_len_collections = len(ax.collections)
-            sns.stripplot(x="Dataset", y=feature, data=Data_of_selected_feature,size=1.1, hue="Dataset", palette=palette, dodge=False, ax=ax)
-            for dots in ax.collections[old_len_collections:]:
-                dots.set_offsets(dots.get_offsets() + np.array([0.12, 0]))
-            ax.set_xlim(xlim)
-            ax.set_ylim(ylim)
-            ax.legend_.remove()
-            ax
-            ax.set_xticklabels(ax.get_xticklabels(), rotation=45,fontsize=8)
-# =============================================================================
-#             for label, color in zip(ax.get_xticklabels(), legend_colors):
-#                 label.set_color(color)
-# =============================================================================
+            ax = sns.barplot(x="Dataset", y=feature, data=Data_of_selected_feature, ci=None, palette=palette, ax=axes[i, j])
             ax.set_xlabel('')
-            ax.set_title(All_type[dd].capitalize(),weight='bold',fontsize=9)
-            y_label = ax.set_ylabel(ax.get_ylabel(), fontweight='bold',fontsize=9)
-
-# =============================================================================
-            ax.xaxis.grid(True, linestyle='-', which='major', color='gray', linewidth=0)
-            ax.xaxis.grid(True, linestyle='--', which='minor', color='gray', linewidth=00)
-# 
-            ax.yaxis.grid(True, linestyle='-', which='major', color='gray', linewidth=0.5)
-            ax.yaxis.grid(True, linestyle='--', which='minor', color='gray', linewidth=0.5)        
-# =============================================================================
-            ax.spines['top'].set_linewidth(0.5)  # Top border
-            ax.spines['right'].set_linewidth(0.5)  # Right border
-            ax.spines['bottom'].set_linewidth(0.5)  # Bottom border
-            ax.spines['left'].set_linewidth(0.5)  # Left border
-            ax.tick_params(axis='both', which='both', width=0.5,color='gray',length=2)
-            plt.xticks(ha='right')
-            plt.savefig(os.path.join(Path,feature+"_"+All_type[dd]+".svg"), format='svg', bbox_inches='tight',transparent=False)
-            plt.show()
+            ax.set_ylabel(f'{All_type[i]}-{feature} (mm)', fontweight='bold', fontsize=9)
+            ax.set_xticklabels(ax.get_xticklabels(), rotation=45, fontsize=4.5, ha='right')  # Adjust tick labels
+            ax.spines['top'].set_linewidth(0.5)  
+            ax.spines['right'].set_linewidth(0.5)  
+            ax.spines['bottom'].set_linewidth(0.5)  
+            ax.spines['left'].set_linewidth(0.5)  
+            ax.tick_params(axis='both', which='both', width=0.5, color='gray', length=2)
+            ax.yaxis.grid(True)  # Add horizontal gridlines
+            plt.tight_layout()
+
+
+#%%
+output_path = r"C:\Users\arefk\Desktop\Projects\AIDAqc_Figures\figures\PaperFigures\AswenSupplement1"
+
+# Save as SVG
+plt.savefig(output_path + ".svg", format='svg')
+
+# Save as PNG
+plt.savefig(output_path + ".png", dpi=300, format='png')
+plt.show()

+ 16 - 16
code/Code4Figures/ViolinPlots.py

@@ -17,8 +17,8 @@ def read_csv_files(files):
     data_dict = {}
     for ff,file in enumerate(files):
         df = pd.read_csv(file)    
-        # Extract the data type from the file name (assuming the file name contains "anatomical", "structural", or "functional")
-        data_type = "anatomical" if "anatomical" in file else ("structural" if "structural" in file else "functional")
+        # Extract the data type from the file name (assuming the file name contains "anat", "diff", or "func")
+        data_type = "anat" if "anat" in file else ("diff" if "diff" in file else "func")
         data_dict[ff] = {data_type:df} 
             
     return data_dict
@@ -37,21 +37,21 @@ def compare_and_plot(data, column_name, group_column):
     plt.show()
 
 #% List of CSV files for each data type
-Path = r"C:\Users\aswen\Desktop\Code\Validation2"
+Path = r"C:\Users\arefk\Desktop\Projects\AIDAqcOutput_of_all_Datasets"
 
-anatomical_files = glob.glob(os.path.join(Path,"*/*/*caculated_features_anatomical.csv"), recursive=True)
-structural_files = glob.glob(os.path.join(Path,"*/*/*caculated_features_structural.csv"), recursive=True)
-functional_files = glob.glob(os.path.join(Path,"*/*/*caculated_features_functional.csv"), recursive=True)
+anat_files = [file for file in glob.glob(os.path.join(Path, "*/*/*caculated_features_anat.csv"), recursive=True) if "m_Rei" not in file and "7_m_Lo" not in file]
+diff_files = [file for file in glob.glob(os.path.join(Path, "*/*/*caculated_features_diff.csv"), recursive=True) if "m_Rei" not in file and "7_m_Lo" not in file]
+func_files = [file for file in glob.glob(os.path.join(Path, "*/*/*caculated_features_func.csv"), recursive=True) if "m_Rei" not in file and "7_m_Lo" not in file]
 
-All_files = [anatomical_files,structural_files,functional_files]
+All_files = [anat_files,diff_files,func_files]
 
 # Read the CSV files and store them in dictionaries
-anatomical_data = read_csv_files(anatomical_files)
-structural_data = read_csv_files(structural_files)
-functional_data = read_csv_files(functional_files)
-All_Data = [anatomical_data,structural_data,functional_data]
+anat_data = read_csv_files(anat_files)
+diff_data = read_csv_files(diff_files)
+func_data = read_csv_files(func_files)
+All_Data = [anat_data,diff_data,func_data]
 
-All_type = ["anatomical","structural","functional"]
+All_type = ["anat","diff","func"]
 #% data statistisc figure 7
 BINS = [8,8,8]
 features_to_compare = ["SNR Chang", "SNR Normal", "tSNR (Averaged Brain ROI)", "Displacement factor (std of Mutual information)"]
@@ -102,7 +102,7 @@ for dd,data in enumerate(All_Data):
             
             #Data_of_selected_feature = Data_of_selected_feature.sort_values("Dataset",ascending=False)
             # creating boxplots
-            if All_type[dd] == "anatomical":
+            if All_type[dd] == "anat":
                 plt.figure(figsize=(21.3*cm,3.527*cm),dpi=600)
             else:
                 plt.figure(figsize=(9.70*cm,3.527*cm),dpi=600)
@@ -131,7 +131,7 @@ for dd,data in enumerate(All_Data):
                 dots.set_offsets(dots.get_offsets() + np.array([0.12, 0]))
             ax.set_xlim(xlim)
             ax.set_ylim(ylim)
-            ax.legend_.remove()
+            #ax.legend_.remove()
             ax.locator_params(axis='y', nbins=BINS[dd])  # Set the number of ticks for the y-axis
             
             ax
@@ -166,6 +166,6 @@ for dd,data in enumerate(All_Data):
             
             ax.tick_params(axis='both', which='both', width=0.5,color='gray',length=2)
             plt.xticks(ha='right')
-            plt.savefig(os.path.join(out_path,feature+"_"+All_type[dd]+".svg"), format='svg', bbox_inches='tight',transparent=False)
-            plt.savefig(os.path.join(out_path,feature+"_"+All_type[dd]+".png"), format='png', bbox_inches='tight',transparent=False)
+            #plt.savefig(os.path.join(out_path,feature+"_"+All_type[dd]+"withoutAbdominal.svg"), format='svg', bbox_inches='tight',transparent=False)
+            #plt.savefig(os.path.join(out_path,feature+"_"+All_type[dd]+"withoutAbdominal.png"),dpi=300 ,format='png', bbox_inches='tight',transparent=False)
             plt.show()

+ 169 - 0
code/Code4Figures/ViolinPlots_abdominal.py

@@ -0,0 +1,169 @@
+import pandas as pd
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+from scipy.stats import ttest_ind, f_oneway
+import glob
+import os
+from scipy.stats import ttest_ind
+from statsmodels.stats.multitest import multipletests
+import re
+
+#% Function to read CSV files and store them in a dictionary
+script_dir = os.path.dirname(__file__)
+out_path = os.path.join(script_dir, '..', 'figures')
+cm = 1/2.54  # centimeters in inches
+def read_csv_files(files):
+    data_dict = {}
+    for ff,file in enumerate(files):
+        df = pd.read_csv(file)    
+        # Extract the data type from the file name (assuming the file name contains "anat", "diff", or "func")
+        data_type = "anat" if "anat" in file else ("diff" if "diff" in file else "func")
+        data_dict[ff] = {data_type:df} 
+            
+    return data_dict
+
+
+# Function for statistical comparison and plotting
+def compare_and_plot(data, column_name, group_column):
+    sns.set_palette("Set2")  # Use colors for color-blind people
+    plt.figure(figsize=(9*cm, 6*cm))
+    sns.boxplot(x=group_column, y=column_name, data=data)
+    plt.xlabel(group_column)
+    plt.ylabel(column_name)
+    plt.title(f"Statistical Comparison of {column_name} by {group_column}")
+    plt.tight_layout()
+    #plt.savefig(f"{column_name}_by_{group_column}_boxplot.png")
+    plt.show()
+
+Path = r"C:\Users\arefk\Desktop\Projects\AIDAqcOutput_of_all_Datasets"
+
+anat_files = [file for file in glob.glob(os.path.join(Path, "*/*/*caculated_features_anat.csv"), recursive=True) if "m_Rei" in file or "7_m_Lo" in file]
+diff_files = [file for file in glob.glob(os.path.join(Path, "*/*/*caculated_features_diff.csv"), recursive=True) if "m_Rei" in file or "7_m_Lo" in file]
+func_files = [file for file in glob.glob(os.path.join(Path, "*/*/*caculated_features_func.csv"), recursive=True) if "m_Rei" in file or "7_m_Lo" in file]
+
+All_files = [anat_files,diff_files,func_files]
+
+# Read the CSV files and store them in dictionaries
+anat_data = read_csv_files(anat_files)
+diff_data = read_csv_files(diff_files)
+func_data = read_csv_files(func_files)
+All_Data = [anat_data,diff_data,func_data]
+All_type = ["anat","diff","func"]
+#% data statistics figure 7
+BINS = [8,8,8]
+features_to_compare = ["SNR Chang", "SNR Normal", "tSNR (Averaged Brain ROI)", "Displacement factor (std of Mutual information)"]
+
+#features_to_compare = ["SpatRx", "SpatRy", "Slicethick"]
+
+
+Data_of_selected_feature2 = pd.DataFrame()
+for dd,data in enumerate(All_Data):
+    for feature in features_to_compare:
+        cc = 0
+        temp = pd.DataFrame()
+        Data_of_selected_feature = pd.DataFrame()
+        temp_data = pd.DataFrame()
+
+        for key in data:
+            try:
+                temp_data[feature] = data[key][All_type[dd]][feature]
+            except KeyError:
+                continue
+            temp_data["Dataset"] = All_files[dd][cc].split(os.sep)[-3]
+            cc = cc +1
+            Data_of_selected_feature = pd.concat([Data_of_selected_feature, temp_data], ignore_index=True)
+            #Data_of_selected_feature2 = pd.concat([Data_of_selected_feature2, temp_data], ignore_index=True)
+            
+            
+            
+        if not Data_of_selected_feature.empty:
+            Data_of_selected_feature['sort'] = Data_of_selected_feature['Dataset'].str.extract(r'(\d+)', expand=True).astype(int)
+            Data_of_selected_feature = Data_of_selected_feature.sort_values('sort')
+            
+            if feature == "SNR Normal":
+                Data_of_selected_feature.rename(columns={"SNR Normal": "SNR-Standard (dB)"}, inplace=True)
+                feature = "SNR-Standard (dB)"
+            if feature == "SNR Chang":
+                Data_of_selected_feature.rename(columns={"SNR Chang": "SNR-Chang (dB)"}, inplace=True)
+                feature = "SNR-Chang (dB)"    
+            elif feature == "tSNR (Averaged Brain ROI)":
+                Data_of_selected_feature.rename(columns={"tSNR (Averaged Brain ROI)": "tSNR (dB)"}, inplace=True)
+                feature = "tSNR (dB)"
+            elif feature == "Displacement factor (std of Mutual information)":
+                Data_of_selected_feature.rename(columns={"Displacement factor (std of Mutual information)": "Motion severity (a.u)"}, inplace=True)
+                BINS[dd] = 10
+                feature = "Motion severity (a.u)"
+            
+            
+            #Data_of_selected_feature2["Vol"] = Data_of_selected_feature2["SpatRx"]*Data_of_selected_feature2["SpatRy"]*Data_of_selected_feature2["Slicethick"]
+            
+            #Data_of_selected_feature = Data_of_selected_feature.sort_values("Dataset",ascending=False)
+            # creating boxplots
+            if All_type[dd] == "anat":
+                plt.figure(figsize=(6*cm,3*cm),dpi=600)
+            else:
+                plt.figure(figsize=(6*cm,3*cm),dpi=600)
+                
+            sns.set_style('ticks')
+            sns.set(font='Times New Roman', style=None)  # Set font to Times New Roman
+            palette = 'Set2'
+            ax = sns.violinplot(x="Dataset", y=feature, data=Data_of_selected_feature, hue="Dataset", dodge=False,
+                                palette=palette,
+                                scale="width", inner=None,linewidth=1)
+            patches = ax.patches
+            #legend_colors = [patch.get_facecolor() for patch in patches[:]]
+
+            xlim = ax.get_xlim()
+            ylim = ax.get_ylim()
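+            # Clip each violin to its left half so the boxplot and strip plot fit beside it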
+            for violin in ax.collections:
+                bbox = violin.get_paths()[0].get_extents()
+                x0, y0, width, height = bbox.bounds
+                violin.set_clip_path(plt.Rectangle((x0, y0), width / 2, height, transform=ax.transData))
+            
+            sns.boxplot(x="Dataset", y=feature, data=Data_of_selected_feature, saturation=1, showfliers=False,
+                        width=0.3, boxprops={'zorder': 3, 'facecolor': 'none'}, ax=ax, linewidth=1)
+            old_len_collections = len(ax.collections)
+            sns.stripplot(x="Dataset", y=feature, data=Data_of_selected_feature,size=1.1, hue="Dataset", palette=palette, dodge=False, ax=ax)
+            for dots in ax.collections[old_len_collections:]:
+                dots.set_offsets(dots.get_offsets() + np.array([0.12, 0]))
+            ax.set_xlim(xlim)
+            ax.set_ylim(ylim)
+            #ax.legend_.remove()
+            ax.locator_params(axis='y', nbins=BINS[dd])  # Set the number of ticks for the y-axis
+            
+            ax.set_xticklabels(ax.get_xticklabels(), rotation=45,fontsize=8)
+            ax.set_yticklabels(ax.get_yticklabels(),fontsize=8)
+            
+            
+            #ax.set_yticks(np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], BINS[dd]))
+
+
+# =============================================================================
+#             for label, color in zip(ax.get_xticklabels(), legend_colors):
+#                 label.set_color(color)
+# =============================================================================
+            ax.set_xlabel('')
+            ax.set_title(All_type[dd].capitalize(),weight='bold',fontsize=10)
+            y_label = ax.set_ylabel(ax.get_ylabel(),fontsize=8)
+
+# =============================================================================
+            ax.xaxis.grid(True, linestyle='-', which='major', color='gray', linewidth=0)
+            ax.xaxis.grid(True, linestyle='--', which='minor', color='gray', linewidth=0)
+# 
+            ax.yaxis.grid(True, linestyle='-', which='major', color='gray', linewidth=0)
+            ax.yaxis.grid(True, linestyle='--', which='minor', color='gray', linewidth=0)        
+# =============================================================================
+            ax.spines['top'].set_linewidth(0.5)  # Top border
+            ax.spines['right'].set_linewidth(0.5)  # Right border
+            ax.spines['bottom'].set_linewidth(0.5)  # Bottom border
+            ax.spines['left'].set_linewidth(0.5)  # Left border
+            # Set axis tick width, color, and length
+            ax.tick_params(axis='both', which='both', width=0.5, color='gray', length=2)
+            plt.xticks(ha='right')
+            plt.savefig(os.path.join(out_path,feature+"_"+All_type[dd]+"Abdominal.svg"), format='svg', bbox_inches='tight',transparent=False)
+            plt.savefig(os.path.join(out_path,feature+"_"+All_type[dd]+"Abdominal.png"),dpi=300 ,format='png', bbox_inches='tight',transparent=False)
+            plt.show()

+ 58 - 0
code/Code4Figures/add_motion_figures.py

@@ -0,0 +1,58 @@
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+from scipy.stats import ttest_ind
+import os
+
+
+# Setting font and font size
+plt.rcParams['font.family'] = 'Times New Roman'
+plt.rcParams['font.size'] = 8
+# Resolve input and output paths relative to this script
+script_dir = os.path.dirname(__file__)
+dirpath = os.path.dirname(script_dir)
+out_path = os.path.join(dirpath, 'figures')
+#os.mkdir(out_path)
+file_address = os.path.join(dirpath, "input", "noise_and_motion", "caculated_features_func.csv")
+# Load CSV file into a DataFrame
+df = pd.read_csv(file_address)
+# Filter data into two groups based on the presence of keywords in the FileAddress column
+group_original = df[~df['FileAddress'].str.contains('mc.nii|motion_added', na=False)]
+group_added_motion = df[df['FileAddress'].str.contains('motion_added', na=False)]
+
+# Extract Displacement factor values for each group
+dfactor_original = group_original['Displacement factor (std of Mutual information)']
+dfactor_added_motion = group_added_motion['Displacement factor (std of Mutual information)']
+
+# Perform t-test
+ttest_dfactor_am_vs_original = ttest_ind(dfactor_added_motion, dfactor_original,equal_var=False)
+
+# Print t-test result
+print("\nT-test results for Displacement factor (Added Motion vs. Original):")
+print("Statistic:", ttest_dfactor_am_vs_original.statistic)
+print("P-value:", ttest_dfactor_am_vs_original.pvalue)
+
+cm = 1/2.54  # centimeters in inches
+# Create boxplot for Displacement factor
+plt.figure(figsize=(6*cm, 4*cm),dpi=300)  # Adjust the figure size as needed
+
+# Combine the data into a list
+data = [dfactor_added_motion,dfactor_original ]
+
+# Create boxplot
+sns.boxplot(data=data, fliersize=3 ,flierprops={"marker": "o"},showfliers=False,palette="Set2",width=0.6, boxprops={'zorder': 3}, linewidth=1)
+
+# Add labels to the x-axis
+plt.xticks([0, 1], ['Added motion', 'Original'], fontsize=8, fontname='Times New Roman')
+
+# Add title and labels
+plt.title('(c)', fontsize=10, fontweight='bold', fontname='Times New Roman', loc='left')
+plt.xlabel('Group', fontsize=8, fontname='Times New Roman')
+plt.ylabel('Motion Severity (a.u)', fontsize=8, fontname='Times New Roman')
+plt.ylim([0,0.11])
+
+# Save the plot
+plt.savefig(os.path.join(out_path, "displacement_factor_comparison.svg"), format='svg' ,dpi=300, bbox_inches='tight')
+
+# Show the plot
+plt.show()

+ 112 - 0
code/Code4Figures/add_noise_figures.py

@@ -0,0 +1,112 @@
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+from scipy.stats import ttest_ind
+import os
+
+# Setting font and font size
+plt.rcParams['font.family'] = 'Times New Roman'
+plt.rcParams['font.size'] = 8
+
+# Load CSV file into a DataFrame
+script_dir = os.path.dirname(__file__)
+dirpath = os.path.dirname(script_dir)
+out_path = os.path.join(dirpath, 'figures')
+#os.mkdir(out_path)
+file_address = os.path.join(dirpath, "input", "noise_and_motion", "caculated_features_anat.csv")
+# Load CSV file into a DataFrame
+df = pd.read_csv(file_address)
+
+outdir = os.path.join(dirpath, "figures")
+# Filter data into four groups based on the presence of "speckleNoisy.nii.gz", "saltPepperNoisy.nii.gz", "randomNoisy.nii.gz", and "original.nii.gz" in the FileAddress column
+group_with_speckle_noise = df[df['FileAddress'].str.contains('speckleNoisy.nii.gz', na=False)]
+group_with_salt_pepper_noise = df[df['FileAddress'].str.contains('saltPepperNoisy.nii.gz', na=False)]
+group_with_random_noise = df[df['FileAddress'].str.contains('randomNoisy.nii.gz', na=False)]
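+# 'ois' matches both 'Noisy' and 'noise' in the file names, so the negation keeps only the original images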
+group_with_original = df[~df['FileAddress'].str.contains('ois', na=False)]
+
+# Extract SNR values for each group
+snr_chang_with_speckle_noise = group_with_speckle_noise['SNR Chang']
+snr_chang_with_salt_pepper_noise = group_with_salt_pepper_noise['SNR Chang']
+snr_chang_with_random_noise = group_with_random_noise['SNR Chang']
+snr_chang_with_original = group_with_original['SNR Chang']
+
+snr_normal_with_speckle_noise = group_with_speckle_noise['SNR Normal']
+snr_normal_with_salt_pepper_noise = group_with_salt_pepper_noise['SNR Normal']
+snr_normal_with_random_noise = group_with_random_noise['SNR Normal']
+snr_normal_with_original = group_with_original['SNR Normal']
+
+cm = 1/2.54  # centimeters in inches
+# Create boxplots for the SNR comparisons
+plt.figure(figsize=(12 * cm, 5 * cm), dpi=300)  # Adjust the figure size as needed
+
+plt.subplot(1, 2, 1)
+sns.boxplot(data=[snr_chang_with_random_noise,
+                  snr_chang_with_salt_pepper_noise,
+                  snr_chang_with_speckle_noise, snr_chang_with_original], palette="Set2", fliersize=3,
+            flierprops={"marker": "o"},width=0.6, boxprops={'zorder': 3}, linewidth=1,showfliers=False,)
+plt.title('(a)', fontsize=10, fontweight='bold', loc="left")
+plt.xticks(ticks=[0, 1, 2, 3], labels=['Gaussian ', 'S&P', 'Speckle', 'Original'])
+plt.xlabel('Noise type', fontsize=8, fontname='Times New Roman')
+plt.ylabel('SNR Chang (dB)', fontsize=8, fontname='Times New Roman')
+
+plt.ylim([18,57])
+
+plt.subplot(1, 2, 2)
+sns.boxplot(data=[snr_normal_with_random_noise,
+                  snr_normal_with_salt_pepper_noise,
+                  snr_normal_with_speckle_noise,
+                  snr_normal_with_original], palette="Set2", fliersize=3,
+            flierprops={"marker": "o"}, saturation=1, showfliers=False,
+                        width=0.6, boxprops={'zorder': 3}, linewidth=1)
+
+plt.title('(b)', fontsize=10, fontweight='bold', loc="left")
+plt.xticks(ticks=[0, 1, 2, 3], labels=['Gaussian ', 'S&P', 'Speckle', 'Original'])
+plt.xlabel('Noise type', fontsize=8, fontname='Times New Roman')
+plt.ylabel('SNR Standard (dB)', fontsize=8, fontname='Times New Roman')
+
+plt.ylim([10,42])
+
+
+# Remove rows with NaN values from SNR Chang datasets
+snr_chang_with_speckle_noise = snr_chang_with_speckle_noise.dropna()
+snr_chang_with_salt_pepper_noise = snr_chang_with_salt_pepper_noise.dropna()
+snr_chang_with_random_noise = snr_chang_with_random_noise.dropna()
+
+# Perform Welch's t-tests for SNR Chang between Original and each noise type
+ttest_welch_chang_original_vs_salt_pepper = ttest_ind(snr_chang_with_original, snr_chang_with_salt_pepper_noise, equal_var=False)
+ttest_welch_chang_original_vs_speckle = ttest_ind(snr_chang_with_original, snr_chang_with_speckle_noise, equal_var=False)
+ttest_welch_chang_original_vs_random = ttest_ind(snr_chang_with_original, snr_chang_with_random_noise, equal_var=False)
+
+# Perform Welch's t-tests for SNR Normal between Original and each noise type
+ttest_welch_normal_original_vs_salt_pepper = ttest_ind(snr_normal_with_original, snr_normal_with_salt_pepper_noise, equal_var=False)
+ttest_welch_normal_original_vs_speckle = ttest_ind(snr_normal_with_original, snr_normal_with_speckle_noise, equal_var=False)
+ttest_welch_normal_original_vs_random = ttest_ind(snr_normal_with_original, snr_normal_with_random_noise, equal_var=False)
+
+# Adjust p-values for multiple comparisons using Bonferroni correction
+alpha = 0.05
+num_comparisons = 3  # Number of comparisons
+alpha_adjusted = alpha / num_comparisons
+
+p_values_chang = [ttest_welch_chang_original_vs_salt_pepper.pvalue, ttest_welch_chang_original_vs_speckle.pvalue, ttest_welch_chang_original_vs_random.pvalue]
+reject_null_chang = [p < alpha_adjusted for p in p_values_chang]
+
+p_values_normal = [ttest_welch_normal_original_vs_salt_pepper.pvalue, ttest_welch_normal_original_vs_speckle.pvalue, ttest_welch_normal_original_vs_random.pvalue]
+reject_null_normal = [p < alpha_adjusted for p in p_values_normal]
+
+# Print p-values and Bonferroni-corrected decisions
+print("Welch's t-test p-values for SNR Chang (Bonferroni-corrected alpha):")
+print("Original vs Salt & Pepper:", ttest_welch_chang_original_vs_salt_pepper.pvalue, "Reject Null:", reject_null_chang[0])
+print("Original vs Speckle:", ttest_welch_chang_original_vs_speckle.pvalue, "Reject Null:", reject_null_chang[1])
+print("Original vs Random:", ttest_welch_chang_original_vs_random.pvalue, "Reject Null:", reject_null_chang[2])
+
+print("\nAdjusted p-values for SNR Normal:")
+print("Original vs Salt & Pepper:", ttest_welch_normal_original_vs_salt_pepper.pvalue, "Reject Null:", reject_null_normal[0])
+print("Original vs Speckle:", ttest_welch_normal_original_vs_speckle.pvalue, "Reject Null:", reject_null_normal[1])
+print("Original vs Random:", ttest_welch_normal_original_vs_random.pvalue, "Reject Null:", reject_null_normal[2])
+# Adjust layout
+plt.tight_layout()
+
+# Save the plot as SVG
+plt.savefig(os.path.join(outdir, "NoiseComparisons.svg"), format='svg', dpi=300)
+
+plt.show()

+ 130 - 0
code/Code4Figures/correlation.py

@@ -0,0 +1,130 @@
+import glob
+import os
+import pandas as pd
+import nibabel as nib
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib import rcParams
+
+# Set font properties
+rcParams['font.family'] = 'times new roman'
+rcParams['font.size'] = 8
+
+# Define file paths
+path = r"C:\Users\aswen\Desktop\TestingData\Aswendt_qc_rsfmri_plot\proc_data"
+csv_voting_path = r"C:\Users\aswen\Desktop\TestingData\Aswendt_qc_rsfmri_plot\QC\votings.csv"
+
+# Resolve the output directory relative to this script
+script_dir = os.path.dirname(__file__)  # Use this line if running from a script file
+out_path = os.path.join(script_dir, '..', 'figures', 'supplement9')
+
+if not os.path.exists(out_path):
+    os.mkdir(out_path)
+
+# Read CSV file for voting
+#df_voting = pd.read_csv(csv_voting_path, delimiter=",")
+
+# Find rs-fMRI files
+search_pattern = os.path.join(path, "**", "func", "rs-fMRI_niiData", "*_mcf_f.nii.gz")
+rs_files = glob.glob(search_pattern, recursive=True)
+
+for rs_file in rs_files:
+    temp_dir = os.path.dirname(os.path.dirname(rs_file))
+    mask_search_pattern = os.path.join(temp_dir, "*AnnoSplit_parental.nii.gz")
+    mask_files = glob.glob(mask_search_pattern, recursive=True)
+    if not mask_files:
+        # Skip sessions without a parental annotation mask
+        continue
+    mask_file = mask_files[0]
+    
+    # Load rs-fMRI data
+    rs_img = nib.load(rs_file)
+    rs_data = rs_img.get_fdata()
+    
+    # Load mask data
+    mask_img = nib.load(mask_file)
+    mask_data = mask_img.get_fdata()
+    
+    # Convert mask_data to 4D
+    mask_data_4d = np.expand_dims(mask_data, axis=3)
+
+    # Initialize a list to store the average time series for each region
+    average_all_all = []
+    
+    # Iterate over unique values in mask_data_4d
+    unique_regions = np.unique(mask_data_4d)
+    for mm in unique_regions:
+        # Initialize a list to store average time series for the current region
+        average_all = []
+        # Mask out the region in rs_data
+        masked_data = rs_data * (mask_data_4d == mm)
+        for t in range(masked_data.shape[3]):
+            masked_data_t = masked_data[:, :, :, t]
+            masked_data_nonzero = masked_data_t[masked_data_t != 0]
+            average = np.mean(masked_data_nonzero)
+            average_all.append(average)
+        # Append the average time series for the current region to the list
+        average_all_all.append(average_all)
+    
+    # Convert the list of average time series for each region into a matrix
+    average_matrix = np.array(average_all_all)
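+    # average_matrix has shape (regions, time points); np.corrcoef below correlates regions row-wise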
+
+    # Initialize a list to store average time series for specific regions
+    average_all_specific = []
+
+    # Specify regions of interest
+    regions_of_interest = [329,1080, 2329, 3080]
+    region_names = ["L_MOs","L_HIP","R_MOs","R_HIP"]
+    
+    
+    for sts in regions_of_interest:    
+        BigMaskData = rs_data * (mask_data_4d == sts)
+        BigMaskDataaverage_all =  []
+        for t in range(BigMaskData.shape[3]):
+            BigMaskData_t = BigMaskData[:, :, :, t]
+            BigMaskData_t_nonzero = BigMaskData_t[BigMaskData_t != 0]
+            BigMaskDataaverage = np.mean(BigMaskData_t_nonzero)
+            BigMaskDataaverage_all.append(BigMaskDataaverage)
+        # Calculate average time series for the masked region
+        average_all_specific.append(BigMaskDataaverage_all)
+    average_all_specific_matrix = np.array(average_all_specific)
+    # Plot the average time series for specific regions and the correlation matrix side by side
+    cm = 1/2.54  # centimeters in inches
+    plt.figure(figsize=(9.5*cm, 5*cm))
+
+    # Plot average time series subplot
+    ax1 = plt.subplot(1, 2, 1)
+    for idx, sts in enumerate(regions_of_interest):
+        ax1.plot(average_all_specific_matrix[idx], label=f'Region {sts}')
+    ax1.set_xlabel('Repetition')
+    ax1.set_ylabel('Mean Signal (a.u)')
+    ax1.set_title('')
+    ax1.spines['top'].set_visible(False)
+    ax1.spines['right'].set_visible(False)
+
+    # Calculate Pearson correlation coefficient matrix
+    correlation_matrix = np.corrcoef(average_matrix)
+
+    # Plot correlation matrix subplot
+    ax2 = plt.subplot(1, 2, 2)
+    im = ax2.imshow(correlation_matrix, cmap='viridis', interpolation='nearest')
+    plt.colorbar(im, ax=ax2, label='Correlation')
+    ax2.set_xlabel('Regions')
+    ax2.set_ylabel('Regions')
+    ax2.set_title('')
+    ax2.axis('on')
+    ax2.set_xticks([])
+    ax2.set_yticks([])
+
+    # Save the figure
+    pp = os.path.basename(rs_file).replace(".nii.gz","")
+    pptt = pp.replace("_mcf_f","")
+    ppttx = pptt.replace("_EPI","") 
+    plt.suptitle(f'{ppttx}')
+    plt.tight_layout()
+    plt.savefig(os.path.join(out_path, f"{ppttx}.svg"), format='svg', transparent=True, dpi=100)
+    
+    # Show the figure
+    plt.show()

+ 109 - 0
code/Code4Figures/rsfmriPlots.py

@@ -0,0 +1,109 @@
+import glob
+import os
+import pandas as pd
+import nibabel as nib
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib import rcParams
+
+# Set font properties
+rcParams['font.family'] = 'times new roman'
+rcParams['font.size'] = 8
+
+# Define file paths
+path = r"C:\Users\aswen\Desktop\TestingData\Aswendt_qc_rsfmri_plot\proc_data"
+csv_voting_path = r"C:\Users\aswen\Desktop\TestingData\Aswendt_qc_rsfmri_plot\QC\votings.csv"
+
+# Function to read CSV files and store them in a dictionary
+script_dir = os.path.dirname(__file__)  # Use this line if running from a script file
+out_path = os.path.join(script_dir, '..', 'figures', 'supplement_figure_7')
+if not os.path.exists(out_path):
+    os.mkdir(out_path)
+
+# Read CSV file for voting
+df_voting = pd.read_csv(csv_voting_path, delimiter=",")
+
+# Find rs-fMRI EPI files
+SearchP = os.path.join(path, "**", "func", "*EPI.nii.gz")
+BetFiles = glob.glob(SearchP, recursive=True)
+
+for bb in BetFiles:
+    temp = os.path.dirname(bb)
+    Searchmask = os.path.join(temp, "**", "*AnnoSplit_parental.nii.gz")
+    mask = glob.glob(Searchmask, recursive=True)[0]
+    SearchBet = os.path.join(temp, "**", "*Bet.nii.gz")
+    Bet = glob.glob(SearchBet, recursive=True)[0]
+    
+    # Load the 4D EPI time series
+    dwi_img = nib.load(bb)
+    dwi_data = dwi_img.get_fdata()
+    
+    # Load 3D Bet image
+    bet_img = nib.load(Bet)
+    bet_data = bet_img.get_fdata()
+    
+    # Load 3D mask image
+    mask_img = nib.load(mask)
+    mask_data = mask_img.get_fdata()
+    
+    # Plotting
+    fig, axes = plt.subplots(2, 4, figsize=(4.72, 2.54), dpi=300)  # 12 cm wide, DPI=300
+    plt.subplots_adjust(wspace=0, hspace=0)  # Reduce space between subplots
+    
+    # First row: 4 equally distributed slices from the middle of the Bet image
+    middle_slice = bet_data.shape[2] // 2
+    for i, ax in enumerate(axes[0]):
+        slice_data = np.rot90(bet_data[..., middle_slice + 2*i], k=-1)
+        ax.imshow(slice_data, cmap='gray')
+        ax.set_xticks([])  # Remove x ticks
+        ax.set_yticks([])  # Remove y ticks
+        
+        # Find corresponding entry in CSV_voting
+        df_bb_voting = df_voting[df_voting['Pathes'] == bb]
+        if not df_bb_voting.empty and i == 0:
+            voting_value = df_bb_voting['Voting outliers (from 5)'].values[0]
+            if voting_value > 1:
+                ax.text(5, 15, f'Majority Votes: {voting_value}', color='red', fontweight='bold')
+            else:
+                ax.text(5, 15, f'Majority Votes: {voting_value}', color='yellow', fontweight='bold')
+        elif i == 0:
+            voting_value = 0
+            ax.text(5, 15, f'Majority Votes: {voting_value}', color='white', fontweight='bold')
+    
+    # Second row: same slices for the mask image
+    for i, ax in enumerate(axes[1]):
+        slice_data = np.rot90(mask_data[..., middle_slice + 2*i], k=-1)
+        ax.imshow(slice_data, cmap='gray')
+        ax.set_xticks([])  # Remove x ticks
+        ax.set_yticks([])  # Remove y ticks
+        
+    # Save image plots
+   
+    #plt.tight_layout()
+    plt.savefig(os.path.join(out_path, os.path.basename(bb).replace('.nii.gz', '_image_plot.svg')),transparent=True, format='svg')
+    #plt.close('all')
+    plt.show()
+    # Third row: Time course plot
+    time_points = dwi_data.shape[-1] 
+    selected_voxels = [485, 2485, 353, 2353, 985, 2985, 329, 2329, 198, 2198, 1080, 3080]
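+    # These values are region labels from the parental annotation, merged into one mask below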
+    bigmask = np.zeros_like(mask_data)
+    for ss in selected_voxels:
+        bigmask += (mask_data == ss)
+    
+    region_mean = []
+    for tt in range(20, dwi_data.shape[-1]):  # Skip the first 20 time points
+        tempdwi = dwi_data[..., tt]
+        region_mean.append(np.mean(tempdwi[bigmask != 0]))
+    
+    fig, ax = plt.subplots(figsize=(2.36,  2.54), dpi=300)  # 6 cm wide, DPI=300
+    ax.plot(range(20, time_points), region_mean, label='Merged Region')
+    ax.text(20, max(region_mean), f'Majority Votes: {voting_value}', color='red' if voting_value > 2 else 'red' if voting_value > 0 else "black", fontweight='bold')  # Add voting info as text with color scheme
+    ax.set_xlabel('Time Points')
+    ax.set_ylabel('Average Intensity')
+    ax.spines['top'].set_visible(False)  # Drop upper frame
+    ax.spines['right'].set_visible(False)  # Drop right frame
+    #plt.tight_layout()
+    
+    # Save time course plots
+    #plt.savefig(os.path.join(out_path, os.path.basename(bb).replace('.nii.gz', '_time_course_plot.png')),transparent=True, format='png')
+    plt.close('all')

+ 40 - 0
code/Code4Figures/svg_sorted.py

@@ -0,0 +1,40 @@
+import glob
+import os
+import matplotlib.pyplot as plt
+
+# Define file paths
+script_dir = os.path.dirname(__file__)  # Use this line if running from a script file
+out_path = os.path.join(script_dir, '..', 'figures', 'supplement9')
+
+# Get paths of SVG images
+svgImages_timeCourse = glob.glob(os.path.join(out_path, "*time_course_plot.svg"))
+svgImages_images = glob.glob(os.path.join(out_path, "*image_plot.svg"))
+
+# Calculate number of rows needed
+num_images = max(len(svgImages_timeCourse), len(svgImages_images))
+num_rows = (num_images + 1) // 2  # To make sure we have enough rows
+
+# Create a figure and subplots
+fig, axes = plt.subplots(num_rows, 2, figsize=(10, 5 * num_rows))
+
+# Flatten axes if there is only one row
+if num_rows == 1:
+    axes = [axes]
+
+# Plot images
+for i, (svg_time, svg_image) in enumerate(zip(svgImages_timeCourse, svgImages_images)):
+    row = i // 2
+    col = i % 2
+    ax = axes[row][col]
+    
+    # Load and display SVG images
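+    # Note: ax.text draws the raw SVG markup as plain text; matplotlib cannot render SVG images natively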
+    with open(svg_time, 'r') as f_time, open(svg_image, 'r') as f_image:
+        ax.text(0.5, 0.5, f_time.read(), va='center', ha='center')
+        ax.text(0.5, 0.5, f_image.read(), va='center', ha='center')
+        
+    ax.axis('off')
+
+plt.tight_layout()
+print("ahhhhhhh")
+# Save the figure
+plt.savefig(os.path.join(out_path, "combined_images_plot.svg"))

+ 85 - 0
code/Code4Figures/tsnrMaps.py

@@ -0,0 +1,85 @@
+import glob
+import os
+import pandas as pd
+import nibabel as nib
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib import rcParams
+
+# Set font properties
+rcParams['font.family'] = 'times new roman'
+rcParams['font.size'] = 8
+
+# Define file paths
+path = r"C:\Users\arefk\Desktop\Projects\testData\proc_data"
+csv_voting_path = r"C:\Users\arefk\Desktop\Projects\testData\proc_data2\QC\votings.csv"
+
+# Function to read CSV files and store them in a dictionary
+script_dir = os.path.dirname(__file__)  # Use this line if running from a script file
+out_path = os.path.join(script_dir, '..', 'figures', 'supplement_figure_6')
+if not os.path.exists(out_path):
+    os.mkdir(out_path)
+
+# Read CSV file for voting
+df_voting = pd.read_csv(csv_voting_path)
+
+# Find rs-fMRI EPI files
+SearchP = os.path.join(path, "**", "func", "*EPI.nii.gz")
+BetFiles = glob.glob(SearchP, recursive=True)
+
+for bb in BetFiles:
+    temp = os.path.dirname(bb)
+    Searchmask = os.path.join(temp, "**", "*AnnoSplit_parental.nii.gz")
+    mask = glob.glob(Searchmask, recursive=True)[0]
+    SearchBet = os.path.join(temp, "**", "*Bet.nii.gz")
+    Bet = glob.glob(SearchBet, recursive=True)[0]
+    
+    # Load the 4D EPI time series
+    dwi_img = nib.load(bb)
+    dwi_data = dwi_img.get_fdata()
+    
+    # Load 3D Bet image
+    bet_img = nib.load(Bet)
+    bet_data = bet_img.get_fdata()
+    
+    # Load 3D mask image
+    mask_img = nib.load(mask)
+    mask_data = mask_img.get_fdata()
+    
+    # Plotting
+    fig, axes = plt.subplots(2, 4, figsize=(4.72, 3.54), dpi=300)  # 12 cm wide, DPI=300
+    plt.subplots_adjust(wspace=0, hspace=0)  # Reduce space between subplots
+    
+    # First row: 4 equally distributed slices from the middle of the Bet image
+    middle_slice = bet_data.shape[2] // 2
+    for i, ax in enumerate(axes[0]):
+        slice_data = np.rot90(bet_data[..., middle_slice + 2*i], k=-1)
+        ax.imshow(slice_data, cmap='gray')
+        ax.set_xticks([])  # Remove x ticks
+        ax.set_yticks([])  # Remove y ticks
+    
+    # Second row: same slices for the mask image
+    for i, ax in enumerate(axes[1]):
+        slice_data = np.rot90(mask_data[..., middle_slice + 2*i], k=-1)
+        ax.imshow(slice_data, cmap='gray')
+        ax.set_xticks([])  # Remove x ticks
+        ax.set_yticks([])  # Remove y ticks
+    
+    # Third row: Time course plot
+    time_points = dwi_data.shape[-1]
+    selected_voxels = [485, 2485, 353, 2353, 985, 2985, 329, 2329, 198, 2198, 1080, 3080]
+    bigmask = np.zeros_like(mask_data)
+    for ss in selected_voxels:
+        bigmask += (mask_data == ss)
+    
+    region_mean = []
+    for tt in range(20, dwi_data.shape[-1]):  # Skip the first 20 time points
+        tempdwi = dwi_data[..., tt]
+        region_mean.append(np.mean(tempdwi[bigmask != 0]))
+    
+    fig, ax = plt.subplots(figsize=(2.36, 1.77), dpi=300)  # 6 cm wide, DPI=300
+    ax.plot(range(20, time_points), region_mean, label='Merged Region')
+    ax.set_xlabel('Time Points')
+    ax.set_ylabel('Average Intensity')
+    plt.tight_layout()
+    plt.show()