In [1]:
import numpy as np
import seaborn as sns
import pandas as pd
import dabest
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
In [2]:
# Create style

sns.set_style('ticks')
sns.set_context("talk")
In [3]:
df = pd.read_csv("/Users/philipb/surfdrive/PhD/BRAINLINKS/PILOT/COSY results/BL0P_DATA_MASTER.csv")
time = pd.read_csv("/Users/philipb/surfdrive/PhD/BRAINLINKS/PILOT/COSY results/timeseries/timeseries_parents.csv")

# Mother-minus-stranger difference scores for the NAcc OtherWin contrast and the like-winning ratings
df['NAcc OtherWin Mother_vs_Stranger'] = df['MOTHER_OtherWin'] - df['STRANGER_OtherWin']
df['Like Winning Mother_vs_Stranger'] = df['LW_mother'] - df['LW_stranger']

df
Out[3]:
id sex IOS_stranger IOS_mother IOS_father LW_stranger LW_mother LW_father LW_self MOTHER_SelfWin ... FATHER_OtherWin FATHER_BothWin STRANGER_SelfWin STRANGER_OtherWin STRANGER_BothWin Precuneus_FatherWin Precuneus_FatherNoWin difference_fatherwin_father_nowin NAcc OtherWin Mother_vs_Stranger Like Winning Mother_vs_Stranger
0 BL0P001 female 1 6 1 1 5 4 7 1.97 ... 3.90 4.07 3.34 -1.35 0.78 -0.87255 -0.55160 -0.32095 -0.23 4
1 BL0P002 female 2 5 5 4 4 4 5 7.08 ... 7.75 1.48 -1.68 4.96 1.50 -12.13085 -7.09480 -5.03605 -5.81 0
2 BL0P003 female 3 6 5 3 5 5 6 3.35 ... -3.28 -0.10 0.17 2.04 2.78 -1.14860 -2.04310 0.89450 3.65 2
3 BL0P004 male 2 5 5 4 4 4 4 2.61 ... 3.60 4.16 -2.93 -2.74 1.39 -0.31175 1.82980 -2.14155 5.56 0
4 BL0P005 male 1 3 5 2 5 5 6 5.65 ... 4.39 4.51 3.98 -0.53 -0.60 -5.68080 -4.39865 -1.28215 6.14 3
5 BL0P006 male 2 5 1 3 5 3 5 1.06 ... -3.76 -3.82 3.00 3.60 2.01 1.47000 0.79765 0.67235 -3.03 2
6 BL0P008 female 1 5 5 4 5 5 6 0.36 ... 4.73 -2.09 0.12 -8.33 -2.29 2.42060 1.65585 0.76475 6.13 1
7 BL0P009 female 3 6 4 3 5 5 5 4.20 ... -1.03 2.69 2.46 0.22 -1.28 -3.41700 -1.55890 -1.85810 -0.11 2
8 BL0P010 male 1 5 5 3 5 5 4 11.16 ... 2.12 8.16 -0.90 0.14 -1.62 -0.91625 2.11060 -3.02685 1.59 2
9 BL0P011 male 3 6 5 4 5 5 5 0.66 ... 3.53 0.60 3.60 0.37 2.36 -1.64970 -1.53685 -0.11285 0.88 1
10 BL0P012 male 5 6 5 4 5 5 3 0.18 ... -7.36 -6.75 0.44 1.28 2.45 6.94820 7.55035 -0.60215 -1.37 1
11 BL0P013 female 4 6 6 4 4 4 4 5.36 ... -2.37 -1.48 3.21 -0.75 1.23 3.59715 4.63130 -1.03415 5.15 0
12 BL0P014 female 5 6 5 4 4 4 4 6.23 ... 1.57 3.27 -0.81 -5.33 -5.16 -0.62745 5.50380 -6.13125 5.73 0
13 BL0P015 male 4 6 6 4 4 4 4 4.68 ... 1.31 4.65 1.58 1.17 -1.73 3.37860 3.27080 0.10780 -2.42 0
14 BL0P016 female 2 3 2 4 6 5 7 4.47 ... 6.67 2.25 6.98 1.65 5.22 7.23870 7.29760 -0.05890 -2.00 2
15 BL0P017 female 1 5 2 4 5 4 4 -0.04 ... 5.31 4.64 0.17 -0.91 -0.53 1.07650 -1.15245 2.22895 2.67 1
16 BL0P018 female 3 5 3 4 6 5 3 4.80 ... -1.79 -3.24 7.02 4.06 3.90 -1.09800 4.31195 -5.40995 -1.41 2
17 BL0P019 female 1 1 5 1 5 5 6 -1.81 ... 1.66 1.99 4.59 4.87 4.32 -0.36925 5.07390 -5.44315 -13.86 4
18 BL0P020 male 3 6 5 4 5 5 5 7.88 ... 2.18 2.08 0.32 -2.19 -2.32 0.30410 5.70815 -5.40405 5.65 1
19 BL0P021 male 1 7 7 3 7 6 5 0.39 ... 1.39 8.35 4.86 -0.95 1.52 1.09695 -2.51765 3.61460 -9.28 4
20 BL0P022 male 2 6 6 2 7 7 6 1.28 ... 3.52 0.81 -0.57 -0.85 1.76 0.97905 1.96260 -0.98355 0.26 5
21 BL0P023 female 3 5 5 4 4 4 5 -0.70 ... 4.70 4.49 3.63 1.45 -0.75 -3.72290 -3.89080 0.16790 -0.23 0
22 BL0P024 female 3 6 6 3 5 5 5 3.01 ... -0.60 -0.88 -0.20 1.32 -0.03 -5.54700 -1.91800 -3.62900 0.08 2
23 BL0P025 female 3 6 5 2 4 4 5 3.17 ... -0.91 -4.36 6.92 1.89 5.23 2.14235 4.81590 -2.67355 3.89 2
24 BL0P026 female 2 4 2 1 4 4 6 5.15 ... 0.87 0.59 2.00 3.47 -0.19 -1.19390 -0.53875 -0.65515 0.56 3
25 BL0P028 male 3 6 6 2 6 6 6 1.55 ... -1.91 -2.23 4.85 2.10 2.13 1.63555 6.17515 -4.53960 -5.20 4
26 BL0P029 female 2 6 4 3 5 5 7 -0.24 ... -1.34 0.11 0.43 3.25 4.15 6.32950 5.92520 0.40430 -0.90 2
27 BL0P030 female 2 6 6 4 6 6 7 4.16 ... -2.19 6.02 1.17 -8.68 -1.38 -3.63950 0.93965 -4.57915 9.00 2
28 BL0P031 female 2 7 7 3 6 6 5 -1.30 ... 0.20 -0.28 -0.76 3.70 5.63 0.25430 5.34610 -5.09180 -0.57 3
29 BL0P034 female 4 4 3 3 4 4 5 -0.62 ... 0.14 -0.45 -1.29 -4.26 -1.65 5.24890 5.69555 -0.44665 1.99 1

30 rows × 23 columns

In [4]:
# Melt the columns together to create a TIDY DATASET for easier analysis later on
tidy = pd.melt(df,
               id_vars=['id','sex'],
               value_vars=['MOTHER_SelfWin','MOTHER_OtherWin','MOTHER_BothWin',
                           'FATHER_SelfWin','FATHER_OtherWin','FATHER_BothWin',
                           'STRANGER_SelfWin','STRANGER_OtherWin','STRANGER_BothWin'],
               value_name='NAcc')

# Rename the column that has just been created for all conditions & targets
# USE df.rename(columns={'oldName1': 'newName1', 'oldName2': 'newName2'}, inplace=True)
tidy.rename(columns={'variable': 'Condition'}, inplace=True)

# Duplicate the condition column to be able to later split it into TARGET & Condition as two variables/columns
tidy['Target'] = tidy['Condition']

# Delete duplicate information for both condition & target variables.
# USE: df['range'] = df['range'].str.replace(',','-')
# USE with Wildcard / Regex: df['email'] = df['email'].str.replace(r'@.+', '@newcompany.com')

# For mother
tidy['Condition'] = tidy['Condition'].str.replace('MOTHER_BothWin','BothWin')
tidy['Condition'] = tidy['Condition'].str.replace('MOTHER_SelfWin','SelfWin')
tidy['Condition'] = tidy['Condition'].str.replace('MOTHER_OtherWin','OtherWin')

# For father
tidy['Condition'] = tidy['Condition'].str.replace('FATHER_BothWin','BothWin')
tidy['Condition'] = tidy['Condition'].str.replace('FATHER_SelfWin','SelfWin')
tidy['Condition'] = tidy['Condition'].str.replace('FATHER_OtherWin','OtherWin')

# For Stranger
tidy['Condition'] = tidy['Condition'].str.replace('STRANGER_BothWin','BothWin')
tidy['Condition'] = tidy['Condition'].str.replace('STRANGER_SelfWin','SelfWin')
tidy['Condition'] = tidy['Condition'].str.replace('STRANGER_OtherWin','OtherWin')

# Replace all target strings with just the Target and not target + condition
tidy['Target'] = tidy['Target'].str.replace('MOTHER.*','MOTHER',regex=True)
tidy['Target'] = tidy['Target'].str.replace('FATHER.*','FATHER',regex=True)
tidy['Target'] = tidy['Target'].str.replace('STRANGER.*','STRANGER',regex=True)

tidy.head()
Out[4]:
id sex Condition NAcc Target
0 BL0P001 female SelfWin 1.97 MOTHER
1 BL0P002 female SelfWin 7.08 MOTHER
2 BL0P003 female SelfWin 3.35 MOTHER
3 BL0P004 male SelfWin 2.61 MOTHER
4 BL0P005 male SelfWin 5.65 MOTHER
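
A more concise route to the same tidy frame, for reference (a sketch; it assumes every melted column name follows the TARGET_Condition pattern used above):

In [ ]:
# Equivalent construction: melt, then split e.g. 'MOTHER_SelfWin' into
# Target='MOTHER' and Condition='SelfWin' in a single step
tidy2 = pd.melt(df, id_vars=['id','sex'],
                value_vars=['MOTHER_SelfWin','MOTHER_OtherWin','MOTHER_BothWin',
                            'FATHER_SelfWin','FATHER_OtherWin','FATHER_BothWin',
                            'STRANGER_SelfWin','STRANGER_OtherWin','STRANGER_BothWin'],
                value_name='NAcc')
tidy2[['Target','Condition']] = tidy2['variable'].str.split('_', expand=True)
tidy2 = tidy2.drop(columns='variable')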
In [5]:
#time.head()

IOS Behavioral Results

In [6]:
# Load the different group comparisons into dabest objects, for both IOS and like-winning ratings

ios_mother_father = dabest.load(df, idx =("IOS_mother","IOS_father"),resamples = 10000)
ios_mother_stranger = dabest.load(df, idx = ("IOS_mother","IOS_stranger"),resamples = 10000)
ios_father_stranger = dabest.load(df, idx = ("IOS_father", "IOS_stranger"),resamples = 10000)
ios_all = dabest.load(df, idx = ("IOS_stranger", "IOS_mother", "IOS_father"),resamples = 10000)

like_winning_self_stranger = dabest.load(df, idx = ("LW_self", "LW_stranger"),resamples = 10000)
like_winning_all = dabest.load(df, idx = ("LW_self","LW_mother","LW_father","LW_stranger"),resamples = 10000)
In [7]:
# Create variables from pandas dataframe

IOS_mother = df.IOS_mother
IOS_father = df.IOS_father
IOS_stranger = df.IOS_stranger
LW_self = df.LW_self
LW_mother = df.LW_mother
LW_father = df.LW_father
LW_stranger = df.LW_stranger
In [8]:
ios_all
Out[8]:
DABEST v0.2.2
=============
             
Good morning!
The current time is Tue Jul 16 11:18:10 2019.

Effect size(s) with 95% confidence intervals will be computed for:
1. IOS_mother minus IOS_stranger
2. IOS_father minus IOS_stranger

10000 resamples will be used to generate the effect size bootstraps.
In [9]:
ios_all.mean_diff
Out[9]:
DABEST v0.2.2
=============
             
Good morning!
The current time is Tue Jul 16 11:18:13 2019.

The unpaired mean difference between IOS_stranger and IOS_mother is 2.83 [95%CI 2.1, 3.33].
The two-sided p-value of the Mann-Whitney test is 5.01e-09.

The unpaired mean difference between IOS_stranger and IOS_father is 2.1 [95%CI 1.3, 2.73].
The two-sided p-value of the Mann-Whitney test is 5.52e-06.

10000 bootstrap samples were taken; the confidence interval is bias-corrected and accelerated.
The p-value(s) reported are the likelihood(s) of observing the effect size(s),
if the null hypothesis of zero difference is true.

To get the results of all valid statistical tests, use `.mean_diff.statistical_tests`
In [10]:
ios_all.mean_diff.plot()
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-10-e78f2af8b8d9> in <module>
----> 1 ios_all.mean_diff.plot()

~/anaconda3/lib/python3.7/site-packages/dabest/_classes.py in plot(self, color_col, raw_marker_size, es_marker_size, swarm_label, contrast_label, swarm_ylim, contrast_ylim, custom_palette, swarm_desat, halfviolin_desat, halfviolin_alpha, float_contrast, show_pairs, group_summaries, fig_size, dpi, swarmplot_kwargs, violinplot_kwargs, slopegraph_kwargs, reflines_kwargs, group_summary_kwargs, legend_kwargs)
   1215         del all_kwargs["self"]
   1216 
-> 1217         out = EffectSizeDataFramePlotter(self, **all_kwargs)
   1218 
   1219         return out

~/anaconda3/lib/python3.7/site-packages/dabest/plotter.py in EffectSizeDataFramePlotter(EffectSizeDataFrame, **plot_kwargs)
    374                          gap_width_percent=1.5,
    375                          type=group_summaries, ax=rawdata_axes,
--> 376                          **group_summary_kwargs)
    377 
    378 

~/anaconda3/lib/python3.7/site-packages/dabest/plot_tools.py in gapped_lines(data, x, y, type, offset, ax, line_color, gap_width_percent, **kwargs)
    145     # Grab the order in which the groups appear,
    146     # depending on whether the x-column is categorical.
--> 147     if isinstance(data[x].dtype, pd.CategoricalDtype):
    148         group_order = pd.unique(data[x]).categories
    149     else:

AttributeError: module 'pandas' has no attribute 'CategoricalDtype'
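
This AttributeError comes from dabest 0.2.2 referencing a top-level pd.CategoricalDtype, which older pandas releases only exposed under pandas.api.types (the top-level alias appeared around pandas 0.24). A quick environment check, assuming upgrading is an option here:

In [ ]:
# Check the installed pandas version; dabest's plot() needs a release that
# exposes pd.CategoricalDtype at the top level (roughly pandas >= 0.24)
print(pd.__version__)
# If it is older, upgrading from a shell should unblock the plot calls below:
# pip install --upgrade pandas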
In [ ]:
ios_mother_father.mean_diff.results
In [ ]:
#ios_mother_father.mean_diff.plot()
In [ ]:
ios_mother_stranger.mean_diff.results
In [ ]:
#ios_mother_stranger.mean_diff.plot()
In [ ]:
ios_father_stranger.mean_diff.results
In [9]:
#ios_father_stranger.mean_diff.plot()
In [11]:
like_winning_self_stranger.mean_diff.results
Out[11]:
control test effect_size is_paired difference ci bca_low bca_high bca_interval_idx pct_low ... pct_interval_idx bootstraps resamples random_seed pvalue_welch statistic_welch pvalue_students_t statistic_students_t pvalue_mann_whitney statistic_mann_whitney
0 LW_self LW_stranger mean difference False -2.033333 95 -2.633333 -1.566667 (165, 9635) -2.566667 ... (250, 9750) [-3.066666666666667, -3.033333333333333, -2.99... 10000 12345 6.520706e-10 7.40297 6.162497e-10 7.40297 1.864945e-08 821.0

1 rows × 21 columns

In [12]:
like_winning_all.mean_diff
Out[12]:
DABEST v0.2.2
=============
             
Good morning!
The current time is Thu Jun 20 10:15:14 2019.

The unpaired mean difference between LW_self and LW_mother is -0.167 [95%CI -0.7, 0.3].
The two-sided p-value of the Mann-Whitney test is 0.465.

The unpaired mean difference between LW_self and LW_father is -0.4 [95%CI -0.933, 0.0667].
The two-sided p-value of the Mann-Whitney test is 0.12.

The unpaired mean difference between LW_self and LW_stranger is -2.03 [95%CI -2.63, -1.57].
The two-sided p-value of the Mann-Whitney test is 1.86e-08.

10000 bootstrap samples were taken; the confidence interval is bias-corrected and accelerated.
The p-value(s) reported are the likelihood(s) of observing the effect size(s),
if the null hypothesis of zero difference is true.

To get the results of all valid statistical tests, use `.mean_diff.statistical_tests`
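
As the summary notes, the full table of every valid test is available on the same object:

In [ ]:
# Full test-statistics table for all comparisons above
like_winning_all.mean_diff.statistical_tests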
In [13]:
like_winning_all.mean_diff.plot()
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-13-130a84056f06> in <module>
----> 1 like_winning_all.mean_diff.plot()

~/anaconda3/lib/python3.7/site-packages/dabest/_classes.py in plot(self, color_col, raw_marker_size, es_marker_size, swarm_label, contrast_label, swarm_ylim, contrast_ylim, custom_palette, swarm_desat, halfviolin_desat, halfviolin_alpha, float_contrast, show_pairs, group_summaries, fig_size, dpi, swarmplot_kwargs, violinplot_kwargs, slopegraph_kwargs, reflines_kwargs, group_summary_kwargs, legend_kwargs)
   1215         del all_kwargs["self"]
   1216 
-> 1217         out = EffectSizeDataFramePlotter(self, **all_kwargs)
   1218 
   1219         return out

~/anaconda3/lib/python3.7/site-packages/dabest/plotter.py in EffectSizeDataFramePlotter(EffectSizeDataFrame, **plot_kwargs)
    374                          gap_width_percent=1.5,
    375                          type=group_summaries, ax=rawdata_axes,
--> 376                          **group_summary_kwargs)
    377 
    378 

~/anaconda3/lib/python3.7/site-packages/dabest/plot_tools.py in gapped_lines(data, x, y, type, offset, ax, line_color, gap_width_percent, **kwargs)
    145     # Grab the order in which the groups appear,
    146     # depending on whether the x-column is categorical.
--> 147     if isinstance(data[x].dtype, pd.CategoricalDtype):
    148         group_order = pd.unique(data[x]).categories
    149     else:

AttributeError: module 'pandas' has no attribute 'CategoricalDtype'
In [11]:
plt.figure(figsize=(10, 6))
fig2 = sns.regplot(data = df, x = "IOS_stranger", y = "LW_stranger", ci= 95, x_jitter = 0.05)
sns.despine()
/Users/philipb/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
  return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
In [63]:
# Calculate the statistics for the correlation regplot

pearsonr(x = IOS_stranger, y = LW_stranger)
Out[63]:
(0.38511837295019863, 0.03558986089011792)
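
Since pearsonr returns a bare (r, p) tuple, a small helper makes the repeated correlation read-outs below easier to scan (report_r is a hypothetical convenience function, not part of scipy):

In [ ]:
# Hypothetical helper: print Pearson r and p with the variable pairing spelled out
def report_r(x, y, label):
    r, p = pearsonr(x, y)
    print(f"{label}: r({len(x) - 2}) = {r:.2f}, p = {p:.4f}")

report_r(IOS_stranger, LW_stranger, "IOS_stranger vs LW_stranger")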
In [62]:
# Calculate other correlational stats

pearsonr(x=IOS_mother, y=IOS_father)
Out[62]:
(0.3826203317832842, 0.03690579894255074)
In [8]:
# Plot mother father IOS results with some x axis jitter for clarity of visualization

plt.figure(figsize=(12,6))
fig3 = sns.regplot(data=df, x = IOS_mother, y = IOS_father, x_jitter = 0.2, ci=95)
sns.despine()
In [61]:
# Calculate mother stranger IOS correlation
pearsonr(x=IOS_mother, y=IOS_stranger)
Out[61]:
(0.3228296141628194, 0.08185729780369029)
In [47]:
# Plot mother IOS against like winning (mother) with some x axis jitter for clarity of visualization

plt.figure(figsize=(12,6))
fig3 = sns.regplot(data=df, x = IOS_mother, y = LW_mother, x_jitter = 0.2)
sns.despine()
In [48]:
pearsonr(x=IOS_mother, y=LW_mother)
Out[48]:
(0.18799634872859558, 0.3198082636989189)
In [42]:
# Plot mother vs. stranger IOS with some x axis jitter

plt.figure(figsize=(12,6))
fig4 = sns.regplot(data=df, x = IOS_mother, y = IOS_stranger, x_jitter = 0.1)
sns.despine()
In [60]:
pearsonr(x=IOS_father, y=IOS_stranger)
Out[60]:
(0.18217698971555255, 0.3352862666741778)
In [51]:
# Plot father vs. stranger IOS with some x axis jitter

plt.figure(figsize=(12,6))
fig5 = sns.regplot(data=df,x = IOS_father, y = IOS_stranger, x_jitter=0.1)
sns.despine()
In [32]:
# Plot mother IOS against like winning (mother) with seaborn
plt.figure(figsize=(12,6),dpi=150)
fig6 = sns.regplot(data=df,x=IOS_mother, y=LW_mother, x_jitter=0.15)
plt.ylim(0,7.5)
sns.despine()
In [24]:
pearsonr(x=IOS_father, y=LW_father)
Out[24]:
(0.5648753675455273, 0.0011452872087624104)
In [36]:
# Plot father IOS against like winning (father)
plt.figure(figsize=(12,8),dpi=600)
fig7 = sns.regplot(data=df,x = IOS_father, y = LW_father, x_jitter = 0.15, y_jitter = 0.15,color ='g')
plt.ylabel('Like winning father')
plt.ylim(0,7.5)
plt.xlim(0.8,7.5)

sns.despine()
In [32]:
# Plot stranger IOS against like winning (stranger) with some jitter for clarity of visualization

plt.figure(figsize=(12,8),dpi=150)
fig3 = sns.regplot(data=df, x = IOS_stranger, y = LW_stranger, x_jitter = 0.15, y_jitter = 0.15)
plt.ylabel('Like Winning Stranger')
plt.ylim(0,7.5)
plt.xlim(0.8,7.5)

sns.despine()
In [52]:
pearsonr(x=LW_stranger,y=IOS_stranger)
Out[52]:
(0.38511837295019863, 0.03558986089011792)
In [11]:
# Plot like winning (self) against stranger IOS
plt.figure(figsize=(12,6))
fig8 = sns.regplot(data=df, x = LW_self, y = IOS_stranger, x_jitter=0.1)
sns.despine()
In [49]:
pearsonr(x=LW_self,y=IOS_stranger)
Out[49]:
(-0.48525717906010263, 0.0065661683519000185)
In [11]:
# Plot like winning (mother) against stranger IOS
plt.figure(figsize=(12,6))
fig8 = sns.regplot(data=df, x = LW_mother, y = IOS_stranger, x_jitter=0.1)
sns.despine()
/Users/philipb/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
  return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
In [12]:
pearsonr(x=LW_mother,y=IOS_stranger)
Out[12]:
(-0.37330698909625054, 0.04216096486395725)
In [22]:
# Plot with hue for sex/gender

fig10 = sns.lmplot(data = df, x = "LW_self", y = "IOS_stranger", hue = "sex", height = 8, x_jitter = 0.15)
sns.despine()
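
To put numbers on the hue split above, the same correlation can be computed within each sex separately (a sketch over the same columns):

In [ ]:
# Per-sex Pearson correlations matching the lmplot hue groups
for sex, grp in df.groupby('sex'):
    r, p = pearsonr(grp['LW_self'], grp['IOS_stranger'])
    print(f"{sex}: r = {r:.2f}, p = {p:.4f}")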

ROI NAcc activation plots

In [12]:
fig1 = sns.catplot(data=tidy, x='Target', y='NAcc',hue='Condition',kind='bar',ci=95,height=12)
(fig1.set_axis_labels('Target','NAcc parameter estimates')
.set_titles("TEST"))
fig1.set(ylim=(-10,10))
Out[12]:
<seaborn.axisgrid.FacetGrid at 0x1a2565a3c8>
In [6]:
# Same activation bar graph, intended for the LEFT & RIGHT NAcc ROIs separately to validate
# averaging them into one result. Note: as written this still plots the averaged 'NAcc' column;
# the hemisphere-specific estimates would need to be substituted for y.

fig1 = sns.catplot(data=tidy, x='Target', y='NAcc',hue='Condition',kind='bar',ci=95,height=12)
(fig1.set_axis_labels('Target','NAcc parameter estimates')
.set_titles("TEST"))

Precuneus FatherWIN Cluster ROI results

In [24]:
fig2 = sns.catplot(data=df, y='Precuneus_FatherWin', kind='swarm',ci=95)
fig2 = sns.catplot(data=df, y='Precuneus_FatherNoWin', kind='swarm',ci=95,color='green')
fig2.set(ylim=(0,5))
Out[24]:
<seaborn.axisgrid.FacetGrid at 0x1a247600f0>
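
Because each catplot call opens its own figure, the two swarms above land in separate plots (and only the second gets the ylim). Melting the two columns first would put both conditions on one axis (a sketch):

In [ ]:
# One figure with both precuneus conditions side by side
prec = df.melt(value_vars=['Precuneus_FatherWin','Precuneus_FatherNoWin'],
               var_name='Condition', value_name='Precuneus')
sns.catplot(data=prec, x='Condition', y='Precuneus', kind='swarm', height=8)
sns.despine()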

Correlation of precuneus cluster fatherwin results and LW/IOS for father

In [43]:
sns.lmplot(data=df, x='IOS_father',y='Precuneus_FatherWin',ci=95,height=10, x_jitter=0.2)
Out[43]:
<seaborn.axisgrid.FacetGrid at 0x1a24d7ad30>
In [37]:
sns.lmplot(data=df, x='LW_father',y='Precuneus_FatherWin',ci=95,height=10, x_jitter=0.2)
Out[37]:
<seaborn.axisgrid.FacetGrid at 0x1a25ecbac8>
In [21]:
pearsonr(x=LW_father,y=df.Precuneus_FatherWin)
Out[21]:
(0.021973984278079912, 0.9082421979632522)
In [41]:
sns.lmplot(data=df, x='IOS_father',y='difference_fatherwin_father_nowin',ci=95,height=8, x_jitter=0.2)
Out[41]:
<seaborn.axisgrid.FacetGrid at 0x1a25dd7a90>
In [20]:
pearsonr(x=df.IOS_father,y=df.difference_fatherwin_father_nowin)
Out[20]:
(-0.2686549259967798, 0.15114353192447955)
In [42]:
sns.lmplot(data=df, x='LW_father',y='difference_fatherwin_father_nowin',ci=95,height=8, x_jitter=0.2)
Out[42]:
<seaborn.axisgrid.FacetGrid at 0x1a248a57b8>
In [22]:
pearsonr(x=df.LW_father,y=df.difference_fatherwin_father_nowin)
Out[22]:
(-0.15628355960878998, 0.4095345013966075)

Timeseries visualization
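
The lineplot cells below use df2, a long-format frame that is never constructed in the notebook as shown. One plausible reconstruction from the `time` dataframe loaded at the top (the wide layout and SUBJECT_CONDITION column naming are assumptions about the CSV, not facts taken from it):

In [ ]:
# Hypothetical reshape into the long format the plots expect; assumes `time`
# holds a 'time' column plus one column per subject/condition pair, e.g. 'BL0P001_MOTHER'
df2 = time.melt(id_vars='time', var_name='series', value_name='signal')
df2[['subject','condition']] = df2['series'].str.split('_', expand=True)
df2 = df2.drop(columns='series')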

In [31]:
# df2: long-format timeseries (subject, condition, time, signal), assumed to be
# derived from the `time` dataframe loaded above (see the sketch under this heading)
plt.figure(figsize=(15,10))
fig11 = sns.lineplot(data=df2, x = "time", y = "signal", hue = "condition", style = "condition", 
                     markers = True, units = "subject", estimator = None)
fig11.set(xlim=(None,6))
Out[31]:
[(0.55, 6)]
In [34]:
plt.figure(figsize=(15,10))
fig11 = sns.lineplot(data= df2, x = "time", y = "signal", hue = "condition", style = "condition", markers = True, ci = 95)
fig11.set(xlim=(None,6))
Out[34]:
[(0.55, 6)]

Visualization of neuro-behavioral relationships

In [20]:
# Quadratic regression fit of the mother-minus-stranger difference scores:
# NAcc (OtherWin) against like winning
plt.figure(figsize=(12,6),dpi=150)
fig12 = sns.regplot(data=df, x = 'Like Winning Mother_vs_Stranger', y = 'NAcc OtherWin Mother_vs_Stranger',order=2)
sns.despine()
In [27]:
# Run a residual plot for the data above to check whether a linear regression is an appropriate fit
sns.residplot(data=df, x = 'Like Winning Mother_vs_Stranger', y = 'NAcc OtherWin Mother_vs_Stranger',lowess=True)
Out[27]:
<matplotlib.axes._subplots.AxesSubplot at 0x1a1f9542b0>