Machine Learning with UCI EEG Dataset (Python)
'''mlModels uses a knn classifier and decision
trees to explore the UCI EEG Dataset'''
#bc ┌( ಠ_ಠ)┘@thirdBrainPrograms
import argparse
import logging
import matplotlib
import matplotlib.pyplot as plt
import os.path
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
#configure the ROOT logger so that everything (this script and any library
#that logs, e.g. matplotlib) is captured in one place
logger = logging.getLogger()
logger.setLevel(logging.DEBUG) #set the logging level
logFile = logging.FileHandler('eeg2.log', 'w') #create the log file; 'w' truncates it on every run
logFile.setLevel(logging.DEBUG) #set the logging level for file
logger.addHandler(logFile) #add handler to log at DEBUG level
#NOTE: because the root logger is at DEBUG, matplotlib emits a lot of
#(somewhat useful) information whenever a plot is generated.
#read in the csv with Fourier Transform already applied
def cleanDataAverage():
    '''cleanDataAverage cleans, aggregates, and averages the data from each
    individual absolute power trial; rows containing zeros (missing
    time-frequency data) are dropped and a binary condition tag is
    added for ml modeling'''
    raw = pd.read_csv('EEG_powers.csv')
    logging.debug('Raw absolute CSV loaded.')
    # strip the exporter's leftover index column ('Unnamed: ...')
    raw.drop(raw.columns[raw.columns.str.contains('unnamed', case=False)],
             axis=1, inplace=True)
    # keep only rows where no column holds a zero (missing-data marker)
    for col in raw.columns:
        raw = raw[raw[col] != 0]
    logging.debug('Zeros dropped from the analysis.')
    # position 3 of the subject id is 'a' for alcoholic -> status 1, else 0
    raw['status'] = (raw['subject'].str.slice(start=3, stop=4) == "a").astype(int)
    logging.debug('Conditional aspects configured.')
    # collapse every subject's trials into their per-column mean
    avgDf = raw.groupby(raw.subject).mean()
    logging.debug('Data aggregated and trial signals averaged.')
    # intermediary csv consumed by the knn/tree analyses
    avgDf.to_csv('EEG_averagedPowers.csv', index=True)
    logging.debug('Trial Average Ready for ML Analysis.')
def cleanDataMedian():
    '''cleanDataMedian cleans, aggregates, and computes the median of each
    individual absolute power trial; rows containing zeros (missing
    time-frequency data) are dropped and a binary condition tag is
    added for ml modeling'''
    raw = pd.read_csv('EEG_powers.csv')
    logging.debug('Raw absolute CSV loaded.')
    # strip the exporter's leftover index column ('Unnamed: ...')
    raw.drop(raw.columns[raw.columns.str.contains('unnamed', case=False)],
             axis=1, inplace=True)
    # keep only rows where no column holds a zero (missing-data marker)
    for col in raw.columns:
        raw = raw[raw[col] != 0]
    logging.debug('Zeros dropped from the analysis.')
    # position 3 of the subject id is 'a' for alcoholic -> status 1, else 0
    raw['status'] = (raw['subject'].str.slice(start=3, stop=4) == "a").astype(int)
    logging.debug('Conditional aspects configured.')
    # collapse every subject's trials into their per-column median
    medDf = raw.groupby(raw.subject).median()
    logging.debug('Data aggregated and trial signals median.')
    # intermediary csv consumed by the knn/tree analyses
    medDf.to_csv('EEG_medianPowers.csv', index=True)
    logging.debug('Median Ready for ML Analysis.')
def knnEEG_1Mean():
    '''Score a 1-nearest-neighbor classifier on the averaged EEG powers,
    separating alcoholic from control participants.'''
    data = pd.read_csv('EEG_averagedPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    model = KNeighborsClassifier(n_neighbors=1)
    model.fit(X_train, y_train)
    # fraction of held-out subjects classified correctly
    accuracy = np.mean(model.predict(X_test) == y_test)
    print('Test set score for KNN 1-Neighbor: {:.2f}'.format(accuracy))
def knnEEG_3Mean():
    '''Score a 3-nearest-neighbor classifier on the averaged EEG powers,
    separating alcoholic from control participants.'''
    data = pd.read_csv('EEG_averagedPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    model = KNeighborsClassifier(n_neighbors=3)
    model.fit(X_train, y_train)
    # fraction of held-out subjects classified correctly
    accuracy = np.mean(model.predict(X_test) == y_test)
    print('Test set score for KNN 3-Neighbors: {:.2f}'.format(accuracy))
def knnEEG_10Mean():
    '''Score a 10-nearest-neighbor classifier on the averaged EEG powers,
    separating alcoholic from control participants.'''
    data = pd.read_csv('EEG_averagedPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    model = KNeighborsClassifier(n_neighbors=10)
    model.fit(X_train, y_train)
    # fraction of held-out subjects classified correctly
    accuracy = np.mean(model.predict(X_test) == y_test)
    print('Test set score for KNN 10-Neighbors: {:.2f}'.format(accuracy))
def knn_eegPlotMean():
    '''Plot train vs. test accuracy of KNN classifiers with 1-15
    neighbors on the averaged multi-dimensional EEG dataset.'''
    data = pd.read_csv('EEG_averagedPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    # sweep the neighbor count and record accuracy on both splits
    neighbor_range = range(1, 16)
    train_scores, test_scores = [], []
    for k in neighbor_range:
        model = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
        train_scores.append(model.score(X_train, y_train))
        test_scores.append(model.score(X_test, y_test))
    plt.plot(neighbor_range, train_scores, label="Train Accuracy")
    plt.plot(neighbor_range, test_scores, label="Test Accuracy")
    plt.ylabel("Accuracy")
    plt.xlabel("n_neighbors")
    plt.legend()
    plt.show()
def eeg_treeMean():
    '''Fit a depth-limited decision tree on the averaged EEG powers and
    plot its feature importances for alcohol vs. control participants.'''
    df = pd.read_csv('EEG_averagedPowers.csv')
    #y = alcohol (1) / control (0)
    y = df.status
    #X = power features only (drop the subject id and the target)
    X = df.drop(['subject', 'status'], axis=1)
    # split into training and test sets of data
    X_train, X_test, y_train, y_test = train_test_split(X, y,
        test_size=0.30, random_state=42, stratify=df.status)
    #shallow tree to limit overfitting; fixed seed for reproducibility
    tree = DecisionTreeClassifier(max_depth=4, random_state=0)
    tree.fit(X_train, y_train)
    #FIX: derive the feature count from the data instead of hard-coding 60
    #so the plot stays correct if the channel/band selection changes
    n_features = X.shape[1]
    plt.barh(range(n_features), tree.feature_importances_, align='center')
    #label ticks with the exact columns the tree was fit on
    plt.yticks(np.arange(n_features), X.columns, fontsize=10)
    plt.xlabel('Feature Importance')
    plt.ylabel('Feature')
    plt.ylim(-1, n_features)
    plt.show()
def knnEEG_1Median():
    '''Score a 1-nearest-neighbor classifier on the median EEG powers,
    separating alcoholic from control participants.'''
    data = pd.read_csv('EEG_medianPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    model = KNeighborsClassifier(n_neighbors=1)
    model.fit(X_train, y_train)
    # fraction of held-out subjects classified correctly
    accuracy = np.mean(model.predict(X_test) == y_test)
    print('Test set score for KNN 1-Neighbor: {:.2f}'.format(accuracy))
def knnEEG_3Median():
    '''Score a 3-nearest-neighbor classifier on the median EEG powers,
    separating alcoholic from control participants.'''
    data = pd.read_csv('EEG_medianPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    model = KNeighborsClassifier(n_neighbors=3)
    model.fit(X_train, y_train)
    # fraction of held-out subjects classified correctly
    accuracy = np.mean(model.predict(X_test) == y_test)
    print('Test set score for KNN 3-Neighbors: {:.2f}'.format(accuracy))
def knnEEG_10Median():
    '''Score a 10-nearest-neighbor classifier on the median EEG powers,
    separating alcoholic from control participants.'''
    data = pd.read_csv('EEG_medianPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    model = KNeighborsClassifier(n_neighbors=10)
    model.fit(X_train, y_train)
    # fraction of held-out subjects classified correctly
    accuracy = np.mean(model.predict(X_test) == y_test)
    print('Test set score for KNN 10-Neighbors: {:.2f}'.format(accuracy))
def knn_eegPlotMedian():
    '''Plot train vs. test accuracy of KNN classifiers with 1-15
    neighbors on the median multi-dimensional EEG dataset.'''
    data = pd.read_csv('EEG_medianPowers.csv')
    # target: 1 = alcoholic, 0 = control
    labels = data.status
    # features: everything except the subject id and the target
    features = data.drop(['subject', 'status'], axis=1)
    # 70/30 split, stratified so both classes keep their ratio
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.30, random_state=42, stratify=data.status)
    # sweep the neighbor count and record accuracy on both splits
    neighbor_range = range(1, 16)
    train_scores, test_scores = [], []
    for k in neighbor_range:
        model = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
        train_scores.append(model.score(X_train, y_train))
        test_scores.append(model.score(X_test, y_test))
    plt.plot(neighbor_range, train_scores, label="Train Accuracy")
    plt.plot(neighbor_range, test_scores, label="Test Accuracy")
    plt.ylabel("Accuracy")
    plt.xlabel("n_neighbors")
    plt.legend()
    plt.show()
def eeg_treeMedian():
    '''Fit a depth-limited decision tree on the median EEG powers and
    plot its feature importances for alcohol vs. control participants.'''
    df = pd.read_csv('EEG_medianPowers.csv')
    #y = alcohol (1) / control (0)
    y = df.status
    #X = power features only (drop the subject id and the target)
    X = df.drop(['subject', 'status'], axis=1)
    # split into training and test sets of data
    X_train, X_test, y_train, y_test = train_test_split(X, y,
        test_size=0.30, random_state=42, stratify=df.status)
    #shallow tree to limit overfitting; fixed seed for reproducibility
    tree = DecisionTreeClassifier(max_depth=4, random_state=0)
    tree.fit(X_train, y_train)
    #FIX: derive the feature count from the data instead of hard-coding 60
    #so the plot stays correct if the channel/band selection changes
    n_features = X.shape[1]
    plt.barh(range(n_features), tree.feature_importances_, align='center')
    #label ticks with the exact columns the tree was fit on
    plt.yticks(np.arange(n_features), X.columns, fontsize=10)
    plt.xlabel('Feature Importance')
    plt.ylabel('Feature')
    plt.ylim(-1, n_features)
    plt.show()
def stratifyBarPlot():
    '''stratifyBarPlot shows the numbers of alcoholic vs. control
    participants in the actual study (before the model is trained)'''
    df = pd.read_csv('EEG_averagedPowers.csv')
    # class counts from the binary status column (1 = alcoholic)
    alcohol_n = int((df.status == 1).sum())
    control_n = int((df.status == 0).sum())
    plt.bar(['Alcohol', 'Control'], [alcohol_n, control_n])
    plt.ylabel('Number of Participants')
    plt.title('Number of Alcoholic vs. Control Participants')
    plt.show()
def StratifyPiePlot():
    '''StratifyPiePlot shows the ratio of alcoholic vs. control
    participants in the actual study (before the model is trained)'''
    df = pd.read_csv('EEG_averagedPowers.csv')
    # class counts from the binary status column (1 = alcoholic)
    alcohol_n = int((df.status == 1).sum())
    control_n = int((df.status == 0).sum())
    # slices are ordered and plotted counter-clockwise
    labels = 'Alcoholic', 'Control'
    sizes = [alcohol_n, control_n]
    # offset the second slice ('Control') slightly for emphasis
    explode = (0, 0.1)
    plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
            shadow=True, startangle=90)
    plt.axis('equal')  # equal aspect ratio keeps the pie circular
    plt.show()
def main():
    '''Parse the command-line arguments and dispatch to the requested
    analysis: 'print' emits a KNN test-set score selected with --knn;
    'graph' draws the figure selected with --plot. The cleaned csv
    files are (re)built first if they are missing.'''
    #initialize to parse command line arguments
    parse = argparse.ArgumentParser()
    #nearest neighbor scores only take effect when 'print' is called / plots need 'graph'
    parse.add_argument('command', metavar='<command>',
                       choices=['print', 'graph'], help='execute command')
    parse.add_argument('--knn', '-k', choices=['knn1Mean', 'knn3Mean',
                       'knn10Mean', 'knn1Median', 'knn3Median', 'knn10Median'],
                       help='Select number of neighbors')
    parse.add_argument('--plot', '-p',
                       choices=['knnMean', 'treeMean', 'knnMedian', 'treeMedian',
                                'stratifyBar', 'stratifyPie'],
                       help='Select plot (knn) or (tree)')
    #build the cleaned average file only if it does not exist yet
    if not os.path.exists('EEG_averagedPowers.csv'):
        cleanDataAverage()
    #build the cleaned median file only if it does not exist yet
    if not os.path.exists('EEG_medianPowers.csv'):
        cleanDataMedian()
    args = parse.parse_args()
    if args.command == 'print':
        #dispatch table for score printing.
        #BUG FIX: 'knn10Median' previously ran knnEEG_10Mean() by mistake.
        knn_actions = {
            'knn1Mean': knnEEG_1Mean,
            'knn3Mean': knnEEG_3Mean,
            'knn10Mean': knnEEG_10Mean,
            'knn1Median': knnEEG_1Median,
            'knn3Median': knnEEG_3Median,
            'knn10Median': knnEEG_10Median,
        }
        if args.knn in knn_actions:
            knn_actions[args.knn]()
    if args.command == 'graph':
        #dispatch table for the plotting commands
        plot_actions = {
            'knnMean': knn_eegPlotMean,
            'treeMean': eeg_treeMean,
            'knnMedian': knn_eegPlotMedian,
            'treeMedian': eeg_treeMedian,
            'stratifyBar': stratifyBarPlot,
            'stratifyPie': StratifyPiePlot,
        }
        if args.plot in plot_actions:
            plot_actions[args.plot]()
#run the CLI only when executed as a script (not on import)
if __name__ == "__main__":
    main()
Load & Preprocess UCI EEG Dataset (Python)
'''load_EEG takes the eeg_full dataset downloaded from
https://archive.ics.uci.edu/ml/machine-learning-databases/eeg-mld/
and loads the data, performs preprocessing, and prepares data
for use with the ml models in mlModels.py'''
#bc ┌( ಠ_ಠ)┘@thirdBrainPrograms
import logging
import mne
import numpy as np
import os
import pandas as pd
import tarfile
from mne.time_frequency import psd_welch
from scipy.integrate import simps
#configure the ROOT logger
#note: DEBUG level written to log file / INFO level printed to terminal (and file)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG) #set the logging level
logFile = logging.FileHandler('eeg.log', 'w') #create the log file; 'w' truncates it each run
logFile.setLevel(logging.DEBUG) #set the logging level for file
logger.addHandler(logFile) #add handler to log at DEBUG level
#stream out --> this will print to the terminal
stream = logging.StreamHandler()
stream.setLevel(logging.INFO) #set the logging level
logger.addHandler(stream) #add handler to log at INFO level
def importTarTransform():
    '''Import the per-subject tar archives of the UCI EEG dataset,
    apply a Welch PSD (Fourier transform) to obtain the absolute power
    within each frequency band and channel, and write the result to
    EEG_powers.csv as an intermediary step.

    Fixes vs. the previous revision:
    - the fully-qualified ``filename`` path is now actually used when
      opening each archive (it was computed but ignored)
    - bare ``except:`` clauses narrowed so KeyboardInterrupt/SystemExit
      are no longer swallowed
    - file paths built with os.path.join instead of hard-coded
      backslashes, and the tar file is closed via a context manager'''
    #pick out the channels of interest --> modify if needed (12 channels)
    channels = ['FP1', 'FP2', 'F3', 'F4', 'F7', 'F8', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2']
    #frequency band cutoff values --> use 0.1HP - 40LP
    band_dict = {"delta": [0.1, 4], "theta": [4, 7], "alpha": [7, 12],
                 "beta": [12, 30], "gamma": [30, 40]}
    #sampling rate of the recordings (Hz)
    sfreq = 256
    #output header: subject id followed by one column per channel/band pair
    index = ["subject"]
    for channel in channels:
        for band in band_dict:
            index.append(channel + " " + band)
    #accumulator dataframe for the absolute powers
    df_abs_power = pd.DataFrame(index=[], columns=index)
    #working folder for the extracted archives --> create it if missing
    if not os.path.exists('eeg_folder'):
        os.makedirs('eeg_folder')
        logging.debug('eeg_folder created')
    subjects = []  #ids of the subjects successfully extracted
    k = 0  #running row index for the output dataframe
    logging.debug("Loaded:")
    #root prefix for subject files in dataset
    for root in ['co2a0000', 'co2c0000', 'co2c1000', 'co3a0000', 'co3c0000']:
        #range of subject ID numbers in the dataset
        for i in range(337, 463):
            subject = root + str(i)
            #CHANGE TO ENTER YOUR COMPUTER'S PATH to the downloaded dataset
            filename = os.path.join("C:\\Users\\Owner\\Desktop\\eeg_full",
                                    subject + ".tar.gz")
            try:
                #FIX: open the configured full path; previously a cwd-relative
                #name was opened and `filename` went unused
                with tarfile.open(filename, "r") as tar:
                    tar.extractall(path='eeg_folder')
            except (OSError, tarfile.TarError):
                #some subject numbers are absent from the dataset --> skip them
                continue
            #up to ~122 trial files per subject
            for j in range(121):
                try:
                    #read one trial from the newly unzipped folder
                    data = np.genfromtxt(
                        os.path.join('eeg_folder', subject,
                                     subject + ".rd.{:03d}.gz".format(j)),
                        dtype=None, encoding=None)
                    df_raw = pd.DataFrame(data)
                    #store channel and frequency info as an mne object
                    info = mne.create_info(channels, sfreq)
                    #per-channel sample lists (genfromtxt names columns f0..fN)
                    samples = [list(df_raw.loc[df_raw['f1'] == channel]['f3'])
                               for channel in channels]
                    raw = mne.io.RawArray(samples, info, verbose=False)
                    #one output row per trial
                    df_a = pd.DataFrame(index=[k], columns=index)
                    k += 1
                    df_a['subject'] = subject
                    for channel in channels:
                        #Welch PSD with 0.1 Hz high-pass / 40 Hz low-pass
                        spectra, freqs = psd_welch(raw, fmin=0.1, fmax=40,
                                                   n_fft=256, picks=channel,
                                                   verbose=False)
                        #integrate the PSD over each band --> absolute power
                        for band, (low, high) in band_dict.items():
                            idx_band = np.logical_and(freqs > low, freqs <= high)
                            freq_res = freqs[1] - freqs[0]
                            abs_power = simps(spectra[0][idx_band], dx=freq_res)
                            df_a[channel + " " + band] = abs_power
                    #append row to dataframe (DataFrame.append is deprecated)
                    df_abs_power = pd.concat([df_abs_power, df_a])
                except Exception:
                    #best-effort: skip missing/empty/unparsable trial files,
                    #but no longer a bare except (Ctrl-C still interrupts)
                    pass
            subjects.append(subject)
            #log each subject that was processed (file + terminal handlers)
            logging.debug(subject)
    #write the dataframe with the absolute powers to csv for
    #further manipulation - intermediary step
    df_abs_power.to_csv('EEG_powers.csv', index=True)
    logging.info('EEG_powers.csv successfully created from loaded data.')
    logging.info('Fourier Transform applied to loaded data.')
def main():
    '''Entry point: load the raw tar archives and write EEG_powers.csv.'''
    #run the data import
    importTarTransform()
#only run the import pipeline when executed as a script (not on import)
if __name__ == "__main__":
    main()
Kandinsky and Compartmentalization

Delicate Tension, Wassily Kandinsky, Thyssen-Bornemisza Museum, Wikimedia Commons, Date Accessed: November 1, 2021.
“Each color lives by its mysterious life.” – Wassily Kandinsky
Wassily Kandinsky’s Delicate Tension is a great example of the ability of color and shape to tell their own story. Kandinsky felt that circles and triangles were the two most primary and conflicting shapes, which creates a sense of tension in this watercolor painting. Larger shapes are subdivided by intersecting lines which create a latticework of color. Primary colors dominate these subdivisions giving each compartment a life of its own. These compartments lend a sense of harmony to what would otherwise be a conflicting and dissonant image. Each independent color is surrounded by barriers that keep it from blending into something our mind would struggle to disentangle. Every color remains unimpeded by the lack of agreement surrounding it. Sometimes our mind can function like the colors in Kandinsky’s painting, separating thoughts and ideas into boxes which don’t agree with the prevailing narrative. Those thoughts may even have lives of their own with rationales that don’t fit with our sense of self. These micro conflicts can occasionally create a sense of tension within. Yet when we take a step back, everything seems perfectly ordered and those compartmentalized thoughts appear structured by intention. Perhaps that is how Kandinsky felt while composing this work. There is a balance between harmony and dissonance, and that balance rests on a series of thin lines. Conflicting narratives of self can be subdivided to expose commonalities and discordances. All the while, differences can be comfortably resolved through the process of compartmentalization.
EEG Hardware Predictive Maintenance (Python)
'''assessNet is a program designed to perform preventative maintenance
on nets used in EEG recordings.
must be run in the directory containing:
exported RedCap session data (csv)
net noise information (xlsx)
warranty information (xlsx)
for organization, all this information should always be kept in the same directory'''
#bc ┌( ಠ_ಠ)┘@thirdBrainPrograms
import sys
import matplotlib.pyplot as plt
import pandas as pd
#import data at beginning to speed processing
#add your file to be analyzed --> add .csv --> exported from RedCap
#analyzed in four week intervals
sessData = pd.read_csv(r'COBRAHardwareTrackin_DATA_2021-10-21_1606.csv')
#read in the netnoise data --> review in four week intervals
noiseData = pd.read_excel('COBRA_nets.xlsx', sheet_name='WeeklyNetNoise10.18_11.15')
#warranty info
data = pd.read_excel('COBRA_nets.xlsx', sheet_name='WarrantyInfo', nrows=12)
#drop the exporter's leftover index columns (named 'Unnamed: N')
data = data.loc[:, ~data.columns.str.contains('^Unnamed')]
#rows whose warranty has not expired (Expired == 0); printed on exit
i=data[data['Expired']== 0]
#implement password protection
#NOTE(review): the password is hard-coded in plain text -- this is a
#placeholder gate, not real security; replace before sharing the script
password = input("Enter a password to unlock the analysis: ")
if password != 'your password here':
    sys.exit()
else:
    print('\n')
    #third brain programs logo
    print(' ┌( ಠ_ಠ)┘')
    print('\n')
    print('WELCOME TO ASSESS-NET!!')
    print('\n')
#interactive loop: report hardware stats for one net per iteration
while True:
    #session analysis pipeline
    #checks for general hardware performance over time for a single net
    #pick your cap --> assessNet imports all the session data for a particular EEG net
    #for the past four weeks
    x = sessData.loc[sessData['cap'] == input('Please enter the three letter cap identifier: ')]
    #cap identifier as plain text (used to match the noise sheet below)
    z = x['cap'].to_string(index=False)
    print("\n")
    print("SESSION")
    print("\n")
    #checks for first initial net noise stats
    print("The session net noise stats for the past four weeks for this cap are:")
    print(x['netnoisnum'].to_string(index=False))
    print("\n")
    #these next three metrics are meant to check net degradation over time
    #during the course of the experiment.
    #checks the first impedance
    print("The first timepoint impedance stats for the past four weeks for this cap are:")
    print(x['firstimpbad'].to_string(index=False))
    print("\n")
    #checks the second impedance
    print("The second timepoint impedance stats for the past four weeks for this cap are:")
    print(x['secondimpbad'].to_string(index=False))
    print("\n")
    #checks the third impedance
    print("The third timepoint impedance stats for the past four weeks for this cap are:")
    print(x['thirdimpbad'].to_string(index=False))
    print("\n")
    #any bad channels noted in net log?
    print("The bad channels for the past four weeks for this cap are:")
    print(x['chan'].to_string(index=False))
    print("\n")
    #results of cumulative weekly testing --> baseline net noise tests
    #for the past four weeks
    y = noiseData.loc[noiseData['Net'] == z]
    print("\n")
    print("NET NOISE")
    print("\n")
    print("The cumulative questionable channels for the past four weeks for this net are: ")
    print("\n")
    print(y['Cumulative Channels'].to_string(index=False))
    print("\n")
    print("The cumulative bad channels for the past four weeks for this net are: ")
    print("\n")
    print(y['Bad Channels'].to_string(index=False))
    print("\n")
    #pie chart of questionable / bad / good channels out of the net's 128
    #get the data
    questionableCh = y['Cumulative Count'].to_string(index=False)
    badCh = y['Bad Count'].to_string(index=False)
    #change to int for chart
    #NOTE(review): int() assumes exactly one noise row matches this net --
    #multiple (or zero) matching rows would raise ValueError; confirm sheet
    questInt = int(questionableCh)
    badInt = int(badCh)
    #remaining channels out of the 128-channel net are considered good
    goodChannels = 128 - (questInt + badInt)
    #create the piechart
    pieLabels = ['Number of Questionable Channels : ' + str(questInt), 'Number of Bad Channels: ' + str(badInt), 'Number of Good Channels: ' + str(goodChannels)]
    pieData = [questInt, badInt, goodChannels]
    plt.pie(pieData, labels = pieLabels)
    plt.legend(loc="upper left")
    plt.show()
    #ask user if they want to get more net stats
    anotherNet = input('Would you like to check another net (Yes or No): ')
    if anotherNet.lower() == ('no'):
        break
    else:
        continue
#warranty reminder at the end --> reminds the user of nets still
#within the warranty timeframe (rows with Expired == 0, selected above)
print('\n')
print('As a reminder, these nets are still under warranty: ')
print('\n')
print(i.to_string(index=False))