{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# PSEUDo vs. DTW: EEG Data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In this experiment we will compare the LSH algorithm of PSEUDo to DTW using an EEG dataset. The metrics we will be comparing these two algorithms with are **computing time**, **recall** and **precision**." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We first load the EEG data and convert it to a numpy array" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "from time import time\n", "\n", "datafile = 'data/21.csv'\n", "\n", "N = 100\n", "T = 100\n", "M = 100000\n", "\n", "data = np.random.uniform(size=(M, T, N))\n", "\n", "#and convert it to numpy array:\n", "data = np.array(data, dtype = \"float32\")" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "from sklearn import preprocessing\n", "\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We sample a number of subwindows which will be used as query for the search algorithms" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[18816, 57332, 185890, 63757, 164364, 111536, 111858, 37823, 45128, 143695]\n" ] } ], "source": [ "import random\n", "from time import time\n", "\n", "targets = random.sample(list(range(len(data))), 10)\n", "print(targets)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## SAX" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# from tslearn.piecewise import SymbolicAggregateApproximation\n", "\n", "# t0 = time()\n", "# sax = SymbolicAggregateApproximation(n_segments=T, alphabet_size_avg=10)\n", "# sax_data = sax.fit_transform(data)\n", "# print('Done! Took {:.2f} seconds ({:.1f} minutes).'.format(time() - t0, (time() - t0) / 60))\n", "# sax_preprocess_time = time() - t0" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# t0 = time()\n", "# all_sax_candidates = []\n", "# for i, target in enumerate(targets):\n", "# t1 = time()\n", "# query = sax_data[target]\n", "# sax_distances = [np.linalg.norm(query - window) for window in sax_data]\n", "# print('Target #{} done! Took {:.2f} seconds ({:.1f} minutes).'.format(i, time() - t1, (time() - t1) / 60))\n", "# sax_candidates = sorted(range(len(sax_distances)), key=lambda k: sax_distances[k])\n", "# all_sax_candidates.append(sax_candidates)\n", "# sax_time = time() - t0" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## PSEUDo" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For the LSH algorithm some preprocessing is done to find the right LSH parameters." 
] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Preprocessing:\n", "r = 50\n", "r = 25.0\n", "r = 37.5\n", "r = 18.75\n", "r = 28.125\n", "r = 42.1875\n", "r = 21.09375\n", "r = 31.640625\n", "r = 15.8203125\n", "r = 23.73046875\n", "r = 35.595703125\n", "r = 17.7978515625\n", "r = 26.69677734375\n", "r = 40.045166015625\n", "r = 20.0225830078125\n", "r = 30.03387451171875\n", "r = 15.016937255859375\n", "r = 22.525405883789062\n", "r = 33.788108825683594\n", "r = 16.894054412841797\n", "r = 25.341081619262695\n", "r = 38.01162242889404\n", "r = 19.00581121444702\n", "r = 28.508716821670532\n", "r = 42.7630752325058\n", "r = 21.3815376162529\n", "r = 32.07230642437935\n", "r = 16.036153212189674\n", "r = 24.05422981828451\n", "r = 36.08134472742677\n", "r = 18.040672363713384\n", "r = 27.061008545570076\n", "r = 40.59151281835511\n", "r = 20.295756409177557\n", "r = 30.443634613766335\n", "r = 15.221817306883167\n", "r = 22.83272596032475\n", "r = 34.24908894048713\n", "r = 17.124544470243563\n", "r = 25.686816705365345\n", "r = 38.53022505804802\n", "r = 19.26511252902401\n", "r = 28.897668793536013\n", "Mean: 28.90310172004373\n", "Stdev: 0.16262486215758712\n", "Ratio mean: 0.9852851438976057\n", "Ratio stdev: 0.006220007778511879\n", "Theta: 28.483529575677153\n", "r: 2.6557769397251842\n", "Preprocessing time: 34.78849124908447\n", "Preprocessing done. Took 34.79 seconds (0.6 minutes).\n" ] } ], "source": [ "import sys\n", "\n", "sys.path.insert(0, '../Flaskserver')\n", "import importlib\n", "from pseudo import preprocess\n", "import _lsh\n", "\n", "topk_dtw = []\n", "\n", "print('Preprocessing:')\n", "t0 = time()\n", "r,a,sd = preprocess(data, data.shape[2])\n", "print('Preprocessing done. Took {:.2f} seconds ({:.1f} minutes).'.format(time() - t0, (time() - t0) / 60))\n", "pseudo_preprocess_time = time() - t0" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we run the LSH algorithm for all targets and calculate the most similar subwindows" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from collections import defaultdict\n", "t0 = time()\n", "total_lsh_times = []\n", "all_lsh_candidates = []\n", "for i, target in enumerate(targets):\n", " t1 = time()\n", " query = data[target]\n", " print('doing lsh')\n", " lsh_candidates, lsh_distances, _ = _lsh.lsh(data, query, r, a, sd, 0)\n", "# topk_dtw.append(candidates)\n", " dict = defaultdict(int)\n", " for l in range(len(lsh_candidates)):\n", " for k in range(len(lsh_candidates[0])):\n", " for a in range(len(lsh_candidates[0][0])):\n", " dict[lsh_candidates[l][k][a]] += lsh_distances[l][k][a]\n", " sorted_dict = {k: v for k, v in sorted(dict.items(), key=lambda item: item[1])}\n", " candidates = list(sorted_dict.keys())\n", " total_lsh_times.append(time()-t1)\n", " print('Target #{} done! Took {:.2f} seconds ({:.1f} minutes).'.format(i, time() - t1, (time() - t1) / 60))\n", " all_lsh_candidates.append(candidates)\n", " \n", "# print(candidates[0:10])\n", "print('Done! Took {:.2f} seconds ({:.1f} minutes).'.format(time() - t0, (time() - t0) / 60))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "doing lsh\n", "Target #0 done! Took 12.94 seconds (0.2 minutes).\n", "doing lsh\n", "Target #1 done! Took 13.33 seconds (0.2 minutes).\n", "doing lsh\n", "Target #2 done! 
, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [
"doing lsh\n",
"Target #0 done! Took 12.94 seconds (0.2 minutes).\n",
"doing lsh\n",
"Target #1 done! Took 13.33 seconds (0.2 minutes).\n",
"doing lsh\n",
"Target #2 done! Took 13.10 seconds (0.2 minutes).\n",
"doing lsh\n",
"Target #3 done! Took 13.03 seconds (0.2 minutes).\n",
"doing lsh\n",
"Target #4 done! Took 13.21 seconds (0.2 minutes).\n",
"doing lsh\n" ] } ], "source": [
"from collections import defaultdict\n",
"t0 = time()\n",
"total_lsh_times_ed = []\n",
"all_lsh_candidates_ed = []\n",
"for i, target in enumerate(targets):\n",
"    t1 = time()\n",
"    query = data[target]\n",
"    print('doing lsh')\n",
"    lsh_candidates, lsh_distances, _ = _lsh.lsh(data, query, r, a, sd, 1)\n",
"#     topk_dtw.append(candidates)\n",
"    # same aggregation as above; again, do not reuse `a` as a loop variable\n",
"    dist_sums = defaultdict(int)\n",
"    for l in range(len(lsh_candidates)):\n",
"        for k in range(len(lsh_candidates[0])):\n",
"            for j in range(len(lsh_candidates[0][0])):\n",
"                dist_sums[lsh_candidates[l][k][j]] += lsh_distances[l][k][j]\n",
"    sorted_dict = {key: v for key, v in sorted(dist_sums.items(), key=lambda item: item[1])}\n",
"    candidates = list(sorted_dict.keys())\n",
"    total_lsh_times_ed.append(time() - t1)\n",
"    print('Target #{} done! Took {:.2f} seconds ({:.1f} minutes).'.format(i, time() - t1, (time() - t1) / 60))\n",
"    all_lsh_candidates_ed.append(candidates)\n",
"\n",
"#     print(candidates[0:10])\n",
"print('Done! Took {:.2f} seconds ({:.1f} minutes).'.format(time() - t0, (time() - t0) / 60))" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## DTW" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "We do the same for DTW, using a Sakoe-Chiba band of 5% of the window length as a global constraint." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"from scipy.spatial.distance import cdist\n",
"from tslearn.metrics import dtw_path_from_metric\n",
"from tslearn.metrics import dtw\n",
"from time import time\n",
"\n",
"t0 = time()\n",
"total_dtw_times = []\n",
"all_dtw_candidates = []\n",
"for i, target in enumerate(targets):\n",
"    t1 = time()\n",
"    query = data[target]\n",
"    dtw_distances = [dtw(window, query, global_constraint='sakoe_chiba', sakoe_chiba_radius=int(0.05 * T)) for window in data]\n",
"    dtw_candidates = sorted(range(len(dtw_distances)), key=lambda k: dtw_distances[k])\n",
"    print('Target #{} done! Took {:.2f} seconds ({:.1f} minutes).'.format(i, time() - t1, (time() - t1) / 60))\n",
"    total_dtw_times.append(time() - t1)\n",
"    all_dtw_candidates.append(dtw_candidates)\n",
"print('Done! Took {:.2f} seconds ({:.1f} minutes).'.format(time() - t0, (time() - t0) / 60))" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## ED" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# rank all windows by their Euclidean distance to the query\n",
"t0 = time()\n",
"all_ed_candidates = []\n",
"total_ed_times = []\n",
"for i, target in enumerate(targets):\n",
"    t1 = time()\n",
"    query = data[target]\n",
"    ed_distances = [np.linalg.norm(query - window) for window in data]\n",
"    print('Target #{} done! Took {:.2f} seconds ({:.1f} minutes).'.format(i, time() - t1, (time() - t1) / 60))\n",
"    ed_candidates = sorted(range(len(ed_distances)), key=lambda k: ed_distances[k])\n",
"    total_ed_times.append(time() - t1)\n",
"    all_ed_candidates.append(ed_candidates)\n",
"print('Done! Took {:.2f} seconds ({:.1f} minutes).'.format(time() - t0, (time() - t0) / 60))" ] }
, { "cell_type": "markdown", "metadata": {}, "source": [ "## Accuracy Comparison" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "We compare the LSH candidates to the DTW candidates and report recall, precision and the number of pruned candidates. The same bookkeeping is repeated once per method in the cells below; a compact helper that computes the same metrics is sketched after them." ] }
, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# PSEUDo (DTW-based hashing) vs. the exact DTW ranking\n",
"k = 100\n",
"total_recall_pseudo = []\n",
"total_precision_pseudo = []\n",
"total_precision2_pseudo = []\n",
"total_pruned_pseudo = []\n",
"for i in range(len(targets)):\n",
"    top_10_percent = int(len(all_lsh_candidates[i]) * 0.1)\n",
"    pruned = int(100 * (1 - len(all_lsh_candidates[i]) / len(all_dtw_candidates[i])))\n",
"#     print(\"Pruned: \" + str(pruned) + \"%\")\n",
"    recall = 0\n",
"    for index in all_dtw_candidates[i][0:k]:\n",
"        if index in all_lsh_candidates[i]:\n",
"            recall += 1\n",
"#     print(\"Recall: \" + str(100*recall/k) + \"%\")\n",
"\n",
"    precision = 0\n",
"    for index in all_dtw_candidates[i][0:k]:\n",
"        if index in all_lsh_candidates[i][0:k]:\n",
"            precision += 1\n",
"#     print(\"Precision: \" + str(100*precision/k) + \"%\")\n",
"\n",
"    precision2 = 0\n",
"    for index in all_lsh_candidates[i][0:k]:\n",
"        if index in all_dtw_candidates[i][0:top_10_percent]:\n",
"            precision2 += 1\n",
"#     print(\"Precision 10th percentile: \" + str(100*precision2/k) + \"%\")\n",
"    total_pruned_pseudo.append(pruned)\n",
"    total_recall_pseudo.append(recall / k)\n",
"    total_precision_pseudo.append(precision / k)\n",
"    total_precision2_pseudo.append(precision2 / k)\n",
"\n",
"print(\"=================================================\")\n",
"print(\"Total pruned: \" + str(np.mean(total_pruned_pseudo)) + \"%\")\n",
"print(\"Total recall: \" + str(100 * np.mean(total_recall_pseudo)) + \"%\")\n",
"print(\"Total precision: \" + str(100 * np.mean(total_precision_pseudo)) + \"%\")\n",
"print(\"Total precision 2: \" + str(100 * np.mean(total_precision2_pseudo)) + \"%\")" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# PSEUDo (ED-based hashing) vs. the exact DTW ranking\n",
"total_recall_pseudo_ed = []\n",
"total_precision_pseudo_ed = []\n",
"total_precision2_pseudo_ed = []\n",
"total_pruned_pseudo_ed = []\n",
"for i in range(len(targets)):\n",
"    top_10_percent = int(len(all_lsh_candidates_ed[i]) * 0.1)\n",
"    pruned = int(100 * (1 - len(all_lsh_candidates_ed[i]) / len(all_dtw_candidates[i])))\n",
"#     print(\"Pruned: \" + str(pruned) + \"%\")\n",
"    recall = 0\n",
"    for index in all_dtw_candidates[i][0:k]:\n",
"        if index in all_lsh_candidates_ed[i]:\n",
"            recall += 1\n",
"#     print(\"Recall: \" + str(100*recall/k) + \"%\")\n",
"\n",
"    precision = 0\n",
"    for index in all_dtw_candidates[i][0:k]:\n",
"        if index in all_lsh_candidates_ed[i][0:k]:\n",
"            precision += 1\n",
"#     print(\"Precision: \" + str(100*precision/k) + \"%\")\n",
"\n",
"    precision2 = 0\n",
"    for index in all_lsh_candidates_ed[i][0:k]:\n",
"        if index in all_dtw_candidates[i][0:top_10_percent]:\n",
"            precision2 += 1\n",
"#     print(\"Precision 10th percentile: \" + str(100*precision2/k) + \"%\")\n",
"    total_pruned_pseudo_ed.append(pruned)\n",
"    total_recall_pseudo_ed.append(recall / k)\n",
"    total_precision_pseudo_ed.append(precision / k)\n",
"    total_precision2_pseudo_ed.append(precision2 / k)\n",
"\n",
"print(\"=================================================\")\n",
"print(\"Total pruned: \" + str(np.mean(total_pruned_pseudo_ed)) + \"%\")\n",
"print(\"Total recall: \" + str(100 * np.mean(total_recall_pseudo_ed)) + \"%\")\n",
"print(\"Total precision: \" + str(100 * np.mean(total_precision_pseudo_ed)) + \"%\")\n",
"print(\"Total precision 2: \" + str(100 * np.mean(total_precision2_pseudo_ed)) + \"%\")" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# Euclidean distance ranking vs. the exact DTW ranking\n",
"total_recall_ed = []\n",
"total_precision_ed = []\n",
"total_precision2_ed = []\n",
"total_pruned_ed = []\n",
"for i in range(len(targets)):\n",
"    top_10_percent = int(len(all_ed_candidates[i]) * 0.1)\n",
"    pruned = int(100 * (1 - len(all_ed_candidates[i]) / len(all_dtw_candidates[i])))\n",
"#     print(\"Pruned: \" + str(pruned) + \"%\")\n",
"    recall = 0\n",
"    for index in all_dtw_candidates[i][0:k]:\n",
"        if index in all_ed_candidates[i]:\n",
"            recall += 1\n",
"#     print(\"Recall: \" + str(100*recall/k) + \"%\")\n",
"\n",
"    precision = 0\n",
"    for index in all_dtw_candidates[i][0:k]:\n",
"        if index in all_ed_candidates[i][0:k]:\n",
"            precision += 1\n",
"#     print(\"Precision: \" + str(100*precision/k) + \"%\")\n",
"\n",
"    precision2 = 0\n",
"    for index in all_ed_candidates[i][0:k]:\n",
"        if index in all_dtw_candidates[i][0:top_10_percent]:\n",
"            precision2 += 1\n",
"#     print(\"Precision 10th percentile: \" + str(100*precision2/k) + \"%\")\n",
"    total_pruned_ed.append(pruned)\n",
"    total_recall_ed.append(recall / k)\n",
"    total_precision_ed.append(precision / k)\n",
"    total_precision2_ed.append(precision2 / k)\n",
"\n",
"print(\"=================================================\")\n",
"print(\"Total pruned: \" + str(np.mean(total_pruned_ed)) + \"%\")\n",
"print(\"Total recall: \" + str(100 * np.mean(total_recall_ed)) + \"%\")\n",
"print(\"Total precision: \" + str(100 * np.mean(total_precision_ed)) + \"%\")\n",
"print(\"Total precision 2: \" + str(100 * np.mean(total_precision2_ed)) + \"%\")" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"# total_recall_sax = []\n",
"# total_precision_sax = []\n",
"# total_precision2_sax = []\n",
"# total_pruned_sax = []\n",
"# for i in range(len(targets)):\n",
"#     top_10_percent = int(len(all_sax_candidates[i]) * 0.1)\n",
"#     pruned = int(100*(1-len(all_sax_candidates[i])/len(all_dtw_candidates[i])))\n",
"# #     print(\"Pruned: \" + str(pruned) + \"%\")\n",
"#     recall = 0\n",
"#     for index in all_dtw_candidates[i][0:k]:\n",
"#         if index in all_sax_candidates[i]:\n",
"#             recall += 1\n",
"# #     print(\"Recall: \" + str(100*recall/k) + \"%\")\n",
"\n",
"#     precision = 0\n",
"#     for index in all_dtw_candidates[i][0:k]:\n",
"#         if index in all_sax_candidates[i][0:k]:\n",
"#             precision += 1\n",
"# #     print(\"Precision: \" + str(100*precision/k) + \"%\")\n",
"\n",
"#     precision2 = 0\n",
"#     for index in all_sax_candidates[i][0:k]:\n",
"#         if index in all_dtw_candidates[i][0:top_10_percent]:\n",
"#             precision2 += 1\n",
"# #     print(\"Precision 10th percentile: \" + str(100*precision2/k) + \"%\")\n",
"#     total_pruned_sax.append(pruned)\n",
"#     total_recall_sax.append(recall/k)\n",
"#     total_precision_sax.append(precision/k)\n",
"#     total_precision2_sax.append(precision2/k)\n",
"\n",
"# print(\"=================================================\")\n",
"# print(\"Total pruned: \" + str(np.mean(total_pruned_sax)) + \"%\")\n",
"# print(\"Total recall: \" + str(100 * np.mean(total_recall_sax)) + \"%\")\n",
"# print(\"Total precision: \" + str(100 * np.mean(total_precision_sax)) + \"%\")\n",
"# print(\"Total precision 2: \" + str(100 * np.mean(total_precision2_sax)) + \"%\")" ] }
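, { "cell_type": "markdown", "metadata": {}, "source": [ "The cells above repeat the same bookkeeping for every method. As a compact alternative, a hypothetical helper along the following lines could compute the same numbers for any candidate list; it is only a sketch and is not used by the cells above or below." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"def candidate_metrics(candidates, reference, k=100):\n",
"    # Hypothetical helper: computes the same quantities as the cells above for one\n",
"    # candidate list, assuming `candidates` and `reference` are index lists sorted\n",
"    # from most to least similar (reference = the exact DTW ranking).\n",
"    top_10_percent = int(len(candidates) * 0.1)\n",
"    pruned = int(100 * (1 - len(candidates) / len(reference)))\n",
"    candidate_set = set(candidates)\n",
"    top_k_candidates = set(candidates[:k])\n",
"    reference_top = set(reference[:top_10_percent])\n",
"    recall = sum(1 for idx in reference[:k] if idx in candidate_set) / k\n",
"    precision = sum(1 for idx in reference[:k] if idx in top_k_candidates) / k\n",
"    precision_10 = sum(1 for idx in candidates[:k] if idx in reference_top) / k\n",
"    return pruned, recall, precision, precision_10\n",
"\n",
"# example for one target: candidate_metrics(all_lsh_candidates[0], all_dtw_candidates[0], k)" ] }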
, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"labels = ['Recall', 'Precision-{}'.format(k), 'Precision-10%']\n",
"pseudo_values = [\n",
"    100 * np.mean(total_recall_pseudo),\n",
"    100 * np.mean(total_precision_pseudo),\n",
"    100 * np.mean(total_precision2_pseudo)\n",
"]\n",
"pseudo_error = [\n",
"    100 * np.std(total_recall_pseudo),\n",
"    100 * np.std(total_precision_pseudo),\n",
"    100 * np.std(total_precision2_pseudo)\n",
"]\n",
"pseudo_ed_values = [\n",
"    100 * np.mean(total_recall_pseudo_ed),\n",
"    100 * np.mean(total_precision_pseudo_ed),\n",
"    100 * np.mean(total_precision2_pseudo_ed)\n",
"]\n",
"pseudo_ed_error = [\n",
"    100 * np.std(total_recall_pseudo_ed),\n",
"    100 * np.std(total_precision_pseudo_ed),\n",
"    100 * np.std(total_precision2_pseudo_ed)\n",
"]\n",
"ed_values = [\n",
"    100 * np.mean(total_recall_ed),\n",
"    100 * np.mean(total_precision_ed),\n",
"    100 * np.mean(total_precision2_ed)\n",
"]\n",
"ed_error = [\n",
"    100 * np.std(total_recall_ed),\n",
"    100 * np.std(total_precision_ed),\n",
"    100 * np.std(total_precision2_ed)\n",
"]\n",
"\n",
"colors = ['#4daf4a', '#377eb8', '#ff7f00',\n",
"          '#f781bf', '#a65628', '#984ea3',\n",
"          '#999999', '#e41a1c', '#dede00']\n",
"\n",
"x = 1.7 * np.arange(len(labels))  # the label locations\n",
"width = 0.35  # the width of the bars\n",
"\n",
"fig, ax = plt.subplots()\n",
"fig.set_size_inches(10, 7)\n",
"rects1 = ax.bar(x - width, pseudo_values, width, yerr=pseudo_error, color=colors[0], capsize=10, label='PSEUDo (DTW)')\n",
"rects2 = ax.bar(x, pseudo_ed_values, width, yerr=pseudo_ed_error, color=colors[1], capsize=10, label='PSEUDo (ED)')\n",
"rects3 = ax.bar(x + width, ed_values, width, yerr=ed_error, color=colors[6], capsize=10, label='ED')\n",
"\n",
"ax.set_ylabel('% Relative to DTW')\n",
"ax.set_title('Recall and precision compared to DTW [EEG: M={}, T={}, d={}]'.format(M, T, N))\n",
"ax.set_xticks(x)\n",
"ax.set_xticklabels(labels)\n",
"ax.legend()\n",
"\n",
"\n",
"def autolabel(rects):\n",
"    \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n",
"    for rect in rects:\n",
"        height = round(rect.get_height(), 0)\n",
"        ax.annotate('{}'.format(height) + '%',\n",
"                    xy=(rect.get_x() + rect.get_width() / 2, height),\n",
"                    xytext=(0, 3),  # 3 points vertical offset\n",
"                    textcoords=\"offset points\",\n",
"                    ha='center', va='bottom')\n",
"\n",
"\n",
"autolabel(rects1)\n",
"autolabel(rects2)\n",
"autolabel(rects3)\n",
"\n",
"fig.tight_layout()\n",
"plt.savefig('images/accuracy_eeg_' + str(M) + '_' + str(T) + '_' + str(N))\n",
"plt.show()" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Computing time" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"\n",
"# total querying time is summed over all targets; only PSEUDo has a preprocessing cost,\n",
"# which is shared by its DTW- and ED-based variants\n",
"labels = ['PSEUDo (DTW)', 'PSEUDo (ED)', 'DTW', 'ED']\n",
"preprocess_values = [pseudo_preprocess_time, pseudo_preprocess_time, 0, 0]\n",
"query_values = np.array([np.sum(total_lsh_times), np.sum(total_lsh_times_ed), np.sum(total_dtw_times), np.sum(total_ed_times)])\n",
"\n",
"x = np.arange(len(labels))\n",
"width = 0.35\n",
"\n",
"fig, ax = plt.subplots()\n",
"fig.set_size_inches(10, 7)\n",
"rects1 = ax.bar(x - width/2, preprocess_values, width, color=colors[1], label='Preprocessing')\n",
"rects2 = ax.bar(x + width/2, query_values, width, color=colors[0], label='Querying')\n",
"\n",
"ax.set_ylabel('Time (s)')\n",
"ax.set_title('Processing times of various search strategies [EEG: M={}, T={}, d={}]'.format(M, T, N))\n",
"ax.set_xticks(x)\n",
"ax.set_xticklabels(labels)\n",
"ax.legend()\n",
"\n",
"\n",
"def autolabel(rects):\n",
"    \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n",
"    for rect in rects:\n",
"        height = round(rect.get_height(), 2)\n",
"        ax.annotate('{}'.format(height),\n",
"                    xy=(rect.get_x() + rect.get_width() / 2, height),\n",
"                    xytext=(0, 3),  # 3 points vertical offset\n",
"                    textcoords=\"offset points\",\n",
"                    ha='center', va='bottom')\n",
"\n",
"\n",
"autolabel(rects1)\n",
"autolabel(rects2)\n",
"\n",
"fig.tight_layout()\n",
"plt.savefig('images/time_eeg_' + str(M) + '_' + str(T) + '_' + str(N))\n",
"\n",
"plt.show()" ] }
], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.5" } }, "nbformat": 4, "nbformat_minor": 4 }