diff --git a/README.rst b/README.rst
index 16c203159762c0c844e051af65727958fdac170c..af739fa917e00a847ad9e8df3c92bbbc80b577bc 100644
--- a/README.rst
+++ b/README.rst
@@ -1,50 +1 @@
-========
-Overview
-========
-
-Goal
-----
-
-* Read in data for many channels and sources
-* Plot data for each channel in an interactive format
-* Easily see and keep track of combs
-* Organize plots by channel and time/date
-
-Status
-------
-
-Testing; likely to be buggy and constantly updated.
-
-=======
-Contact
-=======
-
-neunzert (at) umich (dot) edu
-
-============
-Dependencies
-============
-
-These scripts depend on a number of non-standard python libraries, namely:
-
-* gwpy (for loading data from detector channels)
-* bokeh (for interactive plotting)
-* pandas (for handling tables of data)
-* jinja2 (for creating web pages from a template)
-
-To avoid installing all of these things independently, work on the LHO cluster and source the following virtualenv::
-
-    $ source /home/aneunzert/gwypybokeh/bin/activate
-
-If you would like to work on a different cluster, email me and I will happily clone the virtualenv where necessary.
-
-======
-Basics
-======
-
-.. image:: finetooth_workflow.png
-
-* Modular workflow
-* All options are documented in their respective scripts; run ``python scriptname.py --help`` to see what is available (where scriptname is whatever python script you're trying to learn about).
-* See `the tutorial <tutorial.rst>`_ (in progress) to get started.
-* Comb finding is documented in `a separate file <combfinding.rst>`_ as it is currently a standalone task. This will also be addressed in the expanded version of the tutorial.
+Documentation for this project has recently been overhauled. Rendered docs can be found on my public_html page `here <https://ldas-jobs.ligo-wa.caltech.edu/~aneunzert/FineToothDocs/build/html/>`_. The source files are also available in the repository under ``docs/source``.
diff --git a/chdata.py b/chdata.py
index 0bbd6721ff099d86a2c4c95b2552c6f85b7e8fa9..8fe93114f4fe35b395db191cd4923ce9ef742525 100644
--- a/chdata.py
+++ b/chdata.py
@@ -1,72 +1,82 @@
 from __future__ import division
 import argparse
 
-parser = argparse.ArgumentParser(description="Calculate and save spectra using GWpy.")
-parser.register('type', 'bool', (lambda x: x.lower() in ("yes", "true", "t", "1")))
-parser.add_argument("--chlist",help="Channel list source (path to a newline-delimited text file)")
-parser.add_argument("--name",help="Prefix for output files (IMPORTANT: do not use underscores.)")
-parser.add_argument("--starttime",help="Start time (string, in format \"(YYYY-MM-DD-HH-MM-SS)\"")
-parser.add_argument("--durationhrs",help="Duration (hours)",type=float)
-parser.add_argument("--outputfolder",help="Output folder for data. This is the parent folder; subfolders will be created to organize the data.")
-parser.add_argument("--chreference",help="Reference channel for coherence comparison (default H1:LSC-DARM_IN1_DQ)",default="H1:LSC-DARM_IN1_DQ")
-parser.add_argument("--ffttime",help="FFT time (s) (default 20)",default=20,type=float)
-parser.add_argument("--fftoverlap",help="FFT overlap (default 0.5)",default=0.5,type=float)
-parser.add_argument("--verbose",help="Verbose output (default false)",default=False)
-parser.add_argument("--overwrite",help="Overwrite existing files (default false)",default=False)
-args=parser.parse_args()
+def makeparser():
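+	'''Build the command-line argument parser.'''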
 
+	parser = argparse.ArgumentParser(description="Calculate and save spectra using GWpy.")
+	parser.register('type', 'bool', (lambda x: x.lower() in ("yes", "true", "t", "1")))
+	parser.add_argument("--chlist",help="Channel list source (path to a newline-delimited text file)")
+	parser.add_argument("--name",help="Prefix for output files (IMPORTANT: do not use underscores.)")
+	parser.add_argument("--starttime",help="Start time (string, in format \"(YYYY-MM-DD-HH-MM-SS)\"")
+	parser.add_argument("--durationhrs",help="Duration (hours)",type=float)
+	parser.add_argument("--outputfolder",help="Output folder for data. This is the parent folder; subfolders will be created to organize the data.")
+	parser.add_argument("--chreference",help="Reference channel for coherence comparison (default H1:LSC-DARM_IN1_DQ)",default="H1:LSC-DARM_IN1_DQ")
+	parser.add_argument("--ffttime",help="FFT time (s) (default 20)",default=20,type=float)
+	parser.add_argument("--fftoverlap",help="FFT overlap (default 0.5)",default=0.5,type=float)
+	parser.add_argument("--verbose",help="Verbose output (default false)",default=False)
+	parser.add_argument("--overwrite",help="Overwrite existing files (default false)",default=False)
 
-import os
-import sys
-import traceback
-import datetime
-from gwpy.timeseries import TimeSeries
-from gwpy import time
-import numpy as np
-import hdf5io as h5
+	return parser
 
-# Get list of channels from the supplied txt file
-f=open(args.chlist,'r')
-chlist = [line[:-1] for line in f]
+def main():
+	'''Fetch time series data for each channel, compute ASD spectra, and save them as .hdf5 files.'''
 
-if args.outputfolder[-1]!="/":
-	args.outputfolder+="/"
-args.outputfolder=os.path.expanduser(args.outputfolder)
+	import os
+	import sys
+	import traceback
+	import datetime
+	from gwpy.timeseries import TimeSeries
+	from gwpy import time
+	import numpy as np
+	import hdf5io as h5
 
-#Create start and end times
-starttime=datetime.datetime.strptime(args.starttime,'%Y-%m-%d-%H-%M-%S')
-endtime=starttime+datetime.timedelta(hours=args.durationhrs)
+	args=makeparser().parse_args()
 
-#Create a unique subdirectory name and print it for the user's reference
-fname=args.name+"_"+starttime.strftime('%Y-%m-%d-%H-%M-%S')+"_"+str(args.durationhrs).replace(".","p")+"-hrs"
-print("Subdirectory: {0}".format(fname))
+	# Get list of channels from the supplied txt file
+	f=open(args.chlist,'r')
+	chlist = [line.rstrip('\n') for line in f]
 
-# If subdirectory does not already exists, create it.
-if fname not in os.listdir(args.outputfolder):
-	os.mkdir("{0}{1}".format(args.outputfolder,fname))
+	if args.outputfolder[-1]!="/":
+		args.outputfolder+="/"
+	args.outputfolder=os.path.expanduser(args.outputfolder)
 
-#Loop through channels in the channel list
-for ch in chlist:
-	print("\nProcessing {}".format(ch))
-	# Create the name for the data file.
-	hdf5name=(args.outputfolder+fname+"/dat_{0}.hdf5".format(ch))
-	# If the data file already exists and the user does not want to overwrite it, skip.
-	if args.overwrite==False and os.path.isfile(hdf5name):
-		print("... Data already exists in file {}; skipping.".format(hdf5name))
-	else:
-		try:
-			refData = TimeSeries.fetch(args.chreference,starttime,endtime,verbose=args.verbose)
-			chData = TimeSeries.fetch(ch,starttime,endtime,verbose=args.verbose)
-			print("Calculating ASD...")
-			ASD=chData.asd(args.ffttime,int(args.ffttime*args.fftoverlap))
-			print("Saving in file {0}".format(hdf5name))
-			h5.save_spectrum(hdf5name,np.array(ASD.value),freq=np.array(ASD.frequencies))
-			print("... Finished successfully.")
-				
-		except RuntimeError:
-			if args.verbose:
-				print(traceback.format_exc())
-			print("... Failed to load data.")
-			continue
+	#Create start and end times
+	starttime=datetime.datetime.strptime(args.starttime,'%Y-%m-%d-%H-%M-%S')
+	endtime=starttime+datetime.timedelta(hours=args.durationhrs)
 
-print("Program complete.")
+	#Create a unique subdirectory name and print it for the user's reference
+	fname=args.name+"_"+starttime.strftime('%Y-%m-%d-%H-%M-%S')+"_"+str(args.durationhrs).replace(".","p")+"-hrs"
+	print("Subdirectory: {0}".format(fname))
+
+	# If subdirectory does not already exist, create it.
+	if fname not in os.listdir(args.outputfolder):
+		os.mkdir("{0}{1}".format(args.outputfolder,fname))
+
+	#Loop through channels in the channel list
+	for ch in chlist:
+		print("\nProcessing {}".format(ch))
+		# Create the name for the data file.
+		hdf5name=(args.outputfolder+fname+"/dat_{0}.hdf5".format(ch))
+		# If the data file already exists and the user does not want to overwrite it, skip.
+		if args.overwrite==False and os.path.isfile(hdf5name):
+			print("... Data already exists in file {}; skipping.".format(hdf5name))
+		else:
+			try:
+				refData = TimeSeries.fetch(args.chreference,starttime,endtime,verbose=args.verbose)
+				chData = TimeSeries.fetch(ch,starttime,endtime,verbose=args.verbose)
+				print("Calculating ASD...")
+				ASD=chData.asd(args.ffttime,int(args.ffttime*args.fftoverlap))
+				print("Saving in file {0}".format(hdf5name))
+				h5.save_spectrum(hdf5name,np.array(ASD.value),freq=np.array(ASD.frequencies))
+				print("... Finished successfully.")
+					
+			except RuntimeError:
+				if args.verbose:
+					print(traceback.format_exc())
+				print("... Failed to load data.")
+				continue
+
+	print("Program complete.")
+
+if __name__ == '__main__':
+	main()
diff --git a/chdataFscan.py b/chdataFscan.py
index 93b441d20c05b6e4b52e224bf5b65e7cb3cecaf1..1cbc60dda4158e179e02072dd602b490c2905946 100644
--- a/chdataFscan.py
+++ b/chdataFscan.py
@@ -2,212 +2,221 @@ from __future__ import division
 import argparse
 import datetime
 
-parser=argparse.ArgumentParser()
-parser.register('type', 'bool', (lambda x: x.lower() in ("yes", "true", "t", "1")))
-parser.add_argument("-d", "--date",help="Date in format \"YYYY-MM-DD\" (Default is today).",default=datetime.date.today().strftime('%Y-%m-%d'))
-parser.add_argument("-i", "--inputfolder", help="Parent folder where SFTs are located. If no folder is supplied, it will be found automatically from the channel subsystem, using defaults for LHO.",default="")
-parser.add_argument("-o", "--outputfolder", help="Parent folder where output data will be stored")
-parser.add_argument("-ch","--chlist",help="List of channels in a newline-delimited text file")
-parser.add_argument("-n","--name",help="Prefix for output files")
-parser.add_argument("-f","--fmin",help="Frequency minumum (default 0)",type=float,default=0)
-parser.add_argument("-b","--fband",help="Frequency bandwidth (default 2000)",type=float,default=2000)
-parser.add_argument("-s","--start",help="Start date for plots",default="")
-parser.add_argument("-c","--cumulative",help="Compute cumulative plots (default=False)",default=False)
-parser.add_argument("-w","--overwrite",help="Overwrite existing files.",type='bool',default=False)
-parser.add_argument("-x","--checkOnly",help="Don't compute anything, just print which days have data.",type='bool',default=False)
-args=parser.parse_args()
-
-import sys
-import os
-import traceback
-import subprocess
-import numpy as np
-import glob
-import hdf5io as h5
-devnull = open(os.devnull, 'w')
-
-# Correctly format the start date and the channel list 
-args.date=datetime.datetime.strptime(args.date,'%Y-%m-%d')
-chlist = [line[:-1] for line in open(args.chlist,'r')]
-
-# Add trailing /s on folders if they do not exist.
-if (len(args.inputfolder)>0) and (args.inputfolder[-1]!="/"):
-	args.inputfolder+="/"
-
-if args.outputfolder[-1]!="/":
-	args.outputfolder+="/"
-
-# If no start date is supplied, set up calculation for only 1 day.
-if args.start=="":
-	dates=[args.date]
-	ndates=1
-# Otherwise, set up calculation for a range of dates.
-else:
-	args.start=datetime.datetime.strptime(args.start,'%Y-%m-%d')
-	ndates=(args.date-args.start).days+1
-	dates=[args.start + datetime.timedelta(n) for n in range(ndates)]
-
-# Set up variable to hold "old weights" for the cumulative sum.
-oldcumulwtsname = ""
-
-# Loop through channels
-for ch in chlist:
-
-	# Just some nice formatting for channel names
-	div="="*len(ch)
-	print("\n"+div+"\n"+ch+"\n"+div)
-
-	# Loop through date range
-	for i in range(ndates):
-		date=dates[i]
-		print("\nStarting on date {0}".format(date))
-
-		# Handle case where user specifies a folder 
-		# (this folder must contain all SFTs -- this is not very useful for cumulative plots, nor a channel list which spans multiple subsystems)
-		if args.inputfolder!="":
-			try:
-				datefolder=[path for path in os.listdir(args.inputfolder) if date.strftime('%Y_%m_%d') in path][0]
-			except:
-				print("Error: Could not find SFTs for the requested date ({})".format(date.strftime('%Y-%m-%d')))
-				print(traceback.format_exc())
-				break
-
-		# Handle case where user only specifies channels to search.
-		# This section could be amended to handle different folder location logic.
-		if args.inputfolder=="":
-
-			# Get the subsystem
-			subsystem=ch.split(":")[1].split("-")[0]
-
-			# Get the corresponding Fscan subfolder for that subsystem
-			subdir=[line.split(" ")[1][:-1] for line in open("fscanSubsystemPaths.config",'r') if line.split(" ")[0]==subsystem][0]
-			inputfolder="/home/pulsar/public_html/fscan/H1_DUAL_ARM/{0}/{0}/".format(subdir)
-
-			# Get the corresponding sub-subfolder for the date
-			datefolder=[path for path in os.listdir(inputfolder) if date.strftime('%Y_%m_%d') in path]
-
-			# Check if this whole process has worked correctly and recovered 1 folder.
-			if len(datefolder)==1:
-				datefolder=datefolder[0]
-			elif len(datefolder)==0:
-				print("No Fscan data found for this channel and date.")
-				continue
-			else:
-				print("More than one folder found for channel {} on date {}. Something has gone wrong.".format(ch,date))
-				continue
-
-			# If everything worked out, look inside /sfts/tmp for sft files
-			chfolder=inputfolder+datefolder+"/"+"_".join(ch.split(":"))+"/sfts/tmp/"
-	
-		# Check if there are any SFT files in the folder
-		if os.path.isdir(chfolder) and len([path for path in os.listdir(chfolder) if path[-4:]==".sft"])>0:
-
-			# If the user is only checking for data availability, print availability and leave the loop.
-			if args.checkOnly:
-				print("Found SFT files for this channel and date.")
-				continue
-
-			# Create a name for the output data folder, labeled by date.
-			outputfolder=args.outputfolder+args.name+"_"+date.strftime('%Y-%m-%d')+"_fscans/"
-
-			# If the output folder does not exist, create it.
-			if not os.path.isdir(outputfolder):
-				os.mkdir(outputfolder)
-
-			# Create a prefix for the output data files. Also create names for the (daily) data, weights, and expected PWA files.
-			outprefix=args.name+"_"+ch+"_"+date.strftime('%Y-%m-%d')
-			datname=(outputfolder+"dat_{0}.hdf5".format(ch))
-			wtsname=(outputfolder+"wts_{0}.hdf5".format(ch))
-			outfpath=outputfolder+outprefix+"_PWA.txt"
-	
-			# If the user has specified that cumulative plots should be produced:
-			if args.cumulative:
-				# Create a name for the cumulative output data folder, labeled by both start AND end date.
-				cumuloutputfolder=args.outputfolder+args.name+"_"+date.strftime('%Y-%m-%d')+"_fscan_cumulsince_"+args.start.strftime('%Y-%m-%d')
-				# Create a name for the (cumulative) data and weights
-				cumulwtsname=cumuloutputfolder+"/wts_{0}.hdf5".format(ch)
-				cumuldatname=cumuloutputfolder+"/dat_{0}.hdf5".format(ch)
-			if ((args.cumulative and os.path.isfile(cumulwtsname)) or ((not args.cumulative) and os.path.isfile(datname))) and args.overwrite==False:
-				print("Files for this channel and date already exist, skipping.")
-			else:
-				if os.path.isfile(wtsname):
-					print("Daily weights file already exists at {}; skipping spec_avg_long.".format(wtsname))
-					print("Loading data from weights file...")
-					freq,tavgwt,sumwt=h5.load_spectrum_wts(wtsname)
+def makeparser():
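+	'''Build the command-line argument parser.'''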
+	parser=argparse.ArgumentParser()
+	parser.register('type', 'bool', (lambda x: x.lower() in ("yes", "true", "t", "1")))
+	parser.add_argument("-d", "--date",help="Date in format \"YYYY-MM-DD\" (Default is today).",default=datetime.date.today().strftime('%Y-%m-%d'))
+	parser.add_argument("-i", "--inputfolder", help="Parent folder where SFTs are located. If no folder is supplied, it will be found automatically from the channel subsystem, using defaults for LHO.",default="")
+	parser.add_argument("-o", "--outputfolder", help="Parent folder where output data will be stored")
+	parser.add_argument("-ch","--chlist",help="List of channels in a newline-delimited text file")
+	parser.add_argument("-n","--name",help="Prefix for output files")
+	parser.add_argument("-f","--fmin",help="Frequency minumum (default 0)",type=float,default=0)
+	parser.add_argument("-b","--fband",help="Frequency bandwidth (default 2000)",type=float,default=2000)
+	parser.add_argument("-s","--start",help="Start date for plots",default="")
+	parser.add_argument("-c","--cumulative",help="Compute cumulative plots (default=False)",default=False)
+	parser.add_argument("-w","--overwrite",help="Overwrite existing files.",type='bool',default=False)
+	parser.add_argument("-x","--checkOnly",help="Don't compute anything, just print which days have data.",type='bool',default=False)
+
+	return parser
+
+def main():
+	'''Locate daily Fscan SFTs for each channel, average them into daily (and optionally cumulative) spectra, and save the results as .hdf5 files.'''
+	args=makeparser().parse_args()
+
+	import sys
+	import os
+	import traceback
+	import subprocess
+	import numpy as np
+	import glob
+	import hdf5io as h5
+	devnull = open(os.devnull, 'w')
+
+	# Correctly format the start date and the channel list 
+	args.date=datetime.datetime.strptime(args.date,'%Y-%m-%d')
+	chlist = [line.rstrip('\n') for line in open(args.chlist,'r')]
+
+	# Add a trailing slash to folder paths if it is missing.
+	if (len(args.inputfolder)>0) and (args.inputfolder[-1]!="/"):
+		args.inputfolder+="/"
+
+	if args.outputfolder[-1]!="/":
+		args.outputfolder+="/"
+
+	# If no start date is supplied, set up calculation for only 1 day.
+	if args.start=="":
+		dates=[args.date]
+		ndates=1
+	# Otherwise, set up calculation for a range of dates.
+	else:
+		args.start=datetime.datetime.strptime(args.start,'%Y-%m-%d')
+		ndates=(args.date-args.start).days+1
+		dates=[args.start + datetime.timedelta(n) for n in range(ndates)]
+
+	# Set up variable to hold "old weights" for the cumulative sum.
+	oldcumulwtsname = ""
+
+	# Loop through channels
+	for ch in chlist:
+
+		# Just some nice formatting for channel names
+		div="="*len(ch)
+		print("\n"+div+"\n"+ch+"\n"+div)
+
+		# Loop through date range
+		for i in range(ndates):
+			date=dates[i]
+			print("\nStarting on date {0}".format(date))
+
+			# Handle case where user specifies a folder
+			# (this folder must contain all SFTs -- not very useful for cumulative plots, nor for a channel list that spans multiple subsystems)
+			if args.inputfolder!="":
+				try:
+					datefolder=[path for path in os.listdir(args.inputfolder) if date.strftime('%Y_%m_%d') in path][0]
+				except Exception:
+					print("Error: Could not find SFTs for the requested date ({})".format(date.strftime('%Y-%m-%d')))
+					print(traceback.format_exc())
+					break
+
+			# Handle case where user only specifies channels to search.
+			# This section could be amended to handle different folder location logic.
+			if args.inputfolder=="":
+
+				# Get the subsystem
+				subsystem=ch.split(":")[1].split("-")[0]
+
+				# Get the corresponding Fscan subfolder for that subsystem
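+				# (Each line of fscanSubsystemPaths.config is expected to be of the form "SUBSYSTEM subfolder", space-separated.)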
+				subdir=[line.split(" ")[1][:-1] for line in open("fscanSubsystemPaths.config",'r') if line.split(" ")[0]==subsystem][0]
+				inputfolder="/home/pulsar/public_html/fscan/H1_DUAL_ARM/{0}/{0}/".format(subdir)
+
+				# Get the corresponding sub-subfolder for the date
+				datefolder=[path for path in os.listdir(inputfolder) if date.strftime('%Y_%m_%d') in path]
+
+				# Check if this whole process has worked correctly and recovered 1 folder.
+				if len(datefolder)==1:
+					datefolder=datefolder[0]
+				elif len(datefolder)==0:
+					print("No Fscan data found for this channel and date.")
+					continue
 				else:
-					print("Daily weights file does not already exist at {}".format(wtsname))
-					print("Running spec_avg_long on {0}...".format(chfolder))
-					cmd=['/home/stephen.trembath/LALSUITE_SRCDIR/lalsuite/lalapps/src/pulsar/fscan/spec_avg_long','--SFTs',
-						chfolder+"*.sft",'--startGPS','1000000000','--endGPS', '2000000000','--fMin',str(args.fmin),'--fMax', str(args.fband),
-						'--freqRes','0.10','--timeBaseline', '1800','--IFO', 'H1','--outputBname',outprefix]
-					print(" ".join(cmd))
-					subprocess.call(cmd,cwd=outputfolder,stdout=devnull,stderr=devnull)
-					print("Done with spec_avg_long.")
-					print("Loading data from PWA file at {}...".format(outfpath))
-					freq,tavgwt,sumwt = np.loadtxt(outfpath, comments='#', usecols=(0,1,2), unpack=True)
-					if np.isinf(sumwt[0]):
-						print("This data appears to contain errors! Skipping this date.")
-						continue
+					print("More than one folder found for channel {} on date {}. Something has gone wrong.".format(ch,date))
+					continue
+
+				# If everything worked out, look inside /sfts/tmp for sft files
+				chfolder=inputfolder+datefolder+"/"+"_".join(ch.split(":"))+"/sfts/tmp/"
+		
+			# Check if there are any SFT files in the folder
+			if os.path.isdir(chfolder) and len([path for path in os.listdir(chfolder) if path[-4:]==".sft"])>0:
+
+				# If the user is only checking for data availability, report it and move on to the next date.
+				if args.checkOnly:
+					print("Found SFT files for this channel and date.")
+					continue
+
+				# Create a name for the output data folder, labeled by date.
+				outputfolder=args.outputfolder+args.name+"_"+date.strftime('%Y-%m-%d')+"_fscans/"
+
+				# If the output folder does not exist, create it.
+				if not os.path.isdir(outputfolder):
+					os.mkdir(outputfolder)
+
+				# Create a prefix for the output data files. Also create names for the (daily) data, weights, and expected PWA files.
+				outprefix=args.name+"_"+ch+"_"+date.strftime('%Y-%m-%d')
+				datname=(outputfolder+"dat_{0}.hdf5".format(ch))
+				wtsname=(outputfolder+"wts_{0}.hdf5".format(ch))
+				outfpath=outputfolder+outprefix+"_PWA.txt"
+		
+				# If the user has specified that cumulative plots should be produced:
+				if args.cumulative:
+					# Create a name for the cumulative output data folder, labeled by both start AND end date.
+					cumuloutputfolder=args.outputfolder+args.name+"_"+date.strftime('%Y-%m-%d')+"_fscan_cumulsince_"+args.start.strftime('%Y-%m-%d')
+					# Create a name for the (cumulative) data and weights
+					cumulwtsname=cumuloutputfolder+"/wts_{0}.hdf5".format(ch)
+					cumuldatname=cumuloutputfolder+"/dat_{0}.hdf5".format(ch)
+				if ((args.cumulative and os.path.isfile(cumulwtsname)) or ((not args.cumulative) and os.path.isfile(datname))) and args.overwrite==False:
+					print("Files for this channel and date already exist, skipping.")
+				else:
+					if os.path.isfile(wtsname):
+						print("Daily weights file already exists at {}; skipping spec_avg_long.".format(wtsname))
+						print("Loading data from weights file...")
+						freq,tavgwt,sumwt=h5.load_spectrum_wts(wtsname)
 					else:
-						print("Saving daily weights .hdf5 file at {}...".format(wtsname))
-						# TODO: save correctly
-						h5.save_spectrum_wts(wtsname,tavgwt,sumwt,freq)
-						print("Saving daily spectrum .hdf5 file at {}...".format(datname))
-						h5.save_spectrum(datname,tavgwt/sumwt*1800,freq)
+						print("Daily weights file does not already exist at {}".format(wtsname))
+						print("Running spec_avg_long on {0}...".format(chfolder))
+						cmd=['/home/stephen.trembath/LALSUITE_SRCDIR/lalsuite/lalapps/src/pulsar/fscan/spec_avg_long','--SFTs',
+							chfolder+"*.sft",'--startGPS','1000000000','--endGPS', '2000000000','--fMin',str(args.fmin),'--fMax', str(args.fband),
+							'--freqRes','0.10','--timeBaseline', '1800','--IFO', 'H1','--outputBname',outprefix]
+						print(" ".join(cmd))
+						subprocess.call(cmd,cwd=outputfolder,stdout=devnull,stderr=devnull)
+						print("Done with spec_avg_long.")
+						print("Loading data from PWA file at {}...".format(outfpath))
+						freq,tavgwt,sumwt = np.loadtxt(outfpath, comments='#', usecols=(0,1,2), unpack=True)
+						if np.isinf(sumwt[0]):
+							print("This data appears to contain errors! Skipping this date.")
+							continue
+						else:
+							print("Saving daily weights .hdf5 file at {}...".format(wtsname))
+							# TODO: save correctly
+							h5.save_spectrum_wts(wtsname,tavgwt,sumwt,freq)
+							print("Saving daily spectrum .hdf5 file at {}...".format(datname))
+							h5.save_spectrum(datname,tavgwt/sumwt*1800,freq)
+
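+					# A cumulative spectrum is a weighted average across days: the running
+					# sums of tavgwt and sumwt are combined, and their ratio is scaled by
+					# the 1800 s time baseline, just as in the daily calculation.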
+					if args.cumulative:
+						subprocess.call(['mkdir','-p',cumuloutputfolder])
+					if args.cumulative and oldcumulwtsname!="":
+						print("[Prior data loaded from {}]".format(oldcumulwtsname))
+						oldfreq,oldtavgwt,oldsumwt=h5.load_spectrum_wts(oldcumulwtsname)
+						print("Saving cumulative weights file at {}...".format(cumulwtsname))
+						h5.save_spectrum_wts(cumulwtsname,oldtavgwt+tavgwt,oldsumwt+sumwt,freq)
+						print("Saving cumulative data file at {}...".format(cumuldatname))
+						h5.save_spectrum(cumuldatname,(oldtavgwt+tavgwt)/(oldsumwt+sumwt)*1800,freq)
+						print("Finished with cumulative file saving for {0}".format(date))
+						if args.overwrite==False:
+							print("(Switching to overwrite mode due to new cumulative file being written.)")
+							args.overwrite=True
+					if args.cumulative and oldcumulwtsname=="":
+						print("No prior weights data found; assuming this is day 1")
+						print("Saving weights file at {}...".format(cumulwtsname))
+						h5.save_spectrum_wts(cumulwtsname,tavgwt,sumwt,freq)
+
+				for suffix in ["","_timestamps",".txt","_PWA.txt","_date"]:
+					tmpfname=outputfolder+outprefix+suffix
+					if os.path.isfile(tmpfname):
+						print("Removing {}".format(tmpfname))
+						subprocess.call(["rm",tmpfname])
 
 				if args.cumulative:
-					subprocess.call(['mkdir','-p',cumuloutputfolder])
-				if args.cumulative and oldcumulwtsname!="":
-					print("[Prior data loaded from {}]".format(oldcumulwtsname))
-					oldfreq,oldtavgwt,oldsumwt=h5.load_spectrum_wts(oldcumulwtsname)
-					print("Saving cumulative weights file at {}...".format(cumulwtsname))
-					h5.save_spectrum_wts(cumulwtsname,oldtavgwt+tavgwt,oldsumwt+sumwt,freq)
-					print("Saving cumulative data file at {}...".format(cumuldatname))
-					h5.save_spectrum(cumuldatname,(oldtavgwt+tavgwt)/(oldsumwt+sumwt)*1800,freq)
-					print("Finished with cumulative file saving for {0}".format(date))
-					if args.overwrite==False:
-						print("(Switching to overwrite mode due to new cumulative file being written.)")
-						args.overwrite=True
-				if args.cumulative and oldcumulwtsname=="":
-					print("No prior weights data found; assuming this is day 1")
-					print("Saving weights file at {}...".format(cumulwtsname))
-					h5.save_spectrum_wts(cumulwtsname,tavgwt,sumwt,freq)
-
-			for suffix in ["","_timestamps",".txt","_PWA.txt","_date"]:
-				tmpfname=outputfolder+outprefix+suffix
-				if os.path.isfile(tmpfname):
-					print("Removing {}".format(tmpfname))
-					subprocess.call(["rm",tmpfname])
-
-			if args.cumulative:
-				oldcumulwtsname=str(cumulwtsname)
-
-		else:
-			print("Found the relevant folder at {}, but no SFT files.".format(chfolder))
-			if not args.cumulative:
-				chfolder=inputfolder+datefolder+"/"+"_".join(ch.split(":"))+"/"
-				timeavgfiles=glob.glob(chfolder+"spec_*timeaverage")
-				if len(timeavgfiles)>0:
-					print("Found time-averaged spectrum files.")
-					if args.checkOnly:
+					oldcumulwtsname=str(cumulwtsname)
+
+			else:
+				print("Found the relevant folder at {}, but no SFT files.".format(chfolder))
+				if not args.cumulative:
+					chfolder=inputfolder+datefolder+"/"+"_".join(ch.split(":"))+"/"
+					timeavgfiles=glob.glob(chfolder+"spec_*timeaverage")
+					if len(timeavgfiles)>0:
+						print("Found time-averaged spectrum files.")
+						if args.checkOnly:
+							continue
+						print("Loading {} spectrum files...".format(len(timeavgfiles)))
+						bandmins=np.array([float(x.split("spec_")[1].split("_")[0]) for x in timeavgfiles])
+						ordargs=np.argsort(bandmins)
+						timeavgfiles=np.array(timeavgfiles)[ordargs]
+						freq=[]
+						valavg=[]
+						outputfolder=args.outputfolder+args.name+"_"+date.strftime('%Y-%m-%d')+"_fscans/"
+						subprocess.call(['mkdir','-p',outputfolder])
+						datname=(outputfolder+"dat_{0}.hdf5".format(ch))
+						for timeavgfile in timeavgfiles:
+							freqi,valavgi = np.loadtxt(timeavgfile, comments='#', usecols=(0,1), unpack=True)
+							freq.append(freqi)
+							valavg.append(valavgi)
+						print("Saving daily spectrum .hdf5 file at {}...".format(datname))
+						h5.save_spectrum(datname,np.hstack(valavg),np.hstack(freq))
+						freq=[];valavg=[]
+					else:
+						print("Could not time-averaged spectrum files either.")
 						continue
-					print("Loading {} spectrum files...".format(len(timeavgfiles)))
-					bandmins=np.array([float(x.split("spec_")[1].split("_")[0]) for x in timeavgfiles])
-					ordargs=np.argsort(bandmins)
-					timeavgfiles=np.array(timeavgfiles)[ordargs]
-					freq=[]
-					valavg=[]
-					outputfolder=args.outputfolder+args.name+"_"+date.strftime('%Y-%m-%d')+"_fscans/"
-					subprocess.call(['mkdir','-p',outputfolder])
-					datname=(outputfolder+"dat_{0}.hdf5".format(ch))
-					for timeavgfile in timeavgfiles:
-						freqi,valavgi = np.loadtxt(timeavgfile, comments='#', usecols=(0,1), unpack=True)
-						freq.append(freqi)
-						valavg.append(valavgi)
-					print("Saving daily spectrum .hdf5 file at {}...".format(datname))
-					np.savetxt(gzname,(np.hstack(freq),np.hstack(valavg)))
-					h5.save_spectrum(datname,np.hstack(valavg),np.hstack(freq))
-					freq=[];valavg=[]
-				else:
-					print("Could not time-averaged spectrum files either.")
-					continue
+
+if __name__ == '__main__':
+	main()
diff --git a/chplot.py b/chplot.py
index 8e54ccf7d22eed7da5333c2ea2a2f7e33007fa67..8037e51ea1ce6632c67cf99b6a9eaeb2627dd0f5 100644
--- a/chplot.py
+++ b/chplot.py
@@ -2,109 +2,120 @@ from __future__ import division
 import argparse
 import glob
 
-parser = argparse.ArgumentParser(description="Plot comb spectra using Bokeh. To run this script, first: $source /home/aneunzert/gwpybokeh/bin/activate")
-parser.register('type', 'bool', (lambda x: x.lower() in ("yes", "true", "t", "1")))
-parser.add_argument("--foldername",nargs="+",type=str,help="Name of folder to process (should point to a folder containing gz files generated by chdata.py)")
-parser.add_argument("--tagcombs",nargs="+",help="Combs to tag, specified as \"spacing,offset\"")
-parser.add_argument("--fmin",help="Minimum frequency for default plot axes (can pan outside this range)",type=float,default=None)
-parser.add_argument("--fmax",help="Maximum frequency for default plot axes (can pan outside this range)",type=float,default=None)
-parser.add_argument("--checkrange",help="Require that marked peaks be taller than N nearest neighbors on each side (default=2)",type=int,default=2)
-parser.add_argument("--consecutive",help="Require that marked peaks be part of a chain of N consecutive comb peaks (default=5)",type=int,default=5)
-parser.add_argument("--scalerange",help="Normalize each bin using N nearest neighbors on each side (default=0, no scaling).",type=int,default=0)
-parser.add_argument("--overwrite",help="Overwrite existing files (default=False)",type='bool',default=False)
-parser.add_argument("--verbose",help="Verbose output for errors (default=False)",type='bool', default=False)
-parser.add_argument("--truefcutoff",help="Truncate the frequency range (not just limiting the x-axis of initial plotting). Good for high resolution. (default=False)",type='bool',default=False)
-parser.add_argument("--knownLinesFile",help="A file containing known lines and their descriptions, which should be added to the plot",default=None)
-parser.add_argument("--zeroKnownLines",help="Zero out the known lines (requires knownLinesFile)",type='bool',default=False)
-args=parser.parse_args()
+def makeparser():
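+	'''Build the command-line argument parser.'''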
+	parser = argparse.ArgumentParser(description="Plot comb spectra using Bokeh. To run this script, first: $source /home/aneunzert/gwpybokeh/bin/activate")
+	parser.register('type', 'bool', (lambda x: x.lower() in ("yes", "true", "t", "1")))
+	parser.add_argument("--foldername",nargs="+",type=str,help="Name of folder to process (should point to a folder containing gz files generated by chdata.py)")
+	parser.add_argument("--tagcombs",nargs="+",help="Combs to tag, specified as \"spacing,offset\"")
+	parser.add_argument("--fmin",help="Minimum frequency for default plot axes (can pan outside this range)",type=float,default=None)
+	parser.add_argument("--fmax",help="Maximum frequency for default plot axes (can pan outside this range)",type=float,default=None)
+	parser.add_argument("--checkrange",help="Require that marked peaks be taller than N nearest neighbors on each side (default=2)",type=int,default=2)
+	parser.add_argument("--consecutive",help="Require that marked peaks be part of a chain of N consecutive comb peaks (default=5)",type=int,default=5)
+	parser.add_argument("--scalerange",help="Normalize each bin using N nearest neighbors on each side (default=0, no scaling).",type=int,default=0)
+	parser.add_argument("--overwrite",help="Overwrite existing files (default=False)",type='bool',default=False)
+	parser.add_argument("--verbose",help="Verbose output for errors (default=False)",type='bool', default=False)
+	parser.add_argument("--truefcutoff",help="Truncate the frequency range (not just limiting the x-axis of initial plotting). Good for high resolution. (default=False)",type='bool',default=False)
+	parser.add_argument("--knownLinesFile",help="A file containing known lines and their descriptions, which should be added to the plot",default=None)
+	parser.add_argument("--zeroKnownLines",help="Zero out the known lines (requires knownLinesFile)",type='bool',default=False)
 
-import os
-import sys
-import traceback
-import numpy as np
-import combfinder as cf
-import hdf5io as h5
-from bokeh.plotting import figure,output_file,save,reset_output,ColumnDataSource
-from bokeh.models import HoverTool
-from bokeh.palettes import Spectral6
+	return parser
 
+def main():
+	'''Generate an interactive Bokeh .html plot for each data file found in the supplied folder(s).'''
 
-colors=['red','orange','green','purple','black','blue','yellow','pink','cyan','magenta','brown','gold','gray']
+	args=makeparser().parse_args()
 
-for folderpattern in args.foldername:
-	folderpattern=os.path.expanduser(folderpattern)
-	for foldername in glob.glob(folderpattern):
-		if foldername[-1]!="/":
-			foldername+="/"
-		print("\nProcessing folder {}".format(foldername))
-		for fname in os.listdir(foldername):
-			if "dat_" in fname and (".gz" in fname or ".hdf5" in fname):
-				try:
-					if ".gz" in fname:
-						chname=fname.split("dat_")[1].split(".gz")[0]
-					else:
-						chname=fname.split("dat_")[1].split(".hdf5")[0]
-					print("Processing {0}".format(chname))
-					reset_output()
-					fnamehtml=foldername+chname+".html"
-					if (not os.path.isfile(fnamehtml)) or (args.overwrite==True): 
-						output_file(fnamehtml)
+	import os
+	import sys
+	import traceback
+	import numpy as np
+	import combfinder as cf
+	import hdf5io as h5
+	from bokeh.plotting import figure,output_file,save,reset_output,ColumnDataSource
+	from bokeh.models import HoverTool
+	from bokeh.palettes import Spectral6
+
+
+	colors=['red','orange','green','purple','black','blue','yellow','pink','cyan','magenta','brown','gold','gray']
+
+	for folderpattern in args.foldername:
+		folderpattern=os.path.expanduser(folderpattern)
+		for foldername in glob.glob(folderpattern):
+			if foldername[-1]!="/":
+				foldername+="/"
+			print("\nProcessing folder {}".format(foldername))
+			for fname in os.listdir(foldername):
+				if "dat_" in fname and (".gz" in fname or ".hdf5" in fname):
+					try:
 						if ".gz" in fname:
-							dat=np.loadtxt(foldername+fname)
-							freq=dat[0]
-							asd=dat[1]
+							chname=fname.split("dat_")[1].split(".gz")[0]
 						else:
-							print(fname)
-							freq,asd=h5.load_spectrum(foldername+fname)
-						if args.scalerange!=0:
-							asd=cf.scale_data(asd,args.scalerange)
+							chname=fname.split("dat_")[1].split(".hdf5")[0]
+						print("Processing {0}".format(chname))
+						reset_output()
+						fnamehtml=foldername+chname+".html"
+						if (not os.path.isfile(fnamehtml)) or (args.overwrite==True): 
+							output_file(fnamehtml)
+							if ".gz" in fname:
+								dat=np.loadtxt(foldername+fname)
+								freq=dat[0]
+								asd=dat[1]
+							else:
+								print(fname)
+								freq,asd=h5.load_spectrum(foldername+fname)
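+							# Optionally flatten the spectrum by normalizing each bin against its scalerange nearest neighbors on each side.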
+							if args.scalerange!=0:
+								asd=cf.scale_data(asd,args.scalerange)
 
-						if (args.fmin is None) and (args.fmax is None):
-							print("Plotting all data.")
-							keepasd=asd[:]
-							args.fmin=freq[0]
-							args.fmax=freq[-1]
-						elif (args.fmin is not None) and (args.fmax is not None):
-							keepasd=asd[(freq>args.fmin)&(freq<args.fmax)]
-						else:
-							print("Error: you have specified either fmin or fmax, but not both!")
-							sys.exit()
+							if (args.fmin is None) and (args.fmax is None):
+								print("Plotting all data.")
+								keepasd=asd[:]
+								args.fmin=freq[0]
+								args.fmax=freq[-1]
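+								# (Assigning back onto args pins this range for any remaining files and folders.)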
+							elif (args.fmin is not None) and (args.fmax is not None):
+								keepasd=asd[(freq>args.fmin)&(freq<args.fmax)]
+							else:
+								print("Error: you have specified either fmin or fmax, but not both!")
+								sys.exit()
 
-						if args.truefcutoff==True:
-							asd=keepasd[:]
-							freq=freq[(freq>args.fmin)&(freq<args.fmax)]
-						p=figure(webgl=True,tools="pan,box_zoom,wheel_zoom,resize,reset,save",y_axis_type="log",
-							x_axis_label="Frequency",y_axis_label="ASD",x_range=[args.fmin,args.fmax],y_range=[np.percentile(keepasd,0),np.percentile(keepasd,100)],
-							plot_width=1000,plot_height=500)
-						l1=p.line(freq,asd)
-						p.add_tools(HoverTool(renderers=[l1], tooltips='@x{1.1111} Hz',line_policy='nearest'))
-						iColors=0
-						if args.knownLinesFile:
-							cat=np.genfromtxt(args.knownLinesFile,delimiter=",",dtype=np.str)
-							knownLines=cat[:,0].astype(np.float)
-							knownLineTags=cat[:,1]
 							if args.truefcutoff==True:
-								knownLineTags=knownLineTags[(knownLines>args.fmin)&(knownLines<args.fmax)]
-								knownLines=knownLines[(knownLines>args.fmin)&(knownLines<args.fmax)]
-							knownLineInds=cf.arrNearest(knownLines,freq)
-							if args.zeroKnownLines:
-								asd=cf.zero_bins(asd,1,5,knownLineInds)
-							knownSource=ColumnDataSource(data=dict(xK=freq[knownLineInds],yK=asd[knownLineInds],descK=knownLineTags,truept=knownLines))
-							known=p.diamond('xK','yK',source=knownSource,color='black',size=5)
-							p.add_tools(HoverTool(renderers=[known],tooltips="@xK{1.1111} Hz<br>@descK",line_policy='nearest'))
-						if args.tagcombs:
-							for tagcomb in args.tagcombs:
-								combspacing,comboffset=np.array(tagcomb.split(","),dtype=np.float)
-								comb_inds=cf.predict_comb(freq,comboffset,combspacing)
-								mark_inds=cf.mark_comb(asd,comb_inds,args.checkrange,args.consecutive)
-								p.circle(freq[mark_inds],asd[mark_inds],size=10,color=colors[iColors],fill_alpha=.5,legend="Spacing {}, offset {}".format(combspacing,comboffset))
-								iColors+=1
-						save(p)
-						print("... Finished successfully.")
-					else:
-						print("... Output file already exists; skipping.")
-				except (RuntimeError,IndexError,TypeError,NameError):
-					print("... Plotting failed; data may not be valid.")
-					if args.verbose==True:
-						print(traceback.format_exc())
-					continue
+								asd=keepasd[:]
+								freq=freq[(freq>args.fmin)&(freq<args.fmax)]
+							p=figure(webgl=True,tools="pan,box_zoom,wheel_zoom,resize,reset,save",y_axis_type="log",
+								x_axis_label="Frequency",y_axis_label="ASD",x_range=[args.fmin,args.fmax],y_range=[np.percentile(keepasd,0),np.percentile(keepasd,100)],
+								plot_width=1000,plot_height=500)
+							l1=p.line(freq,asd)
+							p.add_tools(HoverTool(renderers=[l1], tooltips='@x{1.1111} Hz',line_policy='nearest'))
+							iColors=0
+							if args.knownLinesFile:
+								cat=np.genfromtxt(args.knownLinesFile,delimiter=",",dtype=np.str)
+								knownLines=cat[:,0].astype(np.float)
+								knownLineTags=cat[:,1]
+								if args.truefcutoff==True:
+									knownLineTags=knownLineTags[(knownLines>args.fmin)&(knownLines<args.fmax)]
+									knownLines=knownLines[(knownLines>args.fmin)&(knownLines<args.fmax)]
+								knownLineInds=cf.arrNearest(knownLines,freq)
+								if args.zeroKnownLines:
+									asd=cf.zero_bins(asd,1,5,knownLineInds)
+								knownSource=ColumnDataSource(data=dict(xK=freq[knownLineInds],yK=asd[knownLineInds],descK=knownLineTags,truept=knownLines))
+								known=p.diamond('xK','yK',source=knownSource,color='black',size=5)
+								p.add_tools(HoverTool(renderers=[known],tooltips="@xK{1.1111} Hz<br>@descK",line_policy='nearest'))
+							if args.tagcombs:
+								for tagcomb in args.tagcombs:
+									combspacing,comboffset=np.array(tagcomb.split(","),dtype=np.float)
+									comb_inds=cf.predict_comb(freq,comboffset,combspacing)
+									mark_inds=cf.mark_comb(asd,comb_inds,args.checkrange,args.consecutive)
+									p.circle(freq[mark_inds],asd[mark_inds],size=10,color=colors[iColors],fill_alpha=.5,legend="Spacing {}, offset {}".format(combspacing,comboffset))
+									iColors+=1
+							save(p)
+							print("... Finished successfully.")
+						else:
+							print("... Output file already exists; skipping.")
+					except (RuntimeError,IndexError,TypeError,NameError):
+						print("... Plotting failed; data may not be valid.")
+						if args.verbose==True:
+							print(traceback.format_exc())
+						continue
+
+
+if __name__ == '__main__':
+	main()
diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css
new file mode 100644
index 0000000000000000000000000000000000000000..5a069217aa8dafc03bc5a17b6a599828405ef2d6
--- /dev/null
+++ b/docs/source/_static/custom.css
@@ -0,0 +1,7 @@
+.literal {
+    font-weight: bold !important;
+}
+
+p {
+    padding-bottom: 10px !important;
+}
diff --git a/finetooth_workflow.png b/docs/source/_static/finetooth_workflow.png
similarity index 100%
rename from finetooth_workflow.png
rename to docs/source/_static/finetooth_workflow.png
diff --git a/combfinding.rst b/docs/source/combfinding.rst
similarity index 57%
rename from combfinding.rst
rename to docs/source/combfinding.rst
index 0e315cbebb9ff01ce4f584daa7ad8b4bb6f60b43..e0c7dd085a4b343296b0d707762b997999044e64 100644
--- a/combfinding.rst
+++ b/docs/source/combfinding.rst
@@ -1,9 +1,13 @@
-Introduction
-============
++++++++++++++++++++++++++
+Comb finding introduction
++++++++++++++++++++++++++
+
+Notes
+-----
 
 This document is an overview of the function *auto_find_comb()* in combfinder.py .
 
-NOTICE: this is in an early stage of development, is likely to be buggy, and will be constantly updated!
+NOTICE: this is in an early stage of development, is likely to be buggy, and will be constantly updated. Trust nothing!
 
 Quick start
 -----------
@@ -18,30 +22,27 @@ This will return a structured array containing frequency spacings ('sp'), offset
         comb[2]['str']
 
 
-Detailed explanation of optional parameters
--------------------------------------------
-
-Complete syntax::
-
-        auto_find_comb(vFreq,vData,scaleRange=5,nCheckAdj=2,nConsecReq=5,nTopPeaks=600,nNearestAmpNeighbors=30,nMostCommonCombs=300,spacingmin=None,spacingmax=None,verbose=False,showBeforeZeroing=False):
+Algorithm and parameter details
+-------------------------------
 
-This kind of requires a full explanation of the code. Optional parameter names are *emphasized*  so that its clear where they appear.
+.. autofunction:: combfinder.auto_find_comb
+   :noindex:
 
 This algorithm proceeds as follows:
 
-* Flatten the data, to avoid getting thrown off by changes in the overall noise level across the spectrum. For each bin, the sum of the nearest *scaleRange* bins is calculated, and the bin values is divided by that total.
+* Flatten the data, to avoid getting thrown off by changes in the overall noise level across the spectrum. For each bin, the sum of the nearest ``scaleRange`` bins is calculated, and the bin value is divided by that total.
 
-* Determine which points on the spectrum are peaks. Peaks are defined as bins with values higher than the *nCheckAdj* adjacent bins.
+* Determine which points on the spectrum are peaks. Peaks are defined as bins with values higher than the ``nCheckAdj`` adjacent bins.
 
-* Get a sorted list of the *nTopPeaks* highest-amplitude peaks.
+* Get a sorted list of the ``nTopPeaks`` highest-amplitude peaks.
 
-* For each peak in the list, consider its *nNearestAmpNeighbors* nearest neighbors in amplitude. For each (peak, neighbor) pair, calculate the spacing and offset of the simplest comb on which both members of the pair would fall. (Spacing = frequency difference, offset = frequency of either, modulo spacing.) Append this spacing and offset to a list of comb candidates.
+* For each peak in the list, consider its ``nNearestAmpNeighbors`` nearest neighbors in amplitude. For each (peak, neighbor) pair, calculate the spacing and offset of the simplest comb on which both members of the pair would fall. (Spacing = frequency difference, offset = frequency of either, modulo spacing.) Append this spacing and offset to a list of comb candidates. (A worked example follows this list.)
 
-* If the user has supplied *spacingmin* or *spacingmax*, reject any combs with spacings that fall outside this range. Otherwise, skip this step.
+* If the user has supplied ``spacingmin`` or ``spacingmax``, reject any combs with spacings that fall outside this range. Otherwise, skip this step.
 
-* Count how many times each comb candidate appears in the list. Retain the *nMostCommonCombs* most common candidates.
+* Count how many times each comb candidate appears in the list. Retain the ``nMostCommonCombs`` most common candidates.
 
-* Calculate an improved strength statistic for each comb candidate retained. This uses *nCheckAdj* (again) and *nConsecReq*. More detail is given in the following section.
+* Calculate an improved strength statistic for each comb candidate retained. This uses ``nCheckAdj`` (again) and ``nConsecReq``. More detail is given in the following section.
 
 * Organize the combs by strength statistic.
 
@@ -51,10 +52,10 @@ This algorithm proceeds as follows:
 
 * Return all the combs found.
 
-* If *verbose* is specified, print detailed information. If *showBeforeZeroing* is specified, show all the combs prior to the iterative zeroing process.
+* If ``verbose`` is specified, print detailed information. If ``showBeforeZeroing`` is specified, show all the combs prior to the iterative zeroing process.
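+
+For example, in the (peak, neighbor) pair step above, two hypothetical peaks at 52.8 Hz and 57.6 Hz yield the candidate::
+
+        spacing = 57.6 - 52.8    # 4.8 Hz
+        offset = 52.8 % spacing  # 0.0 Hz, i.e. a comb at exact multiples of 4.8 Hz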
 
-Strength statistic details
---------------------------
+Comb "likelihood" statistic
+---------------------------
 
 At present, the strength of a comb is given by this formula (which was found by some vague logic + trial and error) (pseudocode).
 
@@ -62,9 +63,9 @@ Note that this is a very ad-hoc method and I'm working on improving it.
 
 * Predict bins where the comb should lie.
 
-* Determine if each predicted bin contains a peak, using the requirement that it be higher than *nCheckAdj* of its neighbors.
+* Determine if each predicted bin contains a peak, using the requirement that it be higher than ``nCheckAdj`` of its neighbors.
 
-* If the bin contains a peak, and is also part of a row of more than *nConsecReq* consecutive peaks, consider it to be a 'real' part of the comb.
+* If the bin contains a peak, and is also part of a row of more than ``nConsecReq`` consecutive peaks, consider it to be a 'real' part of the comb.
 
 * Multiply the following quantities together:
 
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8fb6252993cdf6bafcbcc6db987ae46de68bce7
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,341 @@
+# -*- coding: utf-8 -*-
+#
+# FineTooth documentation build configuration file, created by
+# sphinx-quickstart on Thu Sep  8 13:52:19 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../..'))
+
+def setup(app):
+    app.add_stylesheet('custom.css')
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.mathjax',
+    'sphinx.ext.viewcode',
+    'sphinx.ext.napoleon',
+    'sphinxarg.ext'
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'FineTooth'
+copyright = u'2016, Ansel Neunzert'
+author = u'Ansel Neunzert'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u'0.1'
+# The full version, including alpha/beta/rc tags.
+release = u'0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'nature'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+html_title = u'FineTooth v0.1'
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'FineTooth'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+     # The paper size ('letterpaper' or 'a4paper').
+     #
+     # 'papersize': 'letterpaper',
+
+     # The font size ('10pt', '11pt' or '12pt').
+     #
+     # 'pointsize': '10pt',
+
+     # Additional stuff for the LaTeX preamble.
+     #
+     # 'preamble': '',
+
+     # Latex figure (float) alignment
+     #
+     # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'FineTooth.tex', u'FineTooth Documentation',
+     u'Ansel Neunzert', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'finetoothdocs', u'FineTooth Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'FineTooth', u'FineTooth Documentation',
+     author, 'FineTooth', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+#
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#
+# texinfo_no_detailmenu = False
diff --git a/docs/source/funcdocs.rst b/docs/source/funcdocs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c144e5b2e2d5b5be3f250962e5236c1868b37b73
--- /dev/null
+++ b/docs/source/funcdocs.rst
@@ -0,0 +1,60 @@
+========================
+Module and function docs
+========================
+
+chdata.py (script)
+------------------
+
+Source
+~~~~~~
+.. automodule:: chdata
+   :members:
+
+Options
+~~~~~~~
+.. argparse::
+   :ref: chdata.makeparser
+   :prog: chdata
+
+chdataFscan.py (script)
+-----------------------
+
+Source
+~~~~~~
+.. automodule:: chdataFscan
+   :members:
+
+Options
+~~~~~~~
+.. argparse::
+   :ref: chdataFscan.makeparser
+   :prog: chdataFscan
+
+chplot.py (script)
+------------------
+
+Source
+~~~~~~
+.. automodule:: chplot
+   :members:
+
+Options
+~~~~~~~
+.. argparse::
+   :ref: chplot.makeparser
+   :prog: chplot
+
+combfinder.py (module only)
+---------------------------
+
+.. automodule:: combfinder
+   :members:
+
+hdf5io.py (module only)
+-----------------------
+
+(This module has yet to be documented/expanded)
+
+.. automodule:: hdf5io
+   :members:
+   :undoc-members:
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..02f7b494f05f5b388878ba676e9175d1828b3285
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,20 @@
+.. FineTooth documentation master file, created by
+   sphinx-quickstart on Thu Sep  8 13:52:19 2016.
+
+FineTooth documentation
+=======================
+
+[`Gitlab repository <https://gitlab.aei.uni-hannover.de/aneunzert/FineTooth/>`_]
+
+.. toctree::
+   :maxdepth: 2
+
+   overview
+   tutorial
+   funcdocs
+   combfinding
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/docs/source/overview.rst b/docs/source/overview.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c1ae5b0f5b1577e97fde6c700735910e25f32c75
--- /dev/null
+++ b/docs/source/overview.rst
@@ -0,0 +1,43 @@
+++++++++
+Overview
+++++++++
+
+Goals
+=====
+
+* Read in data for many channels and sources
+* Plot data for each channel in an interactive format
+* Easily see and keep track of combs
+* Organize plots by channel and time/date
+
+Status
+======
+
+Testing; likely to be buggy and constantly updated.
+
+Contact
+=======
+
+neunzert (at) umich (dot) edu
+
+Dependencies
+============
+
+These scripts depend on a number of non-standard python libraries, namely:
+
+* gwpy (for loading data from detector channels)
+* bokeh (for interactive plotting)
+* pandas (for handling tables of data)
+* jinja2 (for creating web pages from a template)
+
+To avoid installing all of these things independently, work on the LHO cluster and source the following virtualenv::
+
+    $ source /home/aneunzert/gwypybokeh/bin/activate
+
+If you would like to work on a different cluster, email me and I will happily clone the virtualenv where necessary.
+
+Structure
+=========
+
+.. image:: _static/finetooth_workflow.png
+
diff --git a/tutorial.rst b/docs/source/tutorial.rst
similarity index 98%
rename from tutorial.rst
rename to docs/source/tutorial.rst
index 8a4c5cb8ffb44b3a16de4250f4d0082532751c83..28d998b53214a9e44a273e03c4e60855bcfec911 100644
--- a/tutorial.rst
+++ b/docs/source/tutorial.rst
@@ -1,6 +1,7 @@
-[WORK IN PROGRESS]
+++++++++
+Tutorial
+++++++++
 
-This is a tutorial for getting started with FineTooth. If you can run through this tutorial, everything should be in working order.
 
 Loading and plotting data
 -------------------------