I have the following code to overplot three data sets (count rate vs. time) for three different time ranges:
#!/usr/bin/env python from pylab import rc, array, subplot, zeros, savefig, ylim, xlabel, ylabel, errorbar, FormatStrFormatter, gca, axis from scipy import optimize, stats import numpy as np import pyfits, os, re, glob, sys rc('font',**{'family':'serif','serif':['Helvetica']}) rc('ps',usedistiller='xpdf') rc('text', usetex=True) #------------------------------------------------------ tmin=56200 tmax=56249 data=pyfits.open('http://heasarc.gsfc.nasa.gov/docs/swift/results/transients/weak/GX304-1.orbit.lc.fits') time = data[1].data.field(0)/86400. + data[1].header['MJDREFF'] + data[1].header['MJDREFI'] rate = data[1].data.field(1) error = data[1].data.field(2) data.close() cond = ((time > tmin-5) & (time < tmax)) time=time[cond] rate=rate[cond] error=error[cond] errorbar(time, rate, error, fmt='r.', capsize=0) gca().xaxis.set_major_formatter(FormatStrFormatter('%5.1f')) axis([tmin-10,tmax,-0.00,0.45]) xlabel('Time, MJD') savefig("sync.eps",orientation='portrait',papertype='a4',format='eps') As, in this way, the plot is too much confusing, I thought to fit the curves. I tried with UnivariateSpline, but this completely messes up my data. Any advice please? Should I first define a function to fit those data? I also looked for "least-squared": is this the best solution to this problem?